def patStarLines(s0):
'''make line pattern'''
arr = np.zeros((s0,s0), dtype=np.uint8)
col = 255
t = max(1, int(s0/100.)) #line thickness: at least 1 px
for pos in np.linspace(0,np.pi/2,15):
p0 = int(round(np.sin(pos)*s0*2))
p1 = int(round(np.cos(pos)*s0*2))
cv2.line(arr,(0,0),(p0,p1), color=col,
thickness=t, lineType=cv2.LINE_AA )
return arr.astype(float)
def patSiemensStar(s0, n=72, vhigh=255, vlow=0, antialiasing=False):
'''make a Siemens star pattern'''
arr = np.full((s0,s0),vlow, dtype=np.uint8)
c = int(round(s0/2.))
s = 2*np.pi/(2*n)
step = 0
for i in range(2*n):
p0 = round(c+np.sin(step)*2*s0)
p1 = round(c+np.cos(step)*2*s0)
step += s
p2 = round(c+np.sin(step)*2*s0)
p3 = round(c+np.cos(step)*2*s0)
pts = np.array(((c,c),
(p0,p1),
(p2,p3) ), dtype=int)
cv2.fillConvexPoly(arr, pts,
color=vhigh if i%2 else vlow,
lineType=cv2.LINE_AA if antialiasing else cv2.LINE_8)
arr[c,c]=0
return arr.astype(float)
def patText(s0):
'''make text pattern'''
arr = np.zeros((s0,s0), dtype=np.uint8)
s = max(1, int(round(s0/100.))) #font scale and thickness: at least 1
p1 = 0
pp1 = int(round(s0/10.))
for pos0 in np.linspace(0,s0,10):
cv2.putText(arr, 'helloworld', (p1,int(round(pos0))),
cv2.FONT_HERSHEY_COMPLEX_SMALL, fontScale=s,
color=255, thickness=s,
lineType=cv2.LINE_AA )
if p1:
p1 = 0
else:
p1 = pp1
return arr.astype(float)
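# Usage sketch for the three pattern generators above (assumes the
# module-level "import numpy as np" and "import cv2" these functions
# rely on):
for make in (patStarLines, patSiemensStar, patText):
    pattern = make(512)  # 512x512 float test pattern
    cv2.imwrite('%s.png' % make.__name__, pattern.astype(np.uint8))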
def removeSinglePixels(img):
'''
img - boolean array
remove all pixels that have no neighbour
'''
gx = img.shape[0]
gy = img.shape[1]
for i in range(gx):
for j in range(gy):
if img[i, j]:
found_neighbour = False
for ii in range(max(0, i - 1), min(gx, i + 2)):
for jj in range(max(0, j - 1), min(gy, j + 2)):
if ii == i and jj == j:
continue
if img[ii, jj]:
found_neighbour = True
break
if found_neighbour:
break
if not found_neighbour:
img[i, j] = 0
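# A vectorized alternative to the explicit loops above, sketched with
# scipy.ndimage (an assumption - the original may keep the plain loops
# for numba compilation):
from scipy.ndimage import convolve

def removeSinglePixelsFast(img):
    # count the 8-connected neighbours of every pixel:
    kernel = np.ones((3, 3), dtype=int)
    kernel[1, 1] = 0
    n_neighbours = convolve(img.astype(int), kernel, mode='constant')
    # clear pixels that are set but have no set neighbour:
    img[(n_neighbours == 0) & img] = 0
    return img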
def interpolateCircular2dStructuredIDW(grid, mask, kernel=15, power=2,
fr=1, fphi=1, cx=0, cy=0):
'''
same as interpolate2dStructuredIDW
but calculates the distance to each neighbour in polar coordinates
fr, fphi --> weight factors for the radius and angle differences
cx,cy -> polar center of the array e.g. middle->(sx//2+1,sy//2+1)
'''
gx = grid.shape[0]
gy = grid.shape[1]
#FOR EVERY PIXEL
for i in range(gx):
for j in range(gy):
if mask[i,j]:
xmn = i-kernel
if xmn < 0:
xmn = 0
xmx = i+kernel
if xmx > gx:
xmx = gx
ymn = j-kernel
if ymn < 0:
ymn = 0
ymx = j+kernel
if ymx > gy:
ymx = gy
sumWi = 0.0
value = 0.0
#radius and radian to polar center:
R = ((i-cx)**2+(j-cy)**2)**0.5
PHI = atan2(j-cy, i-cx)
#FOR EVERY NEIGHBOUR IN KERNEL
for xi in range(xmn,xmx):
for yi in range(ymn,ymx):
if (xi != i or yi != j) and not mask[xi,yi]:
nR = ((xi-cx)**2+(yi-cy)**2)**0.5
dr = R - nR
#average radius between both p:
midR = 0.5*(R+nR)
#radian of neighbour p:
nphi = atan2(yi-cy, xi-cx)
#relative angle between both points:
dphi = min((2*np.pi) - abs(PHI - nphi),
abs(PHI - nphi))
dphi*=midR
#squared distance; **(0.5*power) below yields 1/distance**power:
dist = ((fr*dr)**2+(fphi*dphi)**2)
wi = 1 / dist**(0.5*power)
sumWi += wi
value += wi * grid[xi,yi]
if sumWi:
grid[i,j] = value / sumWi
return grid
def interpolate2dStructuredCrossAvg(grid, mask, kernel=15, power=2):
'''
useful if large empty areas need to be filled
'''
vals = np.empty(shape=4, dtype=grid.dtype)
dist = np.empty(shape=4, dtype=np.uint16)
weights = np.empty(shape=4, dtype=np.float32)
valid = np.empty(shape=4, dtype=bool)
return _calc(grid, mask, power, kernel, vals, dist, weights, valid)
def growPositions(ksize):
'''
return all positions around central point (0,0)
for a given kernel size
positions grow from smallest to biggest distances
returns [positions] and [distances] from central cell
'''
i = ksize*2+1
dist = np.fromfunction(lambda x,y: ((x-ksize)**2
+(y-ksize)**2)**0.5, (i,i))
pos = np.dstack(
np.unravel_index(
np.argsort(dist.ravel()), (i, i)))[0,1:]
pos0 = pos[:,0]
pos1 = pos[:,1]
return pos-ksize, dist[pos0, pos1]
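# Small worked example (assumes numpy as np): for ksize=1 the eight
# neighbours of (0,0) come back sorted by distance - first the four edge
# neighbours at distance 1.0, then the four diagonals at sqrt(2):
pos, dist = growPositions(1)
print(dist)      # [1. 1. 1. 1. 1.414... 1.414... 1.414... 1.414...]
print(pos[:4])   # the 4-neighbourhood, e.g. [[-1 0] [0 -1] [0 1] [1 0]]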
def qImageToArray(qimage, dtype = 'array'):
"""Convert QImage to numpy.ndarray. The dtype defaults to uint8
for QImage.Format_Indexed8 or `bgra_dtype` (i.e. a record array)
for 32bit color images. You can pass a different dtype to use, or
'array' to get a 3D uint8 array for color images."""
result_shape = (qimage.height(), qimage.width())
temp_shape = (qimage.height(),
qimage.bytesPerLine() * 8 // qimage.depth())
if qimage.format() in (QtGui.QImage.Format_ARGB32_Premultiplied,
QtGui.QImage.Format_ARGB32,
QtGui.QImage.Format_RGB32):
if dtype == 'rec':
dtype = np.dtype({'b': (np.uint8, 0),
'g': (np.uint8, 1),
'r': (np.uint8, 2),
'a': (np.uint8, 3)})
elif dtype == 'array':
dtype = np.uint8
result_shape += (4, )
temp_shape += (4, )
elif qimage.format() == QtGui.QImage.Format_Indexed8:
dtype = np.uint8
else:
raise ValueError("qimage2numpy only supports 32bit and 8bit images")
# FIXME: raise error if alignment does not match
buf = qimage.bits().asstring(qimage.byteCount())
result = np.frombuffer(buf, dtype).reshape(temp_shape)
if result_shape != temp_shape:
result = result[:,:result_shape[1]]
if qimage.format() == QtGui.QImage.Format_RGB32 and dtype == np.uint8:
#case byteorder == 'little'
result = result[...,:3]
#byteorder == 'big' -> get ARGB
result = result[...,::-1]
return result
def varYSizeGaussianFilter(arr, stdyrange, stdx=0,
modex='wrap', modey='reflect'):
'''
applies gaussian_filter on input array
but allowing variable ksize in y
stdyrange(int) -> maximum ksize - ksizes will increase from 0 to given value
stdyrange(tuple,list) -> minimum and maximum size as (mn,mx)
stdyrange(np.array) -> all different ksizes in y
'''
assert arr.ndim == 2, 'only works on 2d arrays at the moment'
s0 = arr.shape[0]
#create stdys:
if isinstance(stdyrange, np.ndarray):
assert len(stdyrange)==s0, '[stdyrange] needs to have same length as [arr]'
stdys = stdyrange
else:
if type(stdyrange) not in (list, tuple):
stdyrange = (0,stdyrange)
mn,mx = stdyrange
stdys = np.linspace(mn,mx,s0)
#prepare array for convolution:
kx = int(stdx*2.5)
kx += 1-kx%2
ky = int(np.max(stdys)*2.5) #also works if [stdyrange] was given as array
ky += 1-ky%2
arr2 = extendArrayForConvolution(arr, (kx, ky), modex, modey)
#create convolution kernels:
inp = np.zeros((ky,kx))
inp[ky//2, kx//2] = 1
kernels = np.empty((s0,ky,kx))
for i in range(s0):
stdy = stdys[i]
kernels[i] = gaussian_filter(inp, (stdy,stdx))
out = np.empty_like(arr)
_2dConvolutionYdependentKernel(arr2, out, kernels)
return out
def numbaGaussian2d(psf, sy, sx):
'''
2d Gaussian to be used in numba code
'''
ps0, ps1 = psf.shape
c0,c1 = ps0//2, ps1//2
ssx = 2*sx**2
ssy = 2*sy**2
for i in range(ps0):
for j in range(ps1):
psf[i,j]=exp( -( (i-c0)**2/ssy
+(j-c1)**2/ssx) )
psf/=psf.sum()
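# Typical use (a sketch, assuming numba is installed and "from math
# import exp" at module level, which the body above relies on):
from numba import njit

numbaGaussian2d_jit = njit(numbaGaussian2d)
psf = np.empty((21, 21))
numbaGaussian2d_jit(psf, sy=2.5, sx=2.5)  # fills psf in place, sum == 1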
def estimateBackgroundLevel(img, image_is_artefact_free=False,
min_rel_size=0.05, max_abs_size=11):
'''
estimate background level by finding the most homogeneous area
and taking its average
min_rel_size - relative size of the examined area
max_abs_size - upper limit of the examined kernel size [px]
'''
s0,s1 = img.shape[:2]
s = min(max_abs_size, int(max(s0,s1)*min_rel_size))
arr = np.zeros(shape=(s0-2*s, s1-2*s), dtype=img.dtype)
#fill arr:
_spatialStd(img, arr, s)
#most homogeneous area:
i,j = np.unravel_index(arr.argmin(), arr.shape)
sub = img[int(i+0.5*s):int(i+s*1.5),
int(j+s*0.5):int(j+s*1.5)]
return np.median(sub)
def EL_Si_module():
'''
returns angular dependent EL emissivity of a PV module
calculated as nanmedian(persp-corrected EL module/reference module)
published in K. Bedrich: Quantitative Electroluminescence Measurement on PV devices
PhD Thesis, 2017
'''
arr = np.array([
[2.5, 1.00281 ],
[7.5, 1.00238 ],
[12.5, 1.00174],
[17.5, 1.00204 ],
[22.5, 1.00054 ],
[27.5, 0.998255],
[32.5, 0.995351],
[37.5, 0.991246],
[42.5, 0.985304],
[47.5, 0.975338],
[52.5, 0.960455],
[57.5, 0.937544],
[62.5, 0.900607],
[67.5, 0.844636],
[72.5, 0.735028],
[77.5, 0.57492 ],
[82.5, 0.263214],
[87.5, 0.123062]
])
angles = arr[:,0]
vals = arr[:,1]
vals[vals>1]=1
return angles, vals
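# The tabulated curve can be evaluated at arbitrary viewing angles with
# plain linear interpolation (a usage sketch, assumes numpy as np):
angles, vals = EL_Si_module()
print(np.interp(60.0, angles, vals))  # ~0.92, between the 57.5 and 62.5 deg values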
def TG_glass():
'''
reflected temperature for 250DEG Glass
published in IEC 62446-3 TS: Photovoltaic (PV) systems
- Requirements for testing, documentation and maintenance
- Part 3: Outdoor infrared thermography of photovoltaic modules
and plants, Page 12
'''
vals = np.array([(80,0.88),
(75,0.88),
(70,0.88),
(65,0.88),
(60,0.88),
(55,0.88),
(50,0.87),
(45,0.86),
(40,0.85),
(35,0.83),
(30,0.80),
(25,0.76),
(20,0.7),
(15,0.60),
(10,0.44)])
#invert angle reference:
vals[:,0]=90-vals[:,0]
#make emissivity relative:
vals[:,1]/=vals[0,1]
return vals[:,0], vals[:,1]
def sensitivity(imgs, bg=None):
'''
Extract pixel sensitivity from a set of homogeneously illuminated images
This method is detailed in Section 5 of:
---
K.Bedrich, M.Bokalic et al.:
ELECTROLUMINESCENCE IMAGING OF PV DEVICES:
ADVANCED FLAT FIELD CALIBRATION,2017
---
'''
bg = getBackground(bg)
for n, i in enumerate(imgs):
i = imread(i, dtype=float)
i -= bg
smooth = fastMean(median_filter(i, 3))
i /= smooth
if n == 0:
out = i
else:
out += i
out /= (n + 1)
return out
def navierStokes2d(u, v, p, dt, nt, rho, nu,
boundaryConditionUV,
boundaryConditionP, nit=100):
'''
solves the Navier-Stokes equation for incompressible flow
on a regular 2d grid
u,v,p --> initial velocity(u,v) and pressure(p) maps
dt --> time step
nt --> number of time steps to calculate
rho, nu --> material constants
nit --> number of iterations to solve the pressure field
'''
#next u, v, p maps:
un = np.empty_like(u)
vn = np.empty_like(v)
pn = np.empty_like(p)
#poisson equation ==> laplace term = b[source term]
b = np.zeros_like(p)
ny,nx = p.shape
#cell size:
dx = 2 / (nx - 1)
dy = 2 / (ny - 1)
#next time step:
for _ in range(nt):
un[:] = u
vn[:] = v
#pressure
_buildB(b, rho, dt, u, v, dx, dy)
for _ in range(nit):
_pressurePoisson(p, pn, dx, dy, b)
boundaryConditionP(p)
#UV
_calcUV(u, v, un, p,vn, dt, dx, dy, rho, nu)
boundaryConditionUV(u,v)
return u, v, p
def shiftImage(u, v, t, img, interpolation=cv2.INTER_LANCZOS4):
'''
remap an image using velocity field
'''
ny,nx = u.shape
sy, sx = np.mgrid[:float(ny):1,:float(nx):1]
sx += u*t
sy += v*t
return cv2.remap(img.astype(np.float32),
(sx).astype(np.float32),
(sy).astype(np.float32), interpolation)
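# Usage sketch (assumes numpy as np and cv2): remap an image with a
# constant velocity field of 2 px/frame in x, evaluated at t=3 frames:
img = np.random.rand(64, 64)
u = np.full((64, 64), 2.0)  # x-velocity [px/frame]
v = np.zeros((64, 64))      # y-velocity
shifted = shiftImage(u, v, 3, img)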
def addNoise(img, snr=25, rShot=0.5):
'''
adds Gaussian (thermal) and shot noise to [img]
[img] is assumed to be noise free
[rShot] - shot noise ratio relative to all noise
'''
s0, s1 = img.shape[:2]
m = img.mean()
if np.isnan(m):
m = np.nanmean(img)
assert m != 0, 'image mean cannot be zero'
img = img / m
noise = np.random.normal(size=s0 * s1).reshape(s0, s1)
if rShot > 0:
noise *= (rShot * img**0.5 + 1)
noise /= np.nanstd(noise)
noise[np.isnan(noise)] = 0
return m * (img + noise / snr)
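# Quick sanity check (a sketch, assumes numpy as np): for a flat image
# the realised noise level should roughly match the requested SNR:
flat = np.full((256, 256), 100.0)
noisy = addNoise(flat, snr=25)
print(flat.mean() / noisy.std())  # approximately 25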
def coarseMaximum(arr, shape):
'''
return an array of [shape]
where every cell equals the localised maximum of the given array [arr]
at the same (scaled) position
'''
ss0, ss1 = shape
s0, s1 = arr.shape
pos0 = linspace2(0, s0, ss0, dtype=int)
pos1 = linspace2(0, s1, ss1, dtype=int)
k0 = pos0[0]
k1 = pos1[0]
out = np.empty(shape, dtype=arr.dtype)
_calc(arr, out, pos0, pos1, k0, k1, ss0, ss1)
return out
def angleOfView(XY, shape=None, a=None, f=None, D=None, center=None):
'''
Another vignetting equation from:
M. Koentges, M. Siebert, and D. Hinken, "Quantitative analysis of PV-modules by electroluminescence images for quality control"
2009
f --> Focal length
D --> Diameter of the aperture
BOTH, D AND f NEED TO HAVE SAME UNIT [PX, mm ...]
a --> Angular aperture
center -> optical center [y,x]
'''
if a is None:
assert f is not None and D is not None
#https://en.wikipedia.org/wiki/Angular_aperture
a = 2*np.arctan2(D/2,f)
x,y = XY
try:
c0,c1 = center
except TypeError:
#no center given -> use middle of [shape]:
s0,s1 = shape
c0,c1 = s0/2, s1/2
#center is given as [y,x], so c0 pairs with y and c1 with x:
ry = (y-c0)**2
rx = (x-c1)**2
return 1 / (1+np.tan(a)*((rx+ry)/c0))**0.5
def angleOfView2(x,y, b, x0=None,y0=None):
'''
Corrected AngleOfView equation by Koentges (via mail from 14/02/2017)
b --> distance between the camera and the module in m
x0 --> viewable width in the module plane of the camera in m
y0 --> viewable height in the module plane of the camera in m
x,y --> pixel position [m] from top left
'''
if x0 is None:
x0 = x[-1,-1]
if y0 is None:
y0 = y[-1,-1]
return np.cos( np.arctan( np.sqrt(
( (x-x0/2)**2+(y-y0/2)**2 ) ) /b ) )
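# Usage sketch: evaluate the correction over a full sensor grid (assumes
# numpy as np; pixel positions are given in metres from the top left):
y, x = np.mgrid[0:1.0:480j, 0:1.6:640j]  # 480x640 px on a 1.0 x 1.6 m plane
f = angleOfView2(x, y, b=2.0)            # camera 2 m away
# f is ~1 in the image centre and smallest in the corners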
def gridLinesFromVertices(edges, nCells, subgrid=None, dtype=float):
"""
creates a regular 2d grid from given edge points (4*(x0,y0))
and the number of cells in x and y
OPTIONAL:
subgrid = ([x],[y]) --> relative positions
e.g. subgrid = ( (0.3,0.7), () )
--> two subgrid lines in x - nothing in y
Returns:
[horiz, vert, subhoriz, subvert] -> arrays of (x,y) poly-lines
(subhoriz and subvert are empty lists if no subgrid is given)
"""
nx, ny = nCells
y, x = np.mgrid[0.:ny + 1, 0.:nx + 1]
src = np.float32([[0, 0], [nx, 0], [nx, ny], [0, ny]])
dst = sortCorners(edges).astype(np.float32)
homography = cv2.getPerspectiveTransform(src, dst)
pts = np.float32((x.flatten(), y.flatten())).T
pts = pts.reshape(1, *pts.shape)
pts2 = cv2.perspectiveTransform(pts, homography)[0]
horiz = pts2.reshape(ny + 1, nx + 1, 2)
vert = np.swapaxes(horiz, 0, 1)
subh, subv = [], []
if subgrid is not None:
sh, sv = subgrid
if len(sh):
subh = np.empty(shape=(ny * len(sh), nx + 1, 2), dtype=np.float32)
last_si = 0
for n, si in enumerate(sh):
spts = pts[:, :-(nx + 1)]
spts[..., 1] += si - last_si
last_si = si
spts2 = cv2.perspectiveTransform(spts, homography)[0]
subh[n::len(sh)] = spts2.reshape(ny, nx + 1, 2)
if len(sv):
subv = np.empty(shape=(ny + 1, nx * len(sv), 2), dtype=np.float32)
last_si = 0
sspts = pts.reshape(1, ny + 1, nx + 1, 2)
sspts = sspts[:, :, :-1]
sspts = sspts.reshape(1, (ny + 1) * nx, 2)
for n, si in enumerate(sv):
sspts[..., 0] += si - last_si
last_si = si
spts2 = cv2.perspectiveTransform(sspts, homography)[0]
subv[:, n::len(sv)] = spts2.reshape(ny + 1, nx, 2)
subv = np.swapaxes(subv, 0, 1)
return [horiz, vert, subh, subv]
def MTF50(self):
'''
return object resolution as [line pairs/mm]
where MTF=50%
see http://www.imatest.com/docs/sharpness/
'''
if self.mtf_x is None:
self.MTF()
f = UnivariateSpline(self.mtf_x, self.mtf_y-0.5)
return f.roots()[0]
def MTF(self, px_per_mm):
'''
px_per_mm = cam_resolution / image_size
'''
res = 100 #numeric resolution
r = 4 #range +-r*std
#size of 1 px:
px_size = 1 / px_per_mm
#standard deviation of the point-spread-function (PSF) as normal distributed:
std = self.std*px_size #transform standard deviation from [px] to [mm]
x = np.linspace(-r*std,r*std, res)
#line spread function:
lsf = self.gaussian1d(x, 1, 0, std)
#MTF defined as Fourier transform of the line spread function:
#abs() because result is complex
y = abs(np.fft.fft(lsf))
#normalize fft so that max = 1
y /= np.max(y)
#step length between xn and xn+1
dstep = x[1] - x[0]
# Fourier frequencies - here: line pairs(cycles) per mm
freq = np.fft.fftfreq(lsf.size, dstep)
#limit mtf between [0-px_per_mm]:
i = np.argmax(freq>px_per_mm)
self.mtf_x = freq[:i]
self.mtf_y = y[:i]
return self.mtf_x, self.mtf_y
def uncertaintyMap(self, psf, method='convolve', fitParams=None):
'''
return the intensity based uncertainty due to the unsharpness of the image
as standard deviation
method = ['convolve' , 'unsupervised_wiener']
latter one also returns the reconstructed image (deconvolution)
'''
# decrease noise in order not to overestimate result:
img = scaleSignal(self.img, fitParams=fitParams)
if method == 'convolve':
blurred = convolve2d(img, psf, 'same')
m = abs(img-blurred) / abs(img + blurred)
m = np.nan_to_num(m)
m*=self.std**2
m[m>1]=1
self.blur_distortion = m
return m
else:
restored = unsupervised_wiener(img, psf)[0]
m = abs(img-restored) / abs(img + restored)
m = np.nan_to_num(m)
m*=self.std**2
m[m>1]=1
self.blur_distortion = m
return m, restored
def stdDev(self):
'''
get the standard deviation
from the PSF is evaluated as 2d Gaussian
'''
if self._corrPsf is None:
self.psf()
p = self._corrPsf.copy()
mn = p.min()
p[p<0.05*p.max()] = mn
p-=mn
p/=p.sum()
x,y = self._psfGridCoords()
x = x.flatten()
y = y.flatten()
guess = (1,1,0)
param, _ = curve_fit(self._fn, (x,y), p.flatten(), guess)
self._fitParam = param
stdx,stdy = param[:2]
self._std = (stdx+stdy) / 2
return self._std
def interpolate2dStructuredIDW(grid, mask, kernel=15, power=2, fx=1, fy=1):
'''
replace all values in [grid] indicated by [mask]
with the inverse distance weighted interpolation of all values within
px+-kernel
[power] -> distance weighting factor: 1/distance**[power]
'''
weights = np.empty(shape=((2*kernel+1,2*kernel+1)))
for xi in range(-kernel,kernel+1):
for yi in range(-kernel,kernel+1):
dist = ((fx*xi)**2+(fy*yi)**2)
if dist:
weights[xi+kernel,yi+kernel] = 1 / dist**(0.5*power)
else:
#centre cell: define its weight (np.empty leaves it undefined)
weights[xi+kernel,yi+kernel] = 0
return _calc(grid, mask, kernel, weights)
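# Usage sketch (assumes numpy as np and the module-level _calc helper
# that does the actual weighted summation, as in the original package):
grid = np.random.rand(50, 50)
mask = np.zeros((50, 50), dtype=bool)
mask[20:25, 20:25] = True  # hole to be filled
grid[mask] = 0
filled = interpolate2dStructuredIDW(grid, mask, kernel=10, power=2)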
def temporalSignalStability(imgs, times, down_scale_factor=1):
'''
(Electroluminescence) signal is not stable over time
especially next to cracks.
This function takes a set of images
and returns parameters, needed to transform uncertainty
to other exposure times using [adjustUncertToExposureTime]
return [signal uncertainty] obtained from linear fit to [imgs]
[average event length]
[ascent],[offset] of linear fit
--------
[imgs] --> corrected EL images captured in sequence
[times] --> absolute measurement times of all [imgs]
e.g. every image was taken every 60 sec, then
times=60,120,180...
[down_scale_factor] --> down scale [imgs] to speed up process
-------
More information can be found at ...
----
K.Bedrich: Quantitative Electroluminescence Imaging, PhD Thesis, 2017
Subsection 5.1.4.3: Exposure Time Dependency
----
'''
imgs = np.asarray(imgs)
s0, s1, s2 = imgs.shape
#down scale imgs to speed up process:
if down_scale_factor > 1:
s1 //= down_scale_factor
s2 //= down_scale_factor
imgs2 = np.empty(shape=(s0, s1, s2))
for n, c in enumerate(imgs):
imgs2[n] = cv2.resize(c, (s2, s1), interpolation=cv2.INTER_AREA)
imgs = imgs2
# linear fit for every point in image set:
ascent, offset, error = linRegressUsingMasked2dArrays(
times, imgs, calcError=True)
# functionally obtained [imgs]:
fn_imgs = np.array([offset + t * ascent for t in times])
#difference between [imgs] for fit result:
diff = imgs - fn_imgs
diff = median_filter(diff, 5)
error_t = np.tile(error, (s0, 1, 1))
# find events:
evt = (np.abs(diff) > 0.5 * error_t)
# calc average event length:
avlen = _calcAvgLen(evt, np.empty(shape=evt.shape[1:]))
#the event length cannot be smaller than the exposure time, so:
i = avlen == 0
avlen = maskedFilter(avlen, mask=i, fn='mean', ksize=7, fill_mask=False)
# remove single px:
i = maximum_filter(i, 3)
avlen[i] = 0
avlen = maximum_filter(avlen, 3)
i = avlen == 0
avlen = median_filter(avlen, 3)
avlen[i] = 0
return error, avlen, ascent, offset
def vignettingFromSpotAverage(
images, bgImages=None, averageSpot=True, thresh=None):
'''
[images] --> list of images containing
small bright spots generated by the same
device images at different positions within image plane
depending on the calibrated waveband the device can be
a LCD display or PV 1-cell mini module
This method is referred as 'Method B' in
---
K.Bedrich, M.Bokalic et al.:
ELECTROLUMINESCENCE IMAGING OF PV DEVICES:
ADVANCED FLAT FIELD CALIBRATION,2017
---
Args:
averageSpot(bool): True: take only the average intensity of each spot
thresh(float): marks the minimum spot value
(estimated with Otsus method otherwise)
Returns:
* array to be post processed
* image mask containing valid positions
'''
fitimg, mask = None, None
mx = 0
for c, img in enumerate(images):
print('%s/%s' % (c + 1, len(images)))
if c == 0:
avgBg = getBackground2(bgImages, img)
img = imread(img, dtype=float)
img -= avgBg
# init:
if fitimg is None:
fitimg = np.zeros_like(img)
mask = np.zeros_like(img, dtype=bool)
# find spot:
if thresh is None:
t = threshold_otsu(img)
else:
t = thresh
# take brightest spot
spots, n = label(minimum_filter(img > t, 3),
background=0, return_num=True)
spot_sizes = [(spots == i).sum() for i in range(1, n + 1)]
try:
spot = (spots == np.argmax(spot_sizes) + 1)
except ValueError:
print("couldn't find spot in image")
continue
if averageSpot:
#reduce the spot to its centre-of-mass pixel:
spot = tuple(np.rint(center_of_mass(spot)).astype(int))
mx2 = img[spot]
else:
mx2 = img[spot].mean()
fitimg[spot] = img[spot]
mask[spot] = 1
if mx2 > mx:
mx = mx2
# scale [0...1]:
fitimg /= mx
return fitimg, mask
def simulateSystematicError(N_SAMPLES=5, N_IMAGES=10,
SHOW_DETECTED_PATTERN=True, # GRAYSCALE=False,
HEIGHT=500, PLOT_RESULTS=True, PLOT_ERROR_ARRAY=True,
CAMERA_PARAM=None, PERSPECTIVE=True, ROTATION=True,
RELATIVE_PATTERN_SIZE=0.5, POSITION=True,
NOISE=25, BLUR=(3, 3), PATTERNS=None):
'''
Simulates a lens calibration using synthetic images
* images are rendered under the given HEIGHT resolution
* noise and smoothing is applied
* perspective and position errors are applied
* images are deformed using the given CAMERA_PARAM
* the detected camera parameters are used to calculate the error to the given ones
simulation
-----------
N_IMAGES -> number of images to take for a camera calibration
N_SAMPLES -> number of camera calibrations of each pattern type
output
--------
SHOW_DETECTED_PATTERN: print each image and detected pattern to screen
PLOT_RESULTS: plot boxplots of the mean error and std of the camera parameters
PLOT_ERROR_ARRAY: plot position error for the lens correction
pattern
--------
this simulation tests the openCV standard patterns: chess board, asymmetric and symmetric circles
GRAYSCALE: whether to load the pattern as gray scale
RELATIVE_PATTERN_SIZE: the relative size of the pattern within the image (0.4->40%)
PERSPECTIVE: [True] -> enable perspective distortion
ROTATION: [True] -> enable rotation of the pattern
BLUR: False or (sizex,sizey), like (3,3)
CAMERA_PARAM: camera calibration parameters as [fx,fy,cx,cy,k1,k2,k3,p1,p2]
'''
print(
'calculate systematic error of the implemented calibration algorithms')
# LOCATION OF PATTERN IMAGES
folder = MEDIA_PATH
if PATTERNS is None:
PATTERNS = ('Chessboard', 'Asymmetric circles', 'Symmetric circles')
patterns = OrderedDict(( # n of inner corners
('Chessboard', ((6, 9), 'chessboard_pattern_a3.svg')),
('Asymmetric circles', ((4, 11), 'acircles_pattern_a3.svg')),
('Symmetric circles', ((8, 11), 'circles_pattern_a3.svg')),
))
# REMOVE PATTERNS THAT ARE NOT TO BE TESTED:
for key in list(patterns):
if key not in PATTERNS:
patterns.pop(key)
if SHOW_DETECTED_PATTERN:
cv2.namedWindow('Pattern', cv2.WINDOW_NORMAL)
# number of positive detected patterns:
success = []
# list[N_SAMPLES] of random camera parameters
fx, fy, cx, cy, k1, k2, k3, p1, p2 = [], [], [], [], [], [], [], [], []
# list[Method, N_SAMPLES] of given-detected parameters:
errl, fxl, fyl, cxl, cyl, k1l, k2l, k3l, p1l, p2l = [
], [], [], [], [], [], [], [], [], []
# list[Method, N_SAMPLES] of magnitude(difference of displacement vector
# array):
dxl = []
dyl = []
# maintain aspect ratio of din a4, a3...:
aspect_ratio_DIN = 2.0**0.5
width = int(round(HEIGHT / aspect_ratio_DIN))
if CAMERA_PARAM is None:
CAMERA_PARAM = [
HEIGHT, HEIGHT, width / 2, HEIGHT / 2, 0.0, 0.01, 0.1, 0.01, 0.001]
# ???CREATE N DIFFERENT RANDOM LENS ERRORS:
for n in range(N_SAMPLES):
# TODO: RANDOMIZE CAMERA ERROR??
fx.append(CAMERA_PARAM[0]) # * np.random.uniform(1, 2) )
fy.append(CAMERA_PARAM[1]) # * np.random.uniform(1, 2) )
cx.append(CAMERA_PARAM[2]) # * np.random.uniform(0.9, 1.1) )
cy.append(CAMERA_PARAM[3]) # * np.random.uniform(0.9, 1.1) )
# keep the docstring order [fx,fy,cx,cy,k1,k2,k3,p1,p2]:
k1.append(CAMERA_PARAM[4]) # + np.random.uniform(-1, 1)*0.1)
k2.append(CAMERA_PARAM[5]) # + np.random.uniform(-1, 1)*0.01)
k3.append(CAMERA_PARAM[6]) # + np.random.uniform(0, 1)*0.001)
p1.append(CAMERA_PARAM[7]) # + np.random.uniform(0, 1)*0.1)
p2.append(CAMERA_PARAM[8]) # + np.random.uniform(0, 1)*0.01)
L = LensDistortion()
# FOR EVERY METHOD:
for method, (board_size, filename) in patterns.items():
f = folder.join(filename)
# LOAD THE SVG FILE, AND SAVE IT WITH NEW RESOLUTION:
svg = QtSvg.QSvgRenderer(f)
image = QtGui.QImage(width * 4, HEIGHT * 4, QtGui.QImage.Format_ARGB32)
image.fill(QtCore.Qt.white)
# Get QPainter that paints to the image
painter = QtGui.QPainter(image)
svg.render(painter)
# Save, image format based on file extension
# f = "rendered.png"
# image.save(f)
#
# if GRAYSCALE:
# img = cv2.imread(f, cv2.IMREAD_GRAYSCALE)
# else:
# img = cv2.imread(f)
img = qImageToArray(image)
success.append([])
fxl.append([])
errl.append([])
fyl.append([])
cxl.append([])
cyl.append([])
k1l.append([])
k2l.append([])
k3l.append([])
p1l.append([])
p2l.append([])
dxl.append([])
dyl.append([])
imgHeight, imgWidth = img.shape[0], img.shape[1]
for n in range(N_SAMPLES):
L.calibrate(board_size, method)
print('SET PARAMS:', fx[n], fy[n], cx[n],
cy[n], k1[n], k2[n], k3[n], p1[n], p2[n])
L.setCameraParams(
fx[n], fy[n], cx[n], cy[n], k1[n], k2[n], k3[n], p1[n], p2[n])
L._coeffs['shape'] = (imgHeight, imgWidth)
hw = imgWidth * 0.5
hh = imgHeight * 0.5
for m in range(N_IMAGES):
pts1 = np.float32([[hw, hh + 100],
[hw - 100, hh - 100],
[hw + 100, hh - 100]])
pts2 = pts1.copy()
if ROTATION:
rotatePolygon(pts2, np.random.uniform(0, 2 * np.pi))
if PERSPECTIVE:
# CREATE A RANDOM PERSPECTIVE:
pts2 += np.random.randint(int(-hw *
0.05), int(hh * 0.05), size=(3, 2))
# MAKE SURE THAT THE PATTERN IS FULLY WITHIN THE IMAGE:
pts2 *= RELATIVE_PATTERN_SIZE
# MOVE TO THE CENTER
pts2[:, 0] += hw * (1 - RELATIVE_PATTERN_SIZE)
pts2[:, 1] += hh * (1 - RELATIVE_PATTERN_SIZE)
if POSITION:
f = ((2 * np.random.rand(2)) - 1)
pts2[:, 0] += hw * 0.7 * f[0] * (1 - RELATIVE_PATTERN_SIZE)
pts2[:, 1] += hh * 0.7 * f[1] * (1 - RELATIVE_PATTERN_SIZE)
# APPLY PERSPECTIVE, POSITION, ROTATION:
M = cv2.getAffineTransform(pts1, pts2)
img_warped = cv2.warpAffine(
img, M, (imgWidth, imgHeight), borderValue=(230, 230, 230))
# DOWNSCALE IMAGE AGAIN - UPSCALING AND DOWNSCALING SHOULD BRING
# THE WARPING ERROR DOWN
img_warped = cv2.resize(img_warped, (width, HEIGHT))
# CREATE THE LENS DISTORTION:
mapx, mapy = L.getDistortRectifyMap(width, HEIGHT)
# print 664, mapx.shape
img_distorted = cv2.remap(
img_warped, mapx, mapy, cv2.INTER_LINEAR, borderValue=(230, 230, 230))
# img_distorted[img_distorted==0]=20
# img_distorted[img_distorted>100]=230
if BLUR:
img_distorted = cv2.blur(img_distorted, BLUR)
if NOISE:
# soften, black and white more gray, and add noise
img_distorted = img_distorted.astype(np.int16)
img_distorted += (np.random.rand(*img_distorted.shape) *
NOISE).astype(img_distorted.dtype)
img_distorted = np.clip(
img_distorted, 0, 255).astype(np.uint8)
# plt.imshow(img_distorted)
# plt.show()
found = L.addImg(img_distorted)
if SHOW_DETECTED_PATTERN and found:
img_distorted = L.drawChessboard(img_distorted)
cv2.imshow('Pattern', img_distorted)
cv2.waitKey(1)
success[-1].append(L.findCount)
try:
L._coeffs = None
errl[-1].append(L.coeffs['reprojectionError'])
L.correct(img_distorted)
c = L.getCameraParams()
print('GET PARAMS:', c)
fxl[-1].append(fx[n] - c[0])
fyl[-1].append(fy[n] - c[1])
cxl[-1].append(cx[n] - c[2])
cyl[-1].append(cy[n] - c[3])
k1l[-1].append(k1[n] - c[4])
k2l[-1].append(k2[n] - c[5])
k3l[-1].append(k3[n] - c[6])
p1l[-1].append(p1[n] - c[7])
p2l[-1].append(p2[n] - c[8])
if PLOT_ERROR_ARRAY:
dx = (mapx - L.mapx) / 2
dy = (mapy - L.mapy) / 2
dxl[-1].append(dx)
dyl[-1].append(dy)
except NothingFound:
print(
"Couldn't create a calibration because no patterns were detected")
del painter
# AVERAGE SAMPLES AND GET STD
dx_std, dx_mean = [], []
dy_std, dy_mean = [], []
mag = []
std = []
for patterndx, patterndy in zip(dxl, dyl):
x = np.mean(patterndx, axis=0)
dx_mean.append(x)
y = np.mean(patterndy, axis=0)
dy_mean.append(y)
x = np.std(patterndx, axis=0)
mag.append((x**2 + y**2)**0.5)
dx_std.append(x)
y = np.std(patterndy, axis=0)
dy_std.append(y)
std.append((x**2 + y**2)**0.5)
# PLOT
p = len(patterns)
if PLOT_RESULTS:
fig, axs = plt.subplots(nrows=2, ncols=5)
axs = np.array(axs).ravel()
for ax, typ, tname in zip(axs,
(success, fxl, fyl, cxl, cyl,
k1l, k2l, k3l, p1l, p2l),
('Success rate', 'fx', 'fy', 'cx',
'cy', 'k1', 'k2', 'k3', 'p1', 'p2')
):
ax.set_title(tname)
# , showmeans=True, meanline=True)#labels=patterns.keys())
ax.boxplot(typ, notch=0, sym='+', vert=1, whis=1.5)
# , ha=ha[n])
ax.set_xticklabels(patterns.keys(), rotation=40, fontsize=8)
if PLOT_ERROR_ARRAY:
mmin = np.min(mag)
mmax = np.max(mag)
smin = np.min(std)
smax = np.max(std)
plt.figure()
for n, pattern in enumerate(patterns.keys()):
plt.subplot(int('2%s%s' % (p, n + 1)), facecolor='g')
plt.title(pattern)
plt.imshow(mag[n], origin='upper', vmin=mmin, vmax=mmax)
if n == p - 1:
plt.colorbar(label='Average')
plt.subplot(int('2%s%s' % (p, n + p + 1)), facecolor='g')
plt.title(pattern)
plt.imshow(std[n], origin='upper', vmin=smin, vmax=smax)
if n == p - 1:
plt.colorbar(label='Standard deviation')
fig = plt.figure()
fig.suptitle('Individually scaled')
for n, pattern in enumerate(patterns.keys()):
# downscale - show max 30 arrows each dimension
sy, sx = dx_mean[n].shape
ix = int(sx / 15)
if ix < 1:
ix = 1
iy = int(sy / 15)
if iy < 1:
iy = 1
Y, X = np.meshgrid(np.arange(0, sy, iy), np.arange(0, sx, ix))
plt.subplot(int('2%s%s' % (p, n + 1)), facecolor='g')
plt.title(pattern)
plt.imshow(mag[n], origin='upper')
plt.colorbar()
plt.quiver(
X, Y, dy_mean[n][::ix, ::iy] * 20, dx_mean[n][::ix, ::iy] * 20)
plt.subplot(int('2%s%s' % (p, n + p + 1)), facecolor='g')
plt.title(pattern)
plt.imshow(std[n], origin='upper')
plt.colorbar()
# plt.quiver(X,Y,dx_std[n][::ix,::iy]*50, dy_std[n][::ix,::iy]*10)
#############################################
fig = plt.figure()
fig.suptitle('Spatial uncertainty + deflection')
for n, pattern in enumerate(patterns.keys()):
L.calibrate(board_size, method)
# a lot of this additional calculation is not necessary:
L.setCameraParams(
fx[0], fy[0], cx[0], cy[0], k1[0], k2[0], k3[0], p1[0], p2[0])
L._coeffs['shape'] = (imgHeight, imgWidth)
L._coeffs['reprojectionError'] = np.mean(errl[n])
# deflection_x, deflection_y = L.getDeflection(width, HEIGHT)
# deflection_x += dx_mean[n]
# deflection_y += dy_mean[n]
ux, uy = L.standardUncertainties()
plt.subplot(int('2%s%s' % (p, n + 1)), facecolor='g')
plt.title(pattern)
plt.imshow(mag[n], origin='upper')
plt.colorbar()
# DEFLECTION
plt.subplot(int('2%s%s' % (p, n + p + 1)), facecolor='g')
plt.title(pattern)
plt.imshow(np.linalg.norm([ux, uy], axis=0), origin='upper')
plt.colorbar()
# DEFL: VECTORS
# downscale - show max 30 arrows each dimension
sy, sx = dx_mean[n].shape
ix = int(sx / 15)
if ix < 1:
ix = 1
iy = int(sy / 15)
if iy < 1:
iy = 1
Y, X = np.meshgrid(np.arange(0, sy, iy), np.arange(0, sx, ix))
plt.quiver(X, Y, ux[::ix, ::iy] * 20, uy[::ix, ::iy] * 20)
if PLOT_ERROR_ARRAY or PLOT_RESULTS:
plt.show()
return dx_mean, dy_mean
def plotSet(imgDir, posExTime, outDir, show_legend,
show_plots, save_to_file, ftype):
'''
creates plots showing the found GAUSSIAN peaks and the histogram
from all images within [imgDir]
posExTime - position range of the exposure time in the image name e.g.: img_30s.jpg -> (4,5)
outDir - dirname to save the output images
show_legend - True/False
show_plots - display the result on screen
save_to_file - save the result to file
ftype - file type of the output images
'''
xvals = []
hist = []
peaks = []
exTimes = []
max_border = 0
if not imgDir.exists():
raise Exception("image dir doesn't exist")
for n, f in enumerate(imgDir):
print(f)
try:
# if imgDir.join(f).isfile():
img = imgDir.join(f)
s = FitHistogramPeaks(img)
xvals.append(s.xvals)
hist.append(s.yvals)
# smoothedHist.append(s.yvals2)
peaks.append(s.fitValues())
if s.border() > max_border:
max_border = s.border()
exTimes.append(float(f[posExTime[0]:posExTime[1] + 1]))
except Exception:
pass
nx = 2
ny = int(len(hist) // nx) + len(hist) % nx
fig, ax = plt.subplots(ny, nx)
# flatten 2d-ax list:
if nx > 1:
ax = [list(i) for i in zip(*ax)] # transpose 2d-list
axx = []
for xa in ax:
for ya in xa:
axx.append(ya)
ax = axx
for x, h, p, e, a in zip(xvals, hist, peaks, exTimes, ax):
a.plot(x, h, label='histogram', linewidth=3)
# l1 = a.plot(x, s, label='smoothed')
for n, pi in enumerate(p):
l2 = a.plot(x, pi, label='peak %s' % n, linewidth=6)
a.set_xlim(xmin=0, xmax=max_border)
a.set_title('%s s' % e)
# plt.setp([l1,l2], linewidth=2)#, linestyle='--', color='r') # set
# both to dashed
if show_legend:
l1 = ax[0].legend() # loc='upper center', bbox_to_anchor=(0.7, 1.05),
l1.draw_frame(False)
plt.xlabel('pixel value')
plt.ylabel('number of pixels')
fig = plt.gcf()
fig.set_size_inches(7 * nx, 3 * ny)
if save_to_file:
p = PathStr(outDir).join('result').setFiletype(ftype)
plt.savefig(p, bbox_inches='tight')
if show_plots:
plt.show()
def flatFieldFromCloseDistance(imgs, bg_imgs=None):
'''
Average multiple images of a homogeneous device
imaged directly in front the camera lens.
if [bg_imgs] are not given, background level is extracted
from 1% of the cumulative intensity distribution
of the averaged [imgs]
This measurement method is referred as 'Method A' in
---
K.Bedrich, M.Bokalic et al.:
ELECTROLUMINESCENCE IMAGING OF PV DEVICES:
ADVANCED FLAT FIELD CALIBRATION,2017
---
'''
img = imgAverage(imgs)
bg = getBackground2(bg_imgs, img)
img -= bg
img = toGray(img)
mx = median_filter(img[::10, ::10], 3).max()
img /= mx
return img
def flatFieldFromCloseDistance2(images, bgImages=None, calcStd=False,
nlf=None, nstd=6):
'''
Same as [flatFieldFromCloseDistance]. Differences are:
... single-time-effect removal included
... returns the standard deviation of the image average [calcStd=True]
Optional:
-----------
calcStd -> set to True to also return the standard deviation
nlf -> noise level function (callable)
nstd -> artefact needs to deviate more than [nstd] to be removed
'''
if len(images) > 1:
# start with brightest images
def fn(img):
img = imread(img)
s0, s1 = img.shape[:2]
# rough approx. of image brightness:
return -img[::s0 // 10, ::s1 // 10].min()
images = sorted(images, key=lambda i: fn(i))
avgBg = getBackground2(bgImages, images[1])
i0 = imread(images[0], dtype=float) - avgBg
i1 = imread(images[1], dtype=float) - avgBg
if nlf is None:
nlf = oneImageNLF(i0, i1)[0]
det = SingleTimeEffectDetection(
(i0, i1), nlf, nStd=nstd, calcVariance=calcStd)
for i in images[1:]:
i = imread(i)
# exclude erroneously darker areas:
thresh = det.noSTE - nlf(det.noSTE) * nstd
mask = i > thresh
# filter STE:
det.addImage(i, mask)
ma = det.noSTE
else:
#single image: the background must be computed here as well
avgBg = getBackground2(bgImages, images[0])
ma = imread(images[0], dtype=float) - avgBg
# fast artifact free maximum:
mx = median_filter(ma[::10, ::10], 3).max()
if calcStd:
return ma / mx, det.mma.var**0.5 / mx
return ma / mx
def SNR_hinken(imgs, bg=0, roi=None):
'''
signal-to-noise ratio (SNR) as mean(images) / std(images)
as defined in Hinken et.al. 2011 (DOI: 10.1063/1.3541766)
works on unloaded images
no memory overload if too many images are given
'''
mean = None
M = len(imgs)
#load the background image unless [bg] stays the scalar default 0:
if not (np.isscalar(bg) and bg == 0):
bg = imread(bg)
if roi is not None:
bg = bg[roi]
#calc mean:
for i in imgs:
img = np.asfarray(imread(i))
if roi is not None:
img = img[roi]
img -= bg
if mean is None:
#init
mean = np.zeros_like(img)
std = np.zeros_like(img)
mean += img
del img
mean /= M
#calc std of mean:
for i in imgs:
img = np.asfarray(imread(i))
if roi is not None:
img = img[roi]
img -= bg
std += (mean - img)**2
del img
std = (std / M)**0.5
return mean.mean() / std.mean()
def boolMasksToImage(masks):
'''
Transform up to 8 boolean layers (2d arrays, dtype bool or int)
into one 8bit image
'''
assert len(masks) <= 8, 'can only transform up to 8 masks into image'
masks = np.asarray(masks, dtype=np.uint8)
assert masks.ndim == 3, 'layers need to be stack of 2d arrays'
return np.packbits(masks, axis=0)[0].T
def imageToBoolMasks(arr):
'''inverse of [boolMasksToImage]'''
assert arr.dtype == np.uint8, 'image needs to be dtype=uint8'
masks = np.unpackbits(arr).reshape(*arr.shape, 8)
return np.swapaxes(masks, 2, 0)
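# Round-trip sketch (assumes numpy as np): pack three boolean masks into
# one 8bit image and recover them again (bits 3..7 stay empty):
masks = np.random.rand(3, 4, 5) > 0.5
img8 = boolMasksToImage(masks)
recovered = imageToBoolMasks(img8)
assert np.array_equal(recovered[:3].astype(bool), masks)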
def calcAspectRatioFromCorners(corners, in_plane=False):
'''
simple aspect-ratio estimation from the four corner points
in_plane -> whether object has no tilt, but only rotation and translation
'''
q = corners
l0 = [q[0, 0], q[0, 1], q[1, 0], q[1, 1]]
l1 = [q[0, 0], q[0, 1], q[-1, 0], q[-1, 1]]
l2 = [q[2, 0], q[2, 1], q[3, 0], q[3, 1]]
l3 = [q[2, 0], q[2, 1], q[1, 0], q[1, 1]]
a1 = line.length(l0) / line.length(l1)
a2 = line.length(l2) / line.length(l3)
if in_plane:
# take aspect ration from more rectangular corner
if (abs(0.5 * np.pi - abs(line.angle2(l0, l1)))
< abs(0.5 * np.pi - abs(line.angle2(l2, l3)))):
return a1
else:
return a2
return 0.5 * (a1 + a2)
def putTextAlpha(img, text, alpha, org, fontFace, fontScale, color,
thickness): # , lineType=None
'''
Extends cv2.putText with [alpha] argument
'''
x, y = cv2.getTextSize(text, fontFace,
fontScale, thickness)[0]
ox, oy = org
imgcut = img[oy - y - 3:oy, ox:ox + x]
if img.ndim == 3:
txtarr = np.zeros(shape=(y + 3, x, 3), dtype=np.uint8)
else:
txtarr = np.zeros(shape=(y + 3, x), dtype=np.uint8)
cv2.putText(txtarr, text, (0, y), fontFace,
fontScale, color,
thickness=thickness
#, lineType=lineType
)
cv2.addWeighted(txtarr, alpha, imgcut, 1, 0, imgcut, -1)
return img
def fastMean(img, f=10, inplace=False):
'''
for bigger ksizes it is often faster to resize an image
rather than to blur it...
'''
s0,s1 = img.shape[:2]
ss0 = int(round(s0/f))
ss1 = int(round(s1/f))
small = cv2.resize(img,(ss1,ss0), interpolation=cv2.INTER_AREA)
#bigger
k = {'interpolation':cv2.INTER_LINEAR}
if inplace:
k['dst']=img
return cv2.resize(small,(s1,s0), **k)
def read(*paths):
"""Build a file path from *paths* and return the contents."""
try:
f_name = os.path.join(*paths)
with open(f_name, 'r') as f:
return f.read()
except IOError:
print('%s not existing ... skipping' % f_name)
return ''
def averageSameExpTimes(imgs_path):
'''
average background images with same exposure time
'''
firsts = imgs_path[:2]
imgs = imgs_path[2:]
for n, i in enumerate(firsts):
firsts[n] = np.asfarray(imread(i))
d = DarkCurrentMap(firsts)
for i in imgs:
i = imread(i)
d.addImg(i)
return d.map()
def getLinearityFunction(expTimes, imgs, mxIntensity=65535, min_ascent=0.001,
):
'''
returns offset, ascent
of image(expTime) = offset + ascent*expTime
'''
# TODO: calculate [min_ascent] from noise function
# instead of having it as variable
ascent, offset, error = linRegressUsingMasked2dArrays(
expTimes, imgs, imgs > mxIntensity)
ascent[np.isnan(ascent)] = 0
# remove low frequent noise:
if min_ascent > 0:
i = ascent < min_ascent
offset[i] += (0.5 * (np.min(expTimes) + np.max(expTimes))) * ascent[i]
ascent[i] = 0
return offset, ascent, error
def sortForSameExpTime(expTimes, img_paths): # , excludeSingleImg=True):
'''
return image paths sorted for same exposure time
'''
d = {}
for e, i in zip(expTimes, img_paths):
if e not in d:
d[e] = []
d[e].append(i)
# for key in list(d.keys()):
# if len(d[key]) == 1:
# print('have only one image of exposure time [%s]' % key)
# print('--> exclude that one')
# d.pop(key)
d = OrderedDict(sorted(d.items()))
return list(d.keys()), list(d.values())
def getDarkCurrentAverages(exposuretimes, imgs):
'''
return exposure times, image averages for each exposure time
'''
x, imgs_p = sortForSameExpTime(exposuretimes, imgs)
s0, s1 = imgs[0].shape
#write into a new array instead of shadowing the input list:
out = np.empty(shape=(len(x), s0, s1),
dtype=imgs[0].dtype)
for i, ip in zip(out, imgs_p):
if len(ip) == 1:
i[:] = ip[0]
else:
i[:] = averageSameExpTimes(ip)
return x, out
def getDarkCurrentFunction(exposuretimes, imgs, **kwargs):
'''
get dark current function from given images and exposure times
'''
exposuretimes, imgs = getDarkCurrentAverages(exposuretimes, imgs)
offs, ascent, rmse = getLinearityFunction(exposuretimes, imgs, **kwargs)
return offs, ascent, rmse
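# End-to-end sketch of the dark-current workflow above; [dark_frame_paths]
# is a hypothetical list of background images, with the exposure times
# given in the same order:
exposuretimes = [1, 1, 2, 2, 4, 4]
imgs = [imread(p, dtype=float) for p in dark_frame_paths]
offs, ascent, rmse = getDarkCurrentFunction(exposuretimes, imgs)
dark_at_3s = offs + ascent * 3.0  # dark current map at any exposure time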
def alignImageAlongLine(img, line, height=15, length=None,
zoom=1, fast=False, borderValue=0):
'''
return a sub image aligned along given line
@param img - numpy.2darray input image to get subimage from
@param line - list of 2 points [x0,y0,x1,y1])
@param height - height of output array in y
@param length - width of output array
@param zoom - zoom factor
@param fast - speed up calculation using nearest neighbour interpolation
@returns transformed image as numpy.2darray with found line as in the middle
'''
height = int(round(height))
if height % 2 == 0: # ->is even number
height += 1 # only take uneven numbers to have line in middle
if length is None:
length = int(round(ln.length(line)))
hh = (height - 1)
ll = (length - 1)
# end points of the line:
p0 = np.array(line[0:2], dtype=float)
p1 = np.array(line[2:], dtype=float)
# p2 is above middle of p0,p1:
norm = np.array(ln.normal(line))
if not ln.isHoriz(line):
norm *= -1
p2 = (p0 + p1) * 0.5 + norm * hh * 0.5
middleY = hh / 2
pp0 = [0, middleY]
pp1 = [ll, middleY]
pp2 = [ll * 0.5, hh]
pts1 = np.array([p0, p1, p2], dtype=np.float32)
pts2 = np.array([pp0, pp1, pp2], dtype=np.float32)
if zoom != 1:
length = int(round(length * zoom))
height = int(round(height * zoom))
pts2 *= zoom
# TRANSFORM:
M = cv2.getAffineTransform(pts1, pts2)
dst = cv2.warpAffine(
img, M, (length, height),
flags=cv2.INTER_NEAREST if fast else cv2.INTER_LINEAR,
borderValue=borderValue)
return dst
def positionToIntensityUncertainty(image, sx, sy, kernelSize=None):
'''
calculates the estimated standard deviation map from the changes
of neighbouring pixels from a center pixel within a point spread function
defined by a std.dev. in x and y taken from the (sx, sy) maps
sx,sy -> either 2d array of same shape as [image]
or single values
'''
psf_is_const = not isinstance(sx, np.ndarray)
if not psf_is_const:
assert image.shape == sx.shape == sy.shape, \
"Image and position uncertainty maps need to have same size"
if kernelSize is None:
kernelSize = _kSizeFromStd(max(sx.max(), sy.max()))
else:
assert type(sx) in (int, float) and type(sy) in (int, float), \
"Image and position uncertainty values need to be int OR float"
if kernelSize is None:
kernelSize = _kSizeFromStd(max(sx, sy))
if image.dtype.kind == 'u':
image = image.astype(int) # otherwise stack overflow through uint
size = kernelSize // 2
if size < 1:
size = 1
kernelSize = 1 + 2 * size
# array to be filled by individual psf of every pixel:
psf = np.zeros((kernelSize, kernelSize))
# intensity uncertainty as stdev:
sint = np.zeros(image.shape)
if psf_is_const:
_calc_constPSF(image, sint, sx, sy, psf, size)
else:
_calc_variPSF(image, sint, sx, sy, psf, size)
return sint
def _coarsenImage(image, f):
'''
seems to be a more precise (but slower)
way to down-scale an image
'''
from skimage.morphology import square
from skimage.filters import rank
from skimage.transform._warps import rescale
selem = square(f)
arri = rank.mean(image, selem=selem)
return rescale(arri, 1 / f, order=0)
def positionToIntensityUncertaintyForPxGroup(image, std, y0, y1, x0, x1):
'''
like positionToIntensityUncertainty
but calculates the average uncertainty for an area [y0:y1,x0:x1]
'''
fy, fx = y1 - y0, x1 - x0
if fy != fx:
raise Exception('averaged area need to be square ATM')
image = _coarsenImage(image, fx)
k = _kSizeFromStd(std)
y0 = int(round(y0 / fy))
x0 = int(round(x0 / fx))
arr = image[y0 - k:y0 + k, x0 - k:x0 + k]
U = positionToIntensityUncertainty(arr, std / fx, std / fx)
return U[k:-k, k:-k]
def nan_maximum_filter(arr, ksize):
'''
same as scipy.ndimage.maximum_filter
but ignoring NaNs
'''
out = np.empty_like(arr)
_calc(arr, out, ksize//2)
return out
def medianThreshold(img, threshold=0.1, size=3, condition='>', copy=True):
'''
set the pixel value of the given [img] to the median filtered one
of a given kernel [size]
in case the relative [threshold] is exceeded
condition = '>' OR '<'
'''
from scipy.ndimage import median_filter
indices = None
if threshold > 0:
blur = np.asfarray(median_filter(img, size=size))
with np.errstate(divide='ignore', invalid='ignore', over='ignore'):
if condition == '>':
indices = abs((img - blur) / blur) > threshold
else:
indices = abs((img - blur) / blur) < threshold
if copy:
img = img.copy()
img[indices] = blur[indices]
return img, indices
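# Typical use (a sketch, assumes numpy as np): replace isolated hot pixels
# that deviate more than 30% from their 3x3 median neighbourhood:
img = np.random.rand(100, 100) * 100
img[50, 50] = 1e4  # simulated hot pixel
cleaned, hot = medianThreshold(img, threshold=0.3, size=3)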
def fastFilter(arr, ksize=30, every=None, resize=True, fn='median',
interpolation=cv2.INTER_LANCZOS4,
smoothksize=0,
borderMode=cv2.BORDER_REFLECT):
'''
fn = one of ['nanmean', 'mean', 'nanmedian', 'median']
a fast 2d filter for large kernel sizes that also
works with nans
the computation speed is increased because only every n-th position
within the median kernel is evaluated
'''
if every is None:
every = max(ksize//3, 1)
else:
assert ksize >= 3*every
s0,s1 = arr.shape[:2]
ss0 = s0//every
every = s0//ss0
ss1 = s1//every
out = np.full((ss0+1,ss1+1), np.nan)
c = {'median':_calcMedian,
'nanmedian':_calcNanMedian,
'nanmean':_calcNanMean,
'mean':_calcMean,
}[fn]
ss0,ss1 = c(arr, out, ksize, every)
out = out[:ss0,:ss1]
if smoothksize:
out = gaussian_filter(out, smoothksize)
if not resize:
return out
return cv2.resize(out, arr.shape[:2][::-1],
interpolation=interpolation)
def elbin(filename):
'''
Read EL images (*.elbin) created by the RELTRON EL Software
http://www.reltron.com/Products/Solar.html
'''
# arrs = []
labels = []
# These are all exposure times [s] to be selectable:
TIMES = (0.3, 0.4, 0.6, 0.8, 1.2, 1.6, 2.4, 3.2, 4.8, 6.4, 9.6, 12.8, 19.2,
25.6, 38.4, 51.2, 76.8, 102.6, 153.6, 204.6, 307.2, 409.8, 614.4,
819., 1228.8, 1638.6, 3276.6, 5400., 8100., 12168., 18216., 27324.,
41004., 61488., 92268.)
with open(filename, 'rb') as f:
# image shape and number:
height, width, frames = np.frombuffer(f.read(4 * 3), dtype=np.uint32)
arrs = np.empty((frames, width, height), dtype=np.uint16)
for i in range(frames):
# read header between all frames:
current, voltage = np.frombuffer(f.read(8 * 2), dtype=np.float64)
i_time = np.frombuffer(f.read(4), dtype=np.uint32)[0]
time = TIMES[i_time]
# read image:
arr = np.frombuffer(f.read(width * height * 2), dtype=np.uint16)
arrs[i] = arr.reshape(width, height)
# last row is all zeros in all imgs
# print arr[:,:-1]
# arrs.append(arr)
labels.append({'exposure time[s]': time,
'current[A]': current,
'voltage[V]': voltage})
return arrs, labels
def gaussian2d(xy, sx, sy, mx=0, my=0, rho=0, amp=1, offs=0):
'''
see http://en.wikipedia.org/wiki/Multivariate_normal_distribution
# probability density function of a vector [x,y]
sx,sy -> sigma (standard deviation)
mx,my: mu (mean position)
rho: correlation between x and y
'''
x,y = xy
return offs+amp*(
1/(2*np.pi*sx*sy*(1-(rho**2))**0.5) *
np.exp( (-1/(2*(1-rho**2))) *
(
( (x-mx)**2/sx**2 )
+ ( (y-my)**2/sy**2 )
- ( ( 2*rho*(x-mx)*(y-my)) / (sx*sy) )
)
)
) | see http://en.wikipedia.org/wiki/Multivariate_normal_distribution
# probability density function of a vector [x,y]
sx,sy -> sigma (standard deviation)
mx,my: mu (mean position)
rho: correlation between x and y | entailment |
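The density can be evaluated on a coordinate grid; a minimal sketch, assuming numpy is available:
import numpy as np

y, x = np.mgrid[-3:3:201j, -3:3:201j]
g = gaussian2d((x, y), sx=1.0, sy=0.5, mx=0, my=0, rho=0.3)
# with offs=0 the surface integrates to ~amp over the plane
print(g.sum() * (6 / 200) ** 2)  # ~1.0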
def fitImg(self, img_rgb):
'''
fit perspective and size of the input image to the base image
'''
H = self.pattern.findHomography(img_rgb)[0]
H_inv = self.pattern.invertHomography(H)
s = self.img_orig.shape
warped = cv2.warpPerspective(img_rgb, H_inv, (s[1], s[0]))
return warped | fit perspective and size of the input image to the base image | entailment |
def scaleSignalCut(img, ratio, nbins=100):
'''
scale img, cutting the given ratio off the top and bottom of the histogram
'''
start, stop = scaleSignalCutParams(img, ratio, nbins)
img = img - start
img /= (stop - start)
return img | scale img, cutting the given ratio off the top and bottom of the histogram | entailment
def scaleSignal(img, fitParams=None,
backgroundToZero=False, reference=None):
'''
scale the image between...
backgroundToZero=True -> 0 (average background) and 1 (maximum signal)
backgroundToZero=False -> signal+-3std
reference -> reference image -- scale image to fit this one
returns:
scaled image
'''
img = imread(img)
if reference is not None:
# def fn(ii, m,n):
# return ii*m+n
# curve_fit(fn, img[::10,::10], ref[::10,::10])
low, high = signalRange(img, fitParams)
low2, high2 = signalRange(reference)
img = np.asfarray(img)
ampl = (high2 - low2) / (high - low)
img -= low
img *= ampl
img += low2
return img
else:
offs, div = scaleParams(img, fitParams, backgroundToZero)
img = np.asfarray(img) - offs
img /= div
print('offset: %s, divisor: %s' % (offs, div))
return img | scale the image between...
backgroundToZero=True -> 0 (average background) and 1 (maximum signal)
backgroundToZero=False -> signal+-3std
reference -> reference image -- scale image to fit this one
returns:
scaled image | entailment |
def getBackgroundRange(fitParams):
'''
return minimum, average, maximum of the background peak
'''
smn, _, _ = getSignalParameters(fitParams)
bg = fitParams[0]
_, avg, std = bg
bgmn = max(0, avg - 3 * std)
if avg + 4 * std < smn:
bgmx = avg + 4 * std
elif avg + 3 * std < smn:
bgmx = avg + 3 * std
elif avg + 2 * std < smn:
bgmx = avg + 2 * std
else:
bgmx = avg + std
return bgmn, avg, bgmx | return minimum, average, maximum of the background peak | entailment |
def hasBackground(fitParams):
'''
compare the heights of the putative background and signal peaks
if the ratio is too high, assume there is no background
'''
signal = getSignalPeak(fitParams)
bg = getBackgroundPeak(fitParams)
if signal == bg:
return False
r = signal[0] / bg[0]
if r < 1:
r = 1 / r
return r < 100 | compare the heights of the putative background and signal peaks
if the ratio is too high, assume there is no background | entailment
def signalMinimum2(img, bins=None):
'''
minimum position between signal and background peak
'''
f = FitHistogramPeaks(img, bins=bins)
i = signalPeakIndex(f.fitParams)
spos = f.fitParams[i][1]
# spos = getSignalPeak(f.fitParams)[1]
# bpos = getBackgroundPeak(f.fitParams)[1]
bpos = f.fitParams[i - 1][1]
ind = np.logical_and(f.xvals > bpos, f.xvals < spos)
try:
i = np.argmin(f.yvals[ind])
return f.xvals[ind][i]
except ValueError as e:
if bins is None:
return signalMinimum2(img, bins=400)
else:
raise e | minimum position between signal and background peak | entailment |
def signalMinimum(img, fitParams=None, n_std=3):
'''
intersection between signal and background peak
'''
if fitParams is None:
fitParams = FitHistogramPeaks(img).fitParams
assert len(fitParams) > 1, 'need 2 peaks to get minimum signal'
i = signalPeakIndex(fitParams)
signal = fitParams[i]
bg = getBackgroundPeak(fitParams)
smn = signal[1] - n_std * signal[2]
bmx = bg[1] + n_std * bg[2]
if smn > bmx:
return smn
# peaks are overlapping
# define signal min. as intersection between both Gaussians
def solve(p1, p2):
s1, m1, std1 = p1
s2, m2, std2 = p2
a = (1 / (2 * std1**2)) - (1 / (2 * std2**2))
b = (m2 / (std2**2)) - (m1 / (std1**2))
c = (m1**2 / (2 * std1**2)) - (m2**2 / (2 * std2**2)) - \
np.log(((std2 * s1) / (std1 * s2)))
return np.roots([a, b, c])
i = solve(bg, signal)
try:
return i[np.logical_and(i > bg[1], i < signal[1])][0]
except IndexError:
# this error shouldn't occur... well
return max(smn, bmx) | intersection between signal and background peak | entailment |
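The quadratic solved inside signalMinimum can be checked in isolation; a worked sketch with made-up peak parameters (height, mean, std):
import numpy as np

bg, signal = (1.0, 10.0, 2.0), (0.5, 20.0, 4.0)
s1, m1, std1 = bg
s2, m2, std2 = signal
a = 1 / (2 * std1**2) - 1 / (2 * std2**2)
b = m2 / std2**2 - m1 / std1**2
c = (m1**2 / (2 * std1**2) - m2**2 / (2 * std2**2)
     - np.log((std2 * s1) / (std1 * s2)))
roots = np.roots([a, b, c])
cut = roots[(roots > m1) & (roots < m2)][0]  # intersection between the peaks
print(cut)  # ~14.4 for these parameters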
def getSignalParameters(fitParams, n_std=3):
'''
return minimum, average, maximum of the signal peak
'''
signal = getSignalPeak(fitParams)
mx = signal[1] + n_std * signal[2]
mn = signal[1] - n_std * signal[2]
if mn < fitParams[0][1]:
mn = fitParams[0][1] # set to bg
return mn, signal[1], mx | return minimum, average, maximum of the signal peak | entailment |
def equalizeImage(img, save_path=None, name_additive='_eqHist'):
'''
Equalize the histogram (contrast) of an image
works with RGB/multi-channel images
and flat-arrays
@param img - image_path or np.array
@param save_path if given output images will be saved there
@param name_additive if given this additive will be appended to output images
@return output image if the input image is a numpy.array and no save_path is given
@return None otherwise
'''
if isinstance(img, string_types):
img = PathStr(img)
if not img.exists():
raise Exception("image path doesn't exist")
img_name = img.basename().replace('.', '%s.' % name_additive)
if save_path is None:
save_path = img.dirname()
img = cv2.imread(img)
if img.dtype != np.dtype('uint8'):
# openCV cannot work with float arrays or uint > 8bit
eqFn = _equalizeHistogram
else:
eqFn = cv2.equalizeHist
if len(img.shape) == 3: # multi channel img like rgb
for i in range(img.shape[2]):
img[:, :, i] = eqFn(img[:, :, i])
else: # grey scale image
img = eqFn(img)
if save_path:
img_name = PathStr(save_path).join(img_name)
cv2.imwrite(img_name, img)
return img | Equalize the histogram (contrast) of an image
works with RGB/multi-channel images
and flat-arrays
@param img - image_path or np.array
@param save_path if given output images will be saved there
@param name_additive if given this additive will be appended to output images
@return output image if the input image is a numpy.array and no save_path is given
@return None otherwise | entailment
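A minimal usage sketch on an in-memory array (no file I/O), assuming an 8-bit low-contrast input:
import numpy as np

img = (np.random.rand(200, 200) * 60 + 100).astype(np.uint8)  # low contrast
eq = equalizeImage(img.copy())
print(img.min(), img.max(), '->', eq.min(), eq.max())  # value range is stretched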
def _equalizeHistogram(img):
'''
histogram equalisation not bounded to int() or an image depth of 8 bit
works also with negative numbers
'''
# to float if int:
intType = None
if 'f' not in img.dtype.str:
TO_FLOAT_TYPES = {np.dtype('uint8'): np.float16,
np.dtype('uint16'): np.float32,
np.dtype('uint32'): np.float64,
np.dtype('uint64'): np.float64}
intType = img.dtype
img = img.astype(TO_FLOAT_TYPES[intType], copy=False)
# get image depth
DEPTH_TO_NBINS = {np.dtype('float16'): 256, # uint8
np.dtype('float32'): 32768, # uint16
np.dtype('float64'): 2147483648} # uint32
nBins = DEPTH_TO_NBINS[img.dtype]
# scale to -1 to 1 due to scikit-image restrictions
mn, mx = np.amin(img), np.amax(img)
if abs(mn) > abs(mx):
mx = mn
img /= mx
img = exposure.equalize_hist(img, nbins=nBins)
img *= mx
if intType:
img = img.astype(intType)
return img | histogram equalisation not bounded to int() or an image depth of 8 bit
works also with negative numbers | entailment |
def localizedMaximum(img, thresh=0, min_increase=0, max_length=0, dtype=bool):
'''
Returns the local maximum of a given 2d array
thresh -> if given, ignore all values below that value
max_length -> limit length between value has to vary > min_increase
>>> a = np.array([[0,1,2,3,2,1,0], \
[0,1,2,2,3,1,0], \
[0,1,1,2,2,3,0], \
[0,1,1,2,1,1,0], \
[0,0,0,1,1,0,0]])
>>> print(localizedMaximum(a, dtype=int))
[[0 1 1 1 0 1 0]
[0 0 0 0 1 0 0]
[0 0 0 1 0 1 0]
[0 0 1 1 0 1 0]
[0 0 0 1 0 0 0]]
'''
# because numba cannot create arrays:
out = np.zeros(shape=img.shape, dtype=dtype)
# first iterate all rows:
_calc(img, out, thresh, min_increase, max_length)
# then all columns:
_calc(img.T, out.T, thresh, min_increase, max_length)
return out | Returns the local maximum of a given 2d array
thresh -> if given, ignore all values below that value
max_length -> limit length between value has to vary > min_increase
>>> a = np.array([[0,1,2,3,2,1,0], \
[0,1,2,2,3,1,0], \
[0,1,1,2,2,3,0], \
[0,1,1,2,1,1,0], \
[0,0,0,1,1,0,0]])
>>> print(localizedMaximum(a, dtype=int))
[[0 1 1 1 0 1 0]
[0 0 0 0 1 0 0]
[0 0 0 1 0 1 0]
[0 0 1 1 0 1 0]
[0 0 0 1 0 0 0]] | entailment |
def setReference(self, ref):
'''
ref ... either quad, grid, homography or reference image
quad --> list of four image points(x,y) marking the edges of the quad
to correct
homography --> h. matrix to correct perspective distortion
referenceImage --> image of same object without perspective distortion
'''
# self.maps = {}
self.quad = None
# self.refQuad = None
self._camera_position = None
self._homography = None
self._homography_is_fixed = True
# self.tvec, self.rvec = None, None
self._pose = None
# evaluate input:
if isinstance(ref, np.ndarray) and ref.shape == (3, 3):
# REF IS HOMOGRAPHY
self._homography = ref
# REF IS QUAD
elif len(ref) == 4:
self.quad = sortCorners(ref)
# TODO: cleanup # only need to call once - here
o = self.obj_points # no property any more
# REF IS IMAGE
else:
self.ref = imread(ref)
# self._refshape = ref.shape[:2]
self.pattern = PatternRecognition(self.ref)
self._homography_is_fixed = False | ref ... either quad, grid, homography or reference image
quad --> list of four image points(x,y) marking the edges of the quad
to correct
homography --> h. matrix to correct perspective distortion
referenceImage --> image of same object without perspective distortion | entailment |
def distort(self, img, rotX=0, rotY=0, quad=None):
'''
Apply perspective distortion to self.img
angles are in DEG and need to be positive to fit into the image
'''
self.img = imread(img)
# fit old image to self.quad:
corr = self.correct(self.img)
s = self.img.shape
if quad is None:
wquad = (self.quad - self.quad.mean(axis=0)).astype(float)
win_width = s[1]
win_height = s[0]
# project quad:
for n, q in enumerate(wquad):
p = Point3D(q[0], q[1], 0).rotateX(-rotX).rotateY(-rotY)
p = p.project(win_width, win_height, s[1], s[1])
wquad[n] = (p.x, p.y)
wquad = sortCorners(wquad)
# scale result so that longest side of quad and wquad are equal
w = wquad[:, 0].max() - wquad[:, 0].min()
h = wquad[:, 1].max() - wquad[:, 1].min()
scale = min(s[1] / w, s[0] / h)
# scale:
wquad = (wquad * scale).astype(int)
else:
wquad = sortCorners(quad)
wquad -= wquad.min(axis=0)
lx = corr.shape[1]
ly = corr.shape[0]
objP = np.array([
[0, 0],
[lx, 0],
[lx, ly],
[0, ly],
], dtype=np.float32)
homography = cv2.getPerspectiveTransform(
wquad.astype(np.float32), objP)
# distort corr:
w = wquad[:, 0].max() - wquad[:, 0].min()
h = wquad[:, 1].max() - wquad[:, 1].min()
#(int(w),int(h))
dist = cv2.warpPerspective(corr, homography, (int(w), int(h)),
flags=cv2.INTER_CUBIC | cv2.WARP_INVERSE_MAP)
# move middle of dist to middle of the old quad:
bg = np.zeros(shape=s)
rmn = (bg.shape[0] / 2, bg.shape[1] / 2)
ss = dist.shape
mn = (ss[0] / 2, ss[1] / 2) # wquad.mean(axis=0)
ref = (int(rmn[0] - mn[0]), int(rmn[1] - mn[1]))
bg[ref[0]:ss[0] + ref[0], ref[1]:ss[1] + ref[1]] = dist
# finally move quad into right position:
self.quad = wquad
self.quad += (ref[1], ref[0])
self.img = bg
self._homography = None
self._poseFromQuad()
if self.opts['do_correctIntensity']:
tf = self.tiltFactor()
if self.img.ndim == 3:
for col in range(self.img.shape[2]):
self.img[..., col] *= tf
else:
# tf = np.tile(tf, (1,1,self.img.shape[2]))
self.img = self.img * tf
return self.img | Apply perspective distortion to self.img
angles are in DEG and need to be positive to fit into the image | entailment
def correctGrid(self, img, grid):
'''
grid -> array of polylines=((p0x,p0y),(p1x,p1y),...)
'''
self.img = imread(img)
h = self.homography # TODO: cleanup only needed to get newBorder attr.
if self.opts['do_correctIntensity']:
self.img = self.img / self._getTiltFactor(self.img.shape)
s0, s1 = grid.shape[:2]
n0, n1 = s0 - 1, s1 - 1
snew = self._newBorders
b = self.opts['border']
sx, sy = (snew[0] - 2 * b) // n0, (snew[1] - 2 * b) // n1
out = np.empty(snew[::-1], dtype=self.img.dtype)
def warp(ix, iy, objP, outcut):
shape = outcut.shape[::-1]
quad = grid[ix:ix + 2,
iy:iy + 2].reshape(4, 2)[np.array([0, 2, 3, 1])]
hcell = cv2.getPerspectiveTransform(
quad.astype(np.float32), objP)
cv2.warpPerspective(self.img, hcell, shape, outcut,
flags=cv2.INTER_LANCZOS4,
**self.opts['cv2_opts'])
return quad
objP = np.array([[0, 0],
[sx, 0],
[sx, sy],
[0, sy]], dtype=np.float32)
# INNER CELLS
for ix in range(1, n0 - 1):
for iy in range(1, n1 - 1):
sub = out[iy * sy + b: (iy + 1) * sy + b,
ix * sx + b: (ix + 1) * sx + b]
# warp(ix, iy, objP, sub)
shape = sub.shape[::-1]
quad = grid[ix:ix + 2,
iy:iy + 2].reshape(4, 2)[np.array([0, 2, 3, 1])]
# print(quad, objP)
hcell = cv2.getPerspectiveTransform(
quad.astype(np.float32), objP)
cv2.warpPerspective(self.img, hcell, shape, sub,
flags=cv2.INTER_LANCZOS4,
**self.opts['cv2_opts'])
# return out
# TOP CELLS
objP[:, 1] += b
for ix in range(1, n0 - 1):
warp(ix, 0, objP, out[: sy + b,
ix * sx + b: (ix + 1) * sx + b])
# BOTTOM CELLS
objP[:, 1] -= b
for ix in range(1, n0 - 1):
iy = (n1 - 1)
y = iy * sy + b
x = ix * sx + b
warp(ix, iy, objP, out[y: y + sy + b, x: x + sx])
# LEFT CELLS
objP[:, 0] += b
for iy in range(1, n1 - 1):
y = iy * sy + b
warp(0, iy, objP, out[y: y + sy, : sx + b])
# RIGHT CELLS
objP[:, 0] -= b
ix = (n0 - 1)
x = ix * sx + b
for iy in range(1, n1 - 1):
y = iy * sy + b
warp(ix, iy, objP, out[y: y + sy, x: x + sx + b])
# BOTTOM RIGHT CORNER
warp(n0 - 1, n1 - 1, objP, out[-sy - b - 1:, x: x + sx + b])
# #TOP LEFT CORNER
objP += (b, b)
warp(0, 0, objP, out[0: sy + b, 0: sx + b])
# TOP RIGHT CORNER
objP[:, 0] -= b
# x = (n0-1)*sx+b
warp(n0 - 1, 0, objP, out[: sy + b, x: x + sx + b])
# #BOTTOM LEFT CORNER
objP += (b, -b)
warp(0, n1 - 1, objP, out[-sy - b - 1:, : sx + b])
return out | grid -> array of polylines=((p0x,p0y),(p1x,p1y),...) | entailment
def correct(self, img):
'''
...from perspective distortion:
--> perspective transformation
--> apply tilt factor (view factor) correction
'''
print("CORRECT PERSPECTIVE ...")
self.img = imread(img)
if not self._homography_is_fixed:
self._homography = None
h = self.homography
if self.opts['do_correctIntensity']:
tf = self.tiltFactor()
self.img = np.asfarray(self.img)
if self.img.ndim == 3:
for col in range(self.img.shape[2]):
self.img[..., col] /= tf
else:
self.img = self.img / tf
warped = cv2.warpPerspective(self.img,
h,
self._newBorders[::-1],
flags=cv2.INTER_LANCZOS4,
**self.opts['cv2_opts'])
return warped | ...from perspective distortion:
--> perspective transformation
--> apply tilt factor (view factor) correction | entailment |
def camera_position(self, pose=None):
'''
returns camera position in world coordinates using self.rvec and self.tvec
from http://stackoverflow.com/questions/14515200/python-opencv-solvepnp-yields-wrong-translation-vector
'''
if pose is None:
pose = self.pose()
t, r = pose
return -np.matrix(cv2.Rodrigues(r)[0]).T * np.matrix(t) | returns camera position in world coordinates using self.rvec and self.tvec
from http://stackoverflow.com/questions/14515200/python-opencv-solvepnp-yields-wrong-translation-vector | entailment |
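The underlying relation is C = -R.T @ t; a standalone numpy/OpenCV check with made-up pose vectors:
import cv2
import numpy as np

rvec = np.array([0.1, -0.2, 0.05])         # rotation vector (Rodrigues form)
tvec = np.array([[10.0], [5.0], [100.0]])  # translation vector
R = cv2.Rodrigues(rvec)[0]
cam_pos = -R.T.dot(tvec)                   # camera centre in world coordinates
print(cam_pos.ravel())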
def viewAngle(self, **kwargs):
'''
calculate view factor between one small and one finite surface
vf =1/pi * integral(cos(beta1)*cos(beta2)/s**2) * dA
according to VDI heatatlas 2010 p961
'''
v0 = self.cam2PlaneVectorField(**kwargs)
# obj cannot be behind camera
v0[2][v0[2] < 0] = np.nan
_t, r = self.pose()
n = self.planeSfN(r)
# because of different x,y orientation:
n[2] *= -1
# beta2 = vectorAngle(v0, vectorToField(n) )
beta2 = vectorAngle(v0, n)
return beta2 | calculate view factor between one small and one finite surface
vf =1/pi * integral(cos(beta1)*cos(beta2)/s**2) * dA
according to VDI heatatlas 2010 p961 | entailment |
def foreground(self, quad=None):
'''return foreground (quad) mask'''
fg = np.zeros(shape=self._newBorders[::-1], dtype=np.uint8)
if quad is None:
quad = self.quad
else:
quad = quad.astype(np.int32)
cv2.fillConvexPoly(fg, quad, 1)
return fg.astype(bool) | return foreground (quad) mask | entailment |
def tiltFactor(self, midpointdepth=None,
printAvAngle=False):
'''
get tilt factor from inverse distance law
https://en.wikipedia.org/wiki/Inverse-square_law
'''
# TODO: can also be only def. with FOV, rot, tilt
beta2 = self.viewAngle(midpointdepth=midpointdepth)
try:
angles, vals = getattr(
emissivity_vs_angle, self.opts['material'])()
except AttributeError:
raise AttributeError("material[%s] is not in list of know materials: %s" % (
self.opts['material'], [o[0] for o in getmembers(emissivity_vs_angle)
if isfunction(o[1])]))
if printAvAngle:
avg_angle = beta2[self.foreground()].mean()
print('angle: %s DEG' % np.degrees(avg_angle))
# use averaged angle instead of beta2 to not overemphasize correction
normEmissivity = np.clip(
InterpolatedUnivariateSpline(
np.radians(angles), vals)(beta2), 0, 1)
return normEmissivity | get tilt factor from inverse distance law
https://en.wikipedia.org/wiki/Inverse-square_law | entailment |
def standardUncertainties(self, focal_Length_mm, f_number, midpointdepth=1000,
focusAtYX=None,
# sigma_best_focus=0,
# quad_pos_err=0,
shape=None,
uncertainties=(0, 0)):
'''
focusAtYX - image position which is in focus
if not set it is assumed that the image middle is in focus
sigma_best_focus - standard deviation of the PSF
within the best focus (default blur)
uncertainties - contributors for standard uncertainty
these need to be perspective transformed to fit the new
image shape
'''
# TODO: consider quad_pos_error
# (also influences intensity corr map)
if shape is None:
s = self.img.shape
else:
s = shape
# 1. DEFOCUS DUE TO DEPTH OF FIELD
##################################
depthMap = self.depthMap(midpointdepth)
if focusAtYX is None:
# assume image middle is in-focus:
focusAtYX = s[0] // 2, s[1] // 2
infocusDepth = depthMap[focusAtYX]
depthOfField_blur = defocusThroughDepth(
depthMap, infocusDepth, focal_Length_mm, f_number, k=2.335)
# 2. INCREASED PIXEL SIZE DUE TO INTERPOLATION BETWEEN
# PIXELS MOVED APART
######################################################
# index maps:
py, px = np.mgrid[0:s[0], 0:s[1]]
# warped index maps:
wx = cv2.warpPerspective(np.asfarray(px), self.homography,
self._newBorders,
borderValue=np.nan,
flags=cv2.INTER_LANCZOS4)
wy = cv2.warpPerspective(np.asfarray(py), self.homography,
self._newBorders,
borderValue=np.nan,
flags=cv2.INTER_LANCZOS4)
pxSizeFactorX = 1 / np.abs(np.gradient(wx)[1])
pxSizeFactorY = 1 / np.abs(np.gradient(wy)[0])
# WARP ALL FIELD TO NEW PERSPECTIVE AND MULTIPLY WITH PXSIZE FACTOR:
depthOfField_blur = cv2.warpPerspective(
depthOfField_blur, self.homography, self._newBorders,
borderValue=np.nan,
)
# perspective transform given uncertainties:
warpedU = []
for u in uncertainties:
# warpedU.append([])
# for i in u:
# print i, type(i), isinstance(i, np.ndarray)
if isinstance(u, np.ndarray) and u.size > 1:
u = cv2.warpPerspective(u, self.homography,
self._newBorders,
borderValue=np.nan,
flags=cv2.INTER_LANCZOS4) # *f
else:
# multiply with area ratio: after/before perspective warp
u *= self.areaRatio
warpedU.append(u)
# given uncertainties after warp:
ux, uy = warpedU
ux = pxSizeFactorX * (ux**2 + depthOfField_blur**2)**0.5
uy = pxSizeFactorY * (uy**2 + depthOfField_blur**2)**0.5
# TODO: remove depthOfField_blur,fx,fy from return
return ux, uy, depthOfField_blur, pxSizeFactorX, pxSizeFactorY | focusAtYX - image position which is in focus
if not set it is assumed that the image middle is in focus
sigma_best_focus - standard deviation of the PSF
within the best focus (default blur)
uncertainties - contributors for standard uncertainty
these need to be perspective transformed to fit the new
image shape | entailment |
def _poseFromQuad(self, quad=None):
'''
estimate the pose of the object plane using quad
setting:
self.rvec -> rotation vector
self.tvec -> translation vector
'''
if quad is None:
quad = self.quad
if quad.ndim == 3:
quad = quad[0]
# http://answers.opencv.org/question/1073/what-format-does-cv2solvepnp-use-for-points-in/
# Find the rotation and translation vectors.
img_pn = np.ascontiguousarray(quad[:, :2],
dtype=np.float32).reshape((4, 1, 2))
obj_pn = self.obj_points - self.obj_points.mean(axis=0)
retval, rvec, tvec = cv2.solvePnP(
obj_pn,
img_pn,
self.opts['cameraMatrix'],
self.opts['distCoeffs'],
flags=cv2.SOLVEPNP_P3P # because exactly four points are given
)
if not retval:
print("Couln't estimate pose")
return tvec, rvec | estimate the pose of the object plane using quad
setting:
self.rvec -> rotation vector
self.tvec -> translation vector | entailment |
def drawQuad(self, img=None, quad=None, thickness=30):
'''
Draw the quad into given img
'''
if img is None:
img = self.img
if quad is None:
quad = self.quad
q = np.int32(quad)
c = int(img.max())
cv2.line(img, tuple(q[0]), tuple(q[1]), c, thickness)
cv2.line(img, tuple(q[1]), tuple(q[2]), c, thickness)
cv2.line(img, tuple(q[2]), tuple(q[3]), c, thickness)
cv2.line(img, tuple(q[3]), tuple(q[0]), c, thickness)
return img | Draw the quad into given img | entailment |
def draw3dCoordAxis(self, img=None, thickness=8):
'''
draw the 3d coordinate axes into given image
if image == False:
create an empty image
'''
if img is None:
img = self.img
elif img is False:
img = np.zeros(shape=self.img.shape, dtype=self.img.dtype)
else:
img = imread(img)
# project 3D points to image plane:
# self.opts['obj_width_mm'], self.opts['obj_height_mm']
w, h = self.opts['new_size']
axis = np.float32([[0.5 * w, 0.5 * h, 0],
[w, 0.5 * h, 0],
[0.5 * w, h, 0],
[0.5 * w, 0.5 * h, -0.5 * w]])
t, r = self.pose()
imgpts = cv2.projectPoints(axis, r, t,
self.opts['cameraMatrix'],
self.opts['distCoeffs'])[0]
mx = int(img.max())
origin = tuple(imgpts[0].ravel())
cv2.line(img, origin, tuple(imgpts[1].ravel()), (0, 0, mx), thickness)
cv2.line(img, origin, tuple(imgpts[2].ravel()), (0, mx, 0), thickness)
cv2.line(
img, origin, tuple(imgpts[3].ravel()), (mx, 0, 0), thickness * 2)
return img | draw the 3d coordinate axes into given image
if image == False:
create an empty image | entailment |
def _calcQuadSize(corners, aspectRatio):
'''
return the size of a rectangle in perspective distortion in [px]
DEBUG: PUT THAT BACK IN??::
if aspectRatio is not given it will be determined
'''
if aspectRatio > 1: # x is bigger -> reduce y
x_length = PerspectiveCorrection._quadXLength(corners)
y = x_length / aspectRatio
return x_length, y
else: # y is bigger -> reduce x
y_length = PerspectiveCorrection._quadYLength(corners)
x = y_length * aspectRatio
return x, y_length | return the size of a rectangle in perspective distortion in [px]
DEBUG: PUT THAT BACK IN??::
if aspectRatio is not given it will be determined | entailment
def linearToPolar(img, center=None,
final_radius=None,
initial_radius=None,
phase_width=None,
interpolation=cv2.INTER_AREA, maps=None,
borderValue=0, borderMode=cv2.BORDER_REFLECT, **opts):
'''
map a 2d (x,y) Cartesian array to a polar (r, phi) array
using opencv.remap
'''
if maps is None:
mapY, mapX = linearToPolarMaps(img.shape[:2], center, final_radius,
initial_radius, phase_width)
else:
mapY, mapX = maps
o = {'interpolation': interpolation,
'borderValue': borderValue,
'borderMode': borderMode}
o.update(opts)
return cv2.remap(img, mapY, mapX, **o) | map a 2d (x,y) Cartesian array to a polar (r, phi) array
using opencv.remap | entailment |
def polarToLinear(img, shape=None, center=None, maps=None,
interpolation=cv2.INTER_AREA,
borderValue=0, borderMode=cv2.BORDER_REFLECT, **opts):
'''
map a 2d polar (r, phi) array to a Cartesian (x,y) array
using opencv.remap
'''
if maps is None:
mapY, mapX = polarToLinearMaps(img.shape[:2], shape, center)
else:
mapY, mapX = maps
o = {'interpolation': interpolation,
'borderValue': borderValue,
'borderMode': borderMode}
o.update(opts)
return cv2.remap(img, mapY, mapX, **o) | map a 2d polar (r, phi) array to a Cartesian (x,y) array
using opencv.remap | entailment |
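A round-trip sketch using the two mapping functions above; assumes the map-building helpers (linearToPolarMaps / polarToLinearMaps) from the same module are importable:
import numpy as np

img = np.zeros((200, 200), dtype=np.uint8)
img[95:105, :] = 255                        # horizontal bar through the centre
polar = linearToPolar(img)                  # rows ~ radius, columns ~ angle
back = polarToLinear(polar, shape=img.shape)  # map back to Cartesian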
def modifiedLaplacian(img):
''''LAPM' algorithm (Nayar89)'''
M = np.array([-1, 2, -1])
G = cv2.getGaussianKernel(ksize=3, sigma=-1)
Lx = cv2.sepFilter2D(src=img, ddepth=cv2.CV_64F, kernelX=M, kernelY=G)
Ly = cv2.sepFilter2D(src=img, ddepth=cv2.CV_64F, kernelX=G, kernelY=M)
FM = np.abs(Lx) + np.abs(Ly)
return cv2.mean(FM)[0] | LAPM' algorithm (Nayar89) | entailment |
def varianceOfLaplacian(img):
''''LAPV' algorithm (Pech2000)'''
lap = cv2.Laplacian(img, ddepth=-1)#cv2.cv.CV_64F)
stdev = cv2.meanStdDev(lap)[1]
s = stdev[0]**2
return s[0] | LAPV' algorithm (Pech2000) | entailment |
def tenengrad(img, ksize=3):
''''TENG' algorithm (Krotkov86)'''
Gx = cv2.Sobel(img, ddepth=cv2.CV_64F, dx=1, dy=0, ksize=ksize)
Gy = cv2.Sobel(img, ddepth=cv2.CV_64F, dx=0, dy=1, ksize=ksize)
FM = Gx*Gx + Gy*Gy
mn = cv2.mean(FM)[0]
if np.isnan(mn):
return np.nanmean(FM)
return mn | TENG' algorithm (Krotkov86) | entailment |
def normalizedGraylevelVariance(img):
''''GLVN' algorithm (Santos97)'''
mean, stdev = cv2.meanStdDev(img)
s = stdev[0]**2 / mean[0]
return s[0] | GLVN' algorithm (Santos97) | entailment |
def linePlot(img, x0, y0, x1, y1, resolution=None, order=3):
'''
returns [img] intensity values along line
defined by [x0, y0, x1, y1]
resolution ... number of data points to evaluate
order ... interpolation precision
'''
if resolution is None:
resolution = int( ((x1-x0)**2 + (y1-y0)**2 )**0.5 )
if order == 0:
x = np.linspace(x0, x1, resolution, dtype=int)
y = np.linspace(y0, y1, resolution, dtype=int)
return img[y, x]
x = np.linspace(x0, x1, resolution)
y = np.linspace(y0, y1, resolution)
return map_coordinates(img, np.vstack((y,x)), order=order) | returns [img] intensity values along line
defined by [x0, y0, x1, y1]
resolution ... number of data points to evaluate
order ... interpolation precision | entailment |
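A short profile-extraction sketch on a synthetic gradient image (scipy's map_coordinates is used internally for order > 0):
import numpy as np

img = np.outer(np.arange(100.0), np.ones(100))  # vertical gradient image
profile = linePlot(img, x0=0, y0=0, x1=99, y1=99)
# samples along the diagonal increase linearly from 0 to 99
print(profile[0], profile[-1])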
def flatFieldFromFunction(self):
'''
calculate flatField from fitting vignetting function to averaged fit-image
returns flatField, average background level, fitted image, valid indices mask
'''
fitimg, mask = self._prepare()
mask = ~mask
s0, s1 = fitimg.shape
#f-value, alpha, fx, cx, cy
guess = (s1 * 0.7, 0, 1, s0 / 2, s1 / 2)
# assume a normal plane - no tilt and no rotation:
fn = lambda xy, f, alpha, fx, cx, cy: vignetting((xy[0] * fx, xy[1]), f, alpha,
cx=cx, cy=cy)
# mask = fitimg>0.5
flatfield = fit2dArrayToFn(fitimg, fn, mask=mask,
guess=guess, output_shape=self._orig_shape)[0]
return flatfield, self.bglevel / self._n, fitimg, mask | calculate flatField from fitting vignetting function to averaged fit-image
returns flatField, average background level, fitted image, valid indices mask | entailment |
def flatFieldFromFit(self):
'''
calculate flatField from 2d-polynomial fit filling
all high gradient areas within averaged fit-image
returns flatField, average background level, fitted image, valid indices mask
'''
fitimg, mask = self._prepare()
out = fitimg.copy()
lastm = 0
for _ in range(10):
out = polyfit2dGrid(out, mask, 2)
mask = highGrad(out)
m = mask.sum()
if m == lastm:
break
lastm = m
out = np.clip(out, 0.1, 1)
out = resize(out, self._orig_shape, mode='reflect')
return out, self.bglevel / self._n, fitimg, mask | calculate flatField from 2d-polynomial fit filling
all high gradient areas within averaged fit-image
returns flatField, average background level, fitted image, valid indices mask | entailment |
def parse_vhdl_file(fname):
'''Parse a named VHDL file
Args:
fname(str): Name of file to parse
Returns:
Parsed objects.
'''
with open(fname, 'rt') as fh:
text = fh.read()
return parse_vhdl(text) | Parse a named VHDL file
Args:
fname(str): Name of file to parse
Returns:
Parsed objects. | entailment |
def parse_vhdl(text):
'''Parse a text buffer of VHDL code
Args:
text(str): Source code to parse
Returns:
Parsed objects.
'''
lex = VhdlLexer
name = None
kind = None
saved_type = None
end_param_group = False
cur_package = None
metacomments = []
parameters = []
param_items = []
generics = []
ports = []
sections = []
port_param_index = 0
last_item = None
array_range_start_pos = 0
objects = []
for pos, action, groups in lex.run(text):
if action == 'metacomment':
realigned = re.sub(r'^#+', lambda m: ' ' * len(m.group(0)), groups[0])
if last_item is None:
metacomments.append(realigned)
else:
last_item.desc = realigned
if action == 'section_meta':
sections.append((port_param_index, groups[0]))
elif action == 'function':
kind = 'function'
name = groups[0]
param_items = []
parameters = []
elif action == 'procedure':
kind = 'procedure'
name = groups[0]
param_items = []
parameters = []
elif action == 'param':
if end_param_group:
# Complete previous parameters
for i in param_items:
parameters.append(i)
param_items = []
end_param_group = False
param_items.append(VhdlParameter(groups[1]))
elif action == 'param_type':
mode, ptype = groups
if mode is not None:
mode = mode.strip()
for i in param_items: # Set mode and type for all pending parameters
i.mode = mode
i.data_type = ptype
end_param_group = True
elif action == 'param_default':
for i in param_items:
i.default_value = groups[0]
elif action == 'end_subprogram':
# Complete last parameters
for i in param_items:
parameters.append(i)
if kind == 'function':
vobj = VhdlFunction(name, cur_package, parameters, groups[0], metacomments)
else:
vobj = VhdlProcedure(name, cur_package, parameters, metacomments)
objects.append(vobj)
metacomments = []
parameters = []
param_items = []
kind = None
name = None
elif action == 'component':
kind = 'component'
name = groups[0]
generics = []
ports = []
param_items = []
sections = []
port_param_index = 0
elif action == 'generic_param':
param_items.append(groups[0])
elif action == 'generic_param_type':
ptype = groups[0]
for i in param_items:
generics.append(VhdlParameter(i, 'in', ptype))
param_items = []
last_item = generics[-1]
elif action == 'port_param':
param_items.append(groups[0])
port_param_index += 1
elif action == 'port_param_type':
mode, ptype = groups
for i in param_items:
ports.append(VhdlParameter(i, mode, ptype))
param_items = []
last_item = ports[-1]
elif action == 'port_array_param_type':
mode, ptype = groups
array_range_start_pos = pos[1]
elif action == 'array_range_end':
arange = text[array_range_start_pos:pos[0]+1]
for i in param_items:
ports.append(VhdlParameter(i, mode, ptype + arange))
param_items = []
last_item = ports[-1]
elif action == 'end_component':
vobj = VhdlComponent(name, cur_package, ports, generics, dict(sections), metacomments)
objects.append(vobj)
last_item = None
metacomments = []
elif action == 'package':
objects.append(VhdlPackage(groups[0]))
cur_package = groups[0]
kind = None
name = None
elif action == 'type':
saved_type = groups[0]
elif action in ('array_type', 'file_type', 'access_type', 'record_type', 'range_type', 'enum_type', 'incomplete_type'):
vobj = VhdlType(saved_type, cur_package, action, metacomments)
objects.append(vobj)
kind = None
name = None
metacomments = []
elif action == 'subtype':
vobj = VhdlSubtype(groups[0], cur_package, groups[1], metacomments)
objects.append(vobj)
kind = None
name = None
metacomments = []
elif action == 'constant':
vobj = VhdlConstant(groups[0], cur_package, groups[1], metacomments)
objects.append(vobj)
kind = None
name = None
metacomments = []
return objects | Parse a text buffer of VHDL code
Args:
text(str): Source code to parse
Returns:
Parsed objects. | entailment |
def subprogram_prototype(vo):
'''Generate a canonical prototype string
Args:
vo (VhdlFunction, VhdlProcedure): Subprogram object
Returns:
Prototype string.
'''
plist = '; '.join(str(p) for p in vo.parameters)
if isinstance(vo, VhdlFunction):
if len(vo.parameters) > 0:
proto = 'function {}({}) return {};'.format(vo.name, plist, vo.return_type)
else:
proto = 'function {} return {};'.format(vo.name, vo.return_type)
else: # procedure
proto = 'procedure {}({});'.format(vo.name, plist)
return proto | Generate a canonical prototype string
Args:
vo (VhdlFunction, VhdlProcedure): Subprogram object
Returns:
Prototype string. | entailment |
def subprogram_signature(vo, fullname=None):
'''Generate a signature string
Args:
vo (VhdlFunction, VhdlProcedure): Subprogram object
Returns:
Signature string.
'''
if fullname is None:
fullname = vo.name
if isinstance(vo, VhdlFunction):
plist = ','.join(p.data_type for p in vo.parameters)
sig = '{}[{} return {}]'.format(fullname, plist, vo.return_type)
else: # procedure
plist = ','.join(p.data_type for p in vo.parameters)
sig = '{}[{}]'.format(fullname, plist)
return sig | Generate a signature string
Args:
vo (VhdlFunction, VhdlProcedure): Subprogram object
Returns:
Signature string. | entailment |
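A sketch building objects by hand, assuming the constructor orders used in parse_vhdl above (VhdlParameter(name, mode, data_type); VhdlFunction(name, package, parameters, return_type, metacomments)):
params = [VhdlParameter('a', 'in', 'integer'),
          VhdlParameter('b', 'in', 'integer')]
func = VhdlFunction('add', 'demo_pkg', params, 'integer', [])
print(subprogram_prototype(func))  # canonical prototype string
print(subprogram_signature(func))  # e.g. 'add[integer,integer return integer]'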
def extract_objects_from_source(self, text, type_filter=None):
'''Extract object declarations from a text buffer
Args:
text (str): Source code to parse
type_filter (class, optional): Object class to filter results
Returns:
List of parsed objects.
'''
objects = parse_vhdl(text)
self._register_array_types(objects)
if type_filter:
objects = [o for o in objects if isinstance(o, type_filter)]
return objects | Extract object declarations from a text buffer
Args:
text (str): Source code to parse
type_filter (class, optional): Object class to filter results
Returns:
List of parsed objects. | entailment |
def is_array(self, data_type):
'''Check if a type is a known array type
Args:
data_type (str): Name of type to check
Returns:
True if ``data_type`` is a known array type.
'''
# Split off any brackets
data_type = data_type.split('[')[0].strip()
return data_type.lower() in self.array_types | Check if a type is a known array type
Args:
data_type (str): Name of type to check
Returns:
True if ``data_type`` is a known array type. | entailment |
def load_array_types(self, fname):
'''Load file of previously extracted data types
Args:
fname (str): Name of file to load array database from
'''
type_defs = ''
with open(fname, 'rt') as fh:
type_defs = fh.read()
try:
type_defs = ast.literal_eval(type_defs)
except SyntaxError:
type_defs = {}
self._add_array_types(type_defs) | Load file of previously extracted data types
Args:
fname (str): Name of file to load array database from | entailment |
def save_array_types(self, fname):
'''Save array type registry to a file
Args:
fname (str): Name of file to save array database to
'''
type_defs = {'arrays': sorted(list(self.array_types))}
with open(fname, 'wt') as fh:
pprint(type_defs, stream=fh) | Save array type registry to a file
Args:
fname (str): Name of file to save array database to | entailment |
def _register_array_types(self, objects):
'''Add array type definitions to internal registry
Args:
objects (list of VhdlType or VhdlSubtype): Array types to track
'''
# Add all array types directly
types = [o for o in objects if isinstance(o, VhdlType) and o.type_of == 'array_type']
for t in types:
self.array_types.add(t.name)
subtypes = {o.name:o.base_type for o in objects if isinstance(o, VhdlSubtype)}
# Find all subtypes of an array type
for k, v in subtypes.items():
while v in subtypes: # Follow subtypes of subtypes
v = subtypes[v]
if v in self.array_types:
self.array_types.add(k) | Add array type definitions to internal registry
Args:
objects (list of VhdlType or VhdlSubtype): Array types to track | entailment |
def register_array_types_from_sources(self, source_files):
'''Add array type definitions from a file list to internal registry
Args:
source_files (list of str): Files to parse for array definitions
'''
for fname in source_files:
if is_vhdl(fname):
self._register_array_types(self.extract_objects(fname)) | Add array type definitions from a file list to internal registry
Args:
source_files (list of str): Files to parse for array definitions | entailment |
def run(self, text):
'''Run lexer rules against a source text
Args:
text (str): Text to apply lexer to
Yields:
A sequence of lexer matches.
'''
stack = ['root']
pos = 0
patterns = self.tokens[stack[-1]]
while True:
for pat, action, new_state in patterns:
m = pat.match(text, pos)
if m:
if action:
#print('## MATCH: {} -> {}'.format(m.group(), action))
yield (pos, m.end()-1), action, m.groups()
pos = m.end()
if new_state:
if isinstance(new_state, int): # Pop states
del stack[new_state:]
else:
stack.append(new_state)
#print('## CHANGE STATE:', pos, new_state, stack)
patterns = self.tokens[stack[-1]]
break
else:
try:
if text[pos] == '\n':
pos += 1
continue
pos += 1
except IndexError:
break | Run lexer rules against a source text
Args:
text (str): Text to apply lexer to
Yields:
A sequence of lexer matches. | entailment |
def get_package_version(verfile):
'''Scan the script for the version string'''
version = None
with open(verfile) as fh:
try:
version = [line.split('=')[1].strip().strip("'") for line in fh if \
line.startswith('__version__')][0]
except IndexError:
pass
return version | Scan the script for the version string | entailment |
def parse_verilog_file(fname):
'''Parse a named Verilog file
Args:
fname (str): File to parse.
Returns:
List of parsed objects.
'''
with open(fname, 'rt') as fh:
text = fh.read()
return parse_verilog(text) | Parse a named Verilog file
Args:
fname (str): File to parse.
Returns:
List of parsed objects. | entailment |
def parse_verilog(text):
'''Parse a text buffer of Verilog code
Args:
text (str): Source code to parse
Returns:
List of parsed objects.
'''
lex = VerilogLexer
name = None
kind = None
saved_type = None
mode = 'input'
ptype = 'wire'
metacomments = []
parameters = []
param_items = []
generics = []
ports = collections.OrderedDict()
sections = []
port_param_index = 0
last_item = None
array_range_start_pos = 0
objects = []
for pos, action, groups in lex.run(text):
if action == 'metacomment':
if last_item is None:
metacomments.append(groups[0])
else:
last_item.desc = groups[0]
if action == 'section_meta':
sections.append((port_param_index, groups[0]))
elif action == 'module':
kind = 'module'
name = groups[0]
generics = []
ports = collections.OrderedDict()
param_items = []
sections = []
port_param_index = 0
elif action == 'parameter_start':
net_type, vec_range = groups
new_ptype = ''
if net_type is not None:
new_ptype += net_type
if vec_range is not None:
new_ptype += ' ' + vec_range
ptype = new_ptype
elif action == 'param_item':
generics.append(VerilogParameter(groups[0], 'in', ptype))
elif action == 'module_port_start':
new_mode, net_type, signed, vec_range = groups
new_ptype = ''
if net_type is not None:
new_ptype += net_type
if signed is not None:
new_ptype += ' ' + signed
if vec_range is not None:
new_ptype += ' ' + vec_range
# Complete pending items
for i in param_items:
ports[i] = VerilogParameter(i, mode, ptype)
param_items = []
if len(ports) > 0:
last_item = next(reversed(ports))
# Start with new mode
mode = new_mode
ptype = new_ptype
elif action == 'port_param':
ident = groups[0]
param_items.append(ident)
port_param_index += 1
elif action == 'end_module':
# Finish any pending ports
for i in param_items:
ports[i] = VerilogParameter(i, mode, ptype)
vobj = VerilogModule(name, ports.values(), generics, dict(sections), metacomments)
objects.append(vobj)
last_item = None
metacomments = []
return objects | Parse a text buffer of Verilog code
Args:
text (str): Source code to parse
Returns:
List of parsed objects. | entailment |
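A usage sketch mirroring the VHDL example; the attribute names follow the VerilogModule/VerilogParameter construction in parse_verilog above:
code = '''
module blinker #(parameter WIDTH = 8) (
    input clk,
    input rst,
    output reg [WIDTH-1:0] count
);
endmodule
'''
for mod in parse_verilog(code):
    print(mod.name, [p.name for p in mod.generics], [p.name for p in mod.ports])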