def _validI(x, y, weights):
    '''
    return indices that have enough data points and are not erroneous
    '''
    # density filter:
    i = np.logical_and(np.isfinite(y), weights > np.median(weights))
    # filter outliers:
    try:
        grad = np.abs(np.gradient(y[i]))
        max_gradient = 4 * np.median(grad)
        # NOTE: the original wrote i[i][grad > max_gradient] = False,
        # which assigns into a temporary copy and has no effect;
        # write through the original indices instead:
        j = np.where(i)[0]
        i[j[grad > max_gradient]] = False
    except (IndexError, ValueError):
        pass
    return i
return indices that have enough data points and are not erroneous
entailment
def smooth(x, y, weights):
    '''
    in case the NLF cannot be described by a square root function,
    fall back to bounded polynomial interpolation
    '''
    # A spline is hard to smooth properly, therefore solved with
    # bounded polynomial interpolation
    # ext=3: no extrapolation, but boundary value
    # return UnivariateSpline(x, y, w=weights,
    #                         s=len(y)*weights.max()*100, ext=3)
    # return np.poly1d(np.polyfit(x, y, w=weights, deg=2))
    p = np.polyfit(x, y, w=weights, deg=2)
    if np.any(np.isnan(p)):
        # couldn't even do the polynomial fit
        # as a last option: assume constant noise
        my = np.average(y, weights=weights)
        return lambda x: my
    return lambda xint: np.poly1d(p)(np.clip(xint, x[0], x[-1]))
in case the NLF cannot be described by a square root function, fall back to bounded polynomial interpolation
entailment
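A minimal usage sketch for smooth() (hypothetical data; assumes numpy is imported as np and the function above is in scope). The returned callable clips its input to the fitted x-range, so out-of-range queries return the boundary value:

    import numpy as np

    x = np.linspace(0, 10, 50)
    y = 0.5 * x**2 + np.random.normal(0, 2.0, 50)   # noisy quadratic
    weights = np.ones_like(y)

    fn = smooth(x, y, weights)
    print(fn(5.0))     # interpolated value inside the fitted range
    print(fn(100.0))   # clipped to x[-1] -> boundary value, no extrapolation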
def oneImageNLF(img, img2=None, signal=None):
    '''
    Estimate the NLF from one or two images of the same kind
    '''
    x, y, weights, signal = calcNLF(img, img2, signal)
    _, fn, _ = _evaluate(x, y, weights)
    return fn, signal
Estimate the NLF from one or two images of the same kind
entailment
def _getMinMax(img):
    '''
    Get the range of image intensities that most pixels lie within
    '''
    av = np.mean(img)
    std = np.std(img)
    # define range for segmentation:
    mn = av - 3 * std
    mx = av + 3 * std
    return max(img.min(), mn, 0), min(img.max(), mx)
Get the range of image intensities that most pixels lie within
entailment
def calcNLF(img, img2=None, signal=None, mn_mx_nbins=None, x=None,
            averageFn='AAD', signalFromMultipleImages=False):
    '''
    Calculate the noise level function (NLF) as f(intensity)
    using one or two images.
    The approach for this work is published in JPV##########

    img2 - 2nd image taken under the same conditions,
           used to estimate noise via image difference

    signalFromMultipleImages - whether the signal is an average of
           multiple images and not just obtained from one
           median-filtered image
    '''
    # CONSTANTS:
    # factor root-mean-square to average-absolute-deviation:
    F_RMS2AAD = (2 / np.pi)**-0.5
    F_NOISE_WITH_MEDIAN = 1 + (1 / 3**2)
    N_BINS = 100
    MEDIAN_KERNEL_SIZE = 3

    def _averageAbsoluteDeviation(d):
        return np.mean(np.abs(d)) * F_RMS2AAD

    def _rootMeanSquare(d):
        return (d**2).mean()**0.5

    if averageFn == 'AAD':
        averageFn = _averageAbsoluteDeviation
    else:
        averageFn = _rootMeanSquare

    img = np.asfarray(img)
    if img2 is None:
        if signal is None:
            signal = median_filter(img, MEDIAN_KERNEL_SIZE)
        if signalFromMultipleImages:
            diff = img - signal
        else:
            # difference between the filtered and original image:
            diff = (img - signal) * F_NOISE_WITH_MEDIAN
    else:
        img2 = np.asfarray(img2)
        diff = (img - img2)
        # 2**0.5 because noise is subtracted by noise
        # and variance of sum = sum of variances:
        # var(img1-img2) ~ 2*var(img)
        # std(2*var) = 2**0.5 * var**0.5
        diff /= 2**0.5
        if signal is None:
            signal = median_filter(0.5 * (img + img2), MEDIAN_KERNEL_SIZE)

    if mn_mx_nbins is not None:
        mn, mx, nbins = mn_mx_nbins
        min_len = 0
    else:
        mn, mx = _getMinMax(signal)
        s = img.shape
        min_len = int(s[0] * s[1] * 1e-3)
        if min_len < 1:
            min_len = 5
        # number of bins/different intensity ranges to analyse:
        nbins = N_BINS
        if mx - mn < nbins:
            nbins = int(mx - mn)

    # bin width:
    step = (mx - mn) / nbins
    # empty arrays:
    y = np.empty(shape=nbins)
    set_x = False
    if x is None:
        set_x = True
        x = np.empty(shape=nbins)
    # give bins with more samples more weight:
    weights = np.zeros(shape=nbins)
    # current step:
    m = mn
    for n in range(nbins):
        # get indices of all pixels within a bin:
        ind = np.logical_and(signal >= m, signal <= m + step)
        m += step
        d = diff[ind]
        ld = len(d)
        if ld >= min_len:
            weights[n] = ld
            # average absolute deviation (AAD),
            # scaled to RMS:
            y[n] = averageFn(d)
        if set_x:
            x[n] = m - 0.5 * step
    return x, y, weights, signal
Calculate the noise level function (NLF) as f(intensity) using one or two images. The approach for this work is published in JPV########## img2 - 2nd image taken under the same conditions, used to estimate noise via image difference signalFromMultipleImages - whether the signal is an average of multiple images and not just obtained from one median-filtered image
entailment
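A hedged two-image sketch of calcNLF() on synthetic data (assumes the snippet's module imports, i.e. numpy as np and scipy.ndimage.median_filter, are in scope). For shot-noise-like images the estimated noise should grow roughly with the square root of the intensity:

    import numpy as np

    rng = np.random.RandomState(0)
    base = np.tile(np.linspace(10, 200, 256), (256, 1))  # smooth intensity ramp
    img1 = base + rng.normal(0, np.sqrt(base))           # shot-noise-like
    img2 = base + rng.normal(0, np.sqrt(base))

    x, y, weights, signal = calcNLF(img1, img2)
    valid = weights > 0
    # slope of y over sqrt(intensity) should be close to 1:
    print(np.polyfit(np.sqrt(x[valid]), y[valid], 1))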
def polyfit2d(x, y, z, order=3):  # bounds=None
    '''
    fit unstructured data
    '''
    ncols = (order + 1)**2
    G = np.zeros((x.size, ncols))
    ij = itertools.product(range(order + 1), range(order + 1))
    for k, (i, j) in enumerate(ij):
        G[:, k] = x**i * y**j
    m = np.linalg.lstsq(G, z)[0]
    return m
fit unstructured data
entailment
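A small sketch of polyfit2d() on synthetic scattered data (assumes numpy and itertools are imported as in the snippet). The evaluator below is a hypothetical helper that mirrors the basis ordering used inside polyfit2d; the module's own polyval2d may differ in signature:

    import itertools
    import numpy as np

    rng = np.random.RandomState(1)
    x = rng.rand(200)
    y = rng.rand(200)
    z = 1 + 2 * x + 3 * y + x * y      # exactly representable at order 2

    m = polyfit2d(x, y, z, order=2)

    def evalPoly2d(x, y, m, order=2):
        # same x**i * y**j basis ordering as in polyfit2d:
        ij = itertools.product(range(order + 1), range(order + 1))
        return sum(a * x**i * y**j for a, (i, j) in zip(m, ij))

    print(evalPoly2d(0.5, 0.5, m))     # ~3.75 = 1 + 1 + 1.5 + 0.25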
def polyfit2dGrid(arr, mask=None, order=3, replace_all=False,
                  copy=True, outgrid=None):
    '''
    replace all masked values with polynomial fitted ones
    '''
    s0, s1 = arr.shape
    if mask is None:
        if outgrid is None:
            y, x = np.mgrid[:float(s0), :float(s1)]
            p = polyfit2d(x.flatten(), y.flatten(), arr.flatten(), order)
            return polyval2d(x, y, p, dtype=arr.dtype)
        mask = np.zeros_like(arr, dtype=bool)
    elif mask.sum() == 0 and not replace_all and outgrid is None:
        return arr

    valid = ~mask
    y, x = np.where(valid)
    z = arr[valid]
    p = polyfit2d(x, y, z, order)

    if outgrid is not None:
        yy, xx = outgrid
    else:
        if replace_all:
            yy, xx = np.mgrid[:float(s0), :float(s1)]
        else:
            yy, xx = np.where(mask)

    new = polyval2d(xx, yy, p, dtype=arr.dtype)

    if outgrid is not None or replace_all:
        return new
    if copy:
        arr = arr.copy()
    arr[mask] = new
    return arr
replace all masked values with polynomial fitted ones
entailment
def minimumLineInArray(arr, relative=False, f=0, refinePosition=True,
                       max_pos=100, return_pos_arr=False):  # order=2
    '''
    find closest minimum position next to middle line

    relative: return position relative to middle line
    f: relative decrease (0...1) - setting this value close to one
       will discriminate positions further away from the center
    ##order: 2 for cubic refinement
    '''
    s0, s1 = arr.shape[:2]
    if max_pos >= s1:
        x = np.arange(s1)
    else:
        # take fewer positions within 0->(s1-1):
        x = np.rint(np.linspace(0, s1 - 1, min(max_pos, s1))).astype(int)
    res = np.empty((s0, s0), dtype=float)
    _lineSumXY(x, res, arr, f)
    if return_pos_arr:
        return res
    # best integer index:
    i, j = np.unravel_index(np.nanargmin(res), res.shape)
    if refinePosition:
        try:
            sub = res[i - 1:i + 2, j - 1:j + 2]
            ii, jj = center_of_mass(sub)
            if not np.isnan(ii):
                i += (ii - 1)
            if not np.isnan(jj):
                j += (jj - 1)
        except TypeError:
            pass
    if not relative:
        return i, j
    hs = (s0 - 1) / 2
    return i - hs, j - hs
find closest minimum position next to middle line relative: return position relative to middle line f: relative decrease (0...1) - setting this value close to one will discriminate positions further away from the center ##order: 2 for cubic refinement
entailment
def highPassFilter(self, threshold):
    '''
    remove all low frequencies by setting a square in the middle
    of the Fourier transform of the size (2*threshold)^2 to zero
    threshold = 0...1
    '''
    if not threshold:
        return
    rows, cols = self.img.shape
    tx = int(cols * threshold)
    ty = int(rows * threshold)
    # middle:
    crow, ccol = rows // 2, cols // 2
    # set the square in the middle to zero
    # (ty spans rows, tx spans columns - the original had both swapped):
    self.fshift[crow - ty:crow + ty, ccol - tx:ccol + tx] = 0
remove all low frequencies by setting a square in the middle of the Fourier transformation of the size (2*threshold)^2 to zero threshold = 0...1
entailment
def lowPassFilter(self, threshold):
    '''
    remove all high frequencies by setting the boundary around a square
    in the middle of the size (2*threshold)^2 to zero
    threshold = 0...1
    '''
    if not threshold:
        return
    rows, cols = self.img.shape
    # (ty spans rows, tx spans columns - the original had both swapped):
    ty = int(rows * threshold * 0.25)
    tx = int(cols * threshold * 0.25)
    # upper side
    self.fshift[rows - ty:rows, :] = 0
    # lower side
    self.fshift[0:ty, :] = 0
    # left side
    self.fshift[:, 0:tx] = 0
    # right side
    self.fshift[:, cols - tx:cols] = 0
remove all high frequencies by setting the boundary around a square in the middle of the size (2*threshold)^2 to zero threshold = 0...1
entailment
def reconstructImage(self):
    '''
    do inverse Fourier transform and return result
    '''
    f_ishift = np.fft.ifftshift(self.fshift)
    return np.real(np.fft.ifft2(f_ishift))
do inverse Fourier transform and return result
entailment
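The three methods above operate on a precomputed, shifted spectrum (self.fshift). A self-contained numpy sketch of the same high-pass plus reconstruction steps, outside the class:

    import numpy as np

    img = np.random.rand(64, 64)
    fshift = np.fft.fftshift(np.fft.fft2(img))

    # zero a square around the centre, as in highPassFilter:
    rows, cols = img.shape
    t = 0.1
    ty, tx = int(rows * t), int(cols * t)
    crow, ccol = rows // 2, cols // 2
    fshift[crow - ty:crow + ty, ccol - tx:ccol + tx] = 0

    # inverse transform, as in reconstructImage:
    filtered = np.real(np.fft.ifft2(np.fft.ifftshift(fshift)))
    print(filtered.mean())   # DC component removed -> close to 0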
def interpolate2dUnstructuredIDW(x, y, v, grid, power=2):
    '''
    x, y, v --> 1d numpy.array
    grid    --> 2d numpy.array
    fast if the number of given values is small
    relative to the grid resolution
    '''
    n = len(v)
    gx = grid.shape[0]
    gy = grid.shape[1]
    for i in range(gx):
        for j in range(gy):
            overPx = False  # whether pixel position == point position
            sumWi = 0.0
            value = 0.0
            for k in range(n):
                xx = x[k]
                yy = y[k]
                vv = v[k]
                if xx == i and yy == j:
                    grid[i, j] = vv
                    overPx = True
                    break
                # weight from inverse distance:
                wi = 1 / ((xx - i)**2 + (yy - j)**2)**(0.5 * power)
                sumWi += wi
                value += wi * vv
            if not overPx:
                grid[i, j] = value / sumWi
    return grid
x,y,v --> 1d numpy.array grid --> 2d numpy.array fast if number of given values is small relative to grid resolution
entailment
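A quick sketch for interpolate2dUnstructuredIDW() (assumes numpy as np and the function above in scope). Note that x indexes rows and y columns here; sample positions are reproduced exactly:

    import numpy as np

    x = np.array([2.0, 10.0, 25.0])
    y = np.array([3.0, 20.0, 5.0])
    v = np.array([1.0, 5.0, 3.0])
    grid = np.zeros((30, 30))

    out = interpolate2dUnstructuredIDW(x, y, v, grid)
    print(out[2, 3], out[10, 20])   # exact at the sample points: 1.0 5.0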
def hog(image, orientations=8, ksize=(5, 5)):
    '''
    returns the Histogram of Oriented Gradients

    :param ksize: convolution kernel size as (y,x) - needs to be odd
    :param orientations: number of orientations in between rad=0 and rad=pi

    similar to http://scikit-image.org/docs/dev/auto_examples/plot_hog.html
    but faster and with fewer options
    '''
    s0, s1 = image.shape[:2]
    # speed up the process by caching generated kernels
    # (initialise the cache on first call, otherwise the lookup below
    # would raise AttributeError):
    if not hasattr(hog, 'kernels'):
        hog.kernels = {}
    try:
        k = hog.kernels[str(ksize) + str(orientations)]
    except KeyError:
        k = _mkConvKernel(ksize, orientations)
        hog.kernels[str(ksize) + str(orientations)] = k
    out = np.empty(shape=(s0, s1, orientations))
    image[np.isnan(image)] = 0
    for i in range(orientations):
        out[:, :, i] = convolve(image, k[i])
    return out
returns the Histogram of Oriented Gradients :param ksize: convolution kernel size as (y,x) - needs to be odd :param orientations: number of orientations in between rad=0 and rad=pi similar to http://scikit-image.org/docs/dev/auto_examples/plot_hog.html but faster and with fewer options
entailment
def visualize(hog, grid=(10, 10), radCircle=None):
    '''
    visualize HOG as a polygon around the cell center
    for [grid] * cells
    '''
    s0, s1, nang = hog.shape
    angles = np.linspace(0, np.pi, nang + 1)[:-1]
    # center of each sub array:
    cx, cy = s0 // (2 * grid[0]), s1 // (2 * grid[1])
    # max. radius of the polygon around the center:
    rx, ry = cx, cy
    # for drawing a position indicator (circle):
    if radCircle is None:
        radCircle = max(1, rx // 10)
    # output array:
    out = np.zeros((s0, s1), dtype=np.uint8)
    # points of the polygon:
    pts = np.empty(shape=(1, 2 * nang, 2), dtype=np.int32)
    # takes grid[0]*grid[1] sample HOG values:
    samplesHOG = subCell2DFnArray(hog, lambda arr: arr[cx, cy], grid)
    mxHOG = samplesHOG.max()
    # sub array slices:
    slices = list(subCell2DSlices(out, grid))
    m = 0
    for m, hhh in enumerate(samplesHOG.reshape(grid[0] * grid[1], nang)):
        hhmax = hhh.max()
        hh = hhh / hhmax
        sout = out[slices[m][2:4]]
        for n, (o, a) in enumerate(zip(hh, angles)):
            pts[0, n, 0] = cx + np.cos(a) * o * rx
            pts[0, n, 1] = cy + np.sin(a) * o * ry
            pts[0, n + nang, 0] = cx + np.cos(a + np.pi) * o * rx
            pts[0, n + nang, 1] = cy + np.sin(a + np.pi) * o * ry
        cv2.fillPoly(sout, pts, int(255 * hhmax / mxHOG))
        cv2.circle(sout, (cx, cy), radCircle, 0, thickness=-1)
    return out
visualize HOG as a polygon around the cell center for [grid] * cells
entailment
def postProcessing(arr, method='KW replace + Gauss', mask=None):
    '''
    Post process measured flat field [arr].
    Depending on the measurement, different post processing [method]s
    are beneficial.
    The available methods are presented in
    ---
    K.Bedrich, M.Bokalic et al.:
    ELECTROLUMINESCENCE IMAGING OF PV DEVICES:
    ADVANCED FLAT FIELD CALIBRATION, 2017
    ---
    methods:
        'POLY replace' --> replace [arr] with a 2d polynomial fit
        'KW replace'   --> ... a fitted Kang-Weiss function
        'AoV replace'  --> ... a fitted Angle-of-view function

        'POLY repair'  --> same as above but either replacing empty
        'KW repair'        areas or smoothing out high gradient
        'AoV repair'       variations (POLY only)

        'KW repair + Gauss'  --> same as 'KW repair' with additional
        'KW repair + Median'     Gaussian or Median filter

    mask: None/2darray(bool) --> array of same shape as [arr]
                                 indicating invalid or empty positions
    '''
    assert method in ppMETHODS, \
        'post processing method (%s) must be one of %s' % (method, ppMETHODS)

    if method == 'POLY replace':
        return polyfit2dGrid(arr, mask, order=2, replace_all=True)
    elif method == 'KW replace':
        return function(arr, mask, replace_all=True)
    elif method == 'POLY repair':
        return polynomial(arr, mask, replace_all=False)
    elif method == 'KW repair':
        return function(arr, mask, replace_all=False)
    elif method == 'KW repair + Median':
        # NOTE: the original used min(method.shape) - a str has no shape;
        # the array shape is meant:
        return median_filter(function(arr, mask, replace_all=False),
                             min(arr.shape) // 20)
    elif method == 'KW repair + Gauss':
        return gaussian_filter(function(arr, mask, replace_all=False),
                               min(arr.shape) // 20)
    elif method == 'AoV repair':
        return function(arr, mask,
                        fn=lambda XY, a: angleOfView(XY, arr.shape, a=a),
                        guess=(0.01), down_scale_factor=1)
    elif method == 'AoV replace':
        return function(arr, mask,
                        fn=lambda XY, a: angleOfView(XY, arr.shape, a=a),
                        guess=(0.01), replace_all=True, down_scale_factor=1)
Post process measured flat field [arr]. Depending on the measurement, different post processing [method]s are beneficial. The available methods are presented in --- K.Bedrich, M.Bokalic et al.: ELECTROLUMINESCENCE IMAGING OF PV DEVICES: ADVANCED FLAT FIELD CALIBRATION, 2017 --- methods: 'POLY replace' --> replace [arr] with a 2d polynomial fit 'KW replace' --> ... a fitted Kang-Weiss function 'AoV replace' --> ... a fitted Angle-of-view function 'POLY repair' --> same as above but either replacing empty 'KW repair' areas or smoothing out high gradient 'AoV repair' variations (POLY only) 'KW repair + Gauss' --> same as 'KW repair' with additional 'KW repair + Median' Gaussian or Median filter mask: None/2darray(bool) --> array of same shape as [arr] indicating invalid or empty positions
entailment
def rmBorder(img, border=None):
    '''
    border
        [None], if images are corrected and the device ends at the
            image border
        [one number] (like 50), if there is an equally spaced border
            around the device
        [two tuples] like ((50,60),(1500,900))
            meaning ((Xfrom,Yfrom),(Xto,Yto))
        [four tuples] like ((x0,y0),(x1,y1),...(x3,y3))
    '''
    if border is None:
        pass
    elif np.isscalar(border):
        # equally spaced border - documented above but missing in the
        # original implementation:
        b = int(border)
        img = img[b:img.shape[0] - b, b:img.shape[1] - b]
    elif len(border) == 2:
        s0 = slice(border[0][1], border[1][1])
        s1 = slice(border[0][0], border[1][0])
        img = img[s0, s1]
    elif len(border) == 4:
        border = np.asarray(border)
        # evaluate whether border values are orthogonal:
        x = np.unique(border[:, 0])
        y = np.unique(border[:, 1])
        if len(x) == 2 and len(y) == 2:
            s0 = slice(y[0], y[1])
            s1 = slice(x[0], x[1])
            img = img[s0, s1]
        else:
            # edges are irregular:
            img = simplePerspectiveTransform(img, border)
    else:
        raise ValueError('[border] input wrong')
    return img
border [None], if images are corrected and device ends at image border [one number] (like 50), if there is an equally spaced border around the device [two tuples] like ((50,60),(1500,900)) means ((Xfrom,Yfrom),(Xto, Yto)) [four tuples] like ((x0,y0),(x1,y1),...(x3,y3))
entailment
def addImage(self, image, mask=None):
    '''
    #########
    mask -- optional
    '''
    self._last_diff = diff = image - self.noSTE
    ste = diff > self.threshold
    removeSinglePixels(ste)
    self.mask_clean = clean = ~ste
    if mask is not None:
        clean = np.logical_and(mask, clean)
    self.mma.update(image, clean)
    if self.save_ste_indices:
        self.mask_STE += ste
    return self
######### mask -- optional
entailment
def relativeAreaSTE(self):
    '''
    return STE area - relative to image area
    '''
    s = self.noSTE.shape
    return np.sum(self.mask_STE) / (s[0] * s[1])
return STE area - relative to image area
entailment
def intensityDistributionSTE(self, bins=10, range=None):
    '''
    return distribution of STE intensity
    '''
    v = np.abs(self._last_diff[self.mask_STE])
    return np.histogram(v, bins, range)
return distribution of STE intensity
entailment
def toUIntArray(img, dtype=None, cutNegative=True, cutHigh=True,
                range=None, copy=True):
    '''
    transform a float to an unsigned integer array of a fitting dtype
    adds an offset, to get rid of negative values

    range = (min, max) - scale values between the given range

    cutNegative - all values < 0 will be set to 0
    cutHigh - set to False to rather scale values to fit
    '''
    mn, mx = None, None
    if range is not None:
        mn, mx = range

    if dtype is None:
        if mx is None:
            mx = np.nanmax(img)
        dtype = np.uint16 if mx > 255 else np.uint8

    dtype = np.dtype(dtype)
    if dtype == img.dtype:
        return img

    # get max px value:
    b = {'uint8': 255,
         'uint16': 65535,
         'uint32': 4294967295,
         'uint64': 18446744073709551615}[dtype.name]

    if copy:
        img = img.copy()

    if range is not None:
        img = np.asfarray(img)
        img -= mn
        img *= b / (mx - mn)
        img = np.clip(img, 0, b)
    else:
        if cutNegative:
            img[img < 0] = 0
        else:
            # add an offset to all values:
            mn = np.min(img)
            if mn < 0:
                img -= mn  # set minimum to 0
        if cutHigh:
            img[img > b] = b
        else:
            # scale values:
            mx = np.nanmax(img)
            img = np.asfarray(img) * (float(b) / mx)
    img = img.astype(dtype)
    return img
transform a float to an unsigned integer array of a fitting dtype adds an offset, to get rid of negative values range = (min, max) - scale values between given range cutNegative - all values <0 will be set to 0 cutHigh - set to False to rather scale values to fit
entailment
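A short sketch of toUIntArray() behaviour (assumes numpy as np and the function above in scope):

    import numpy as np

    img = np.array([[-5.0, 0.0, 100.0, 300.0]])

    # defaults: negatives cut to 0; dtype chosen from the max (here uint16):
    print(toUIntArray(img))                                  # [[  0   0 100 300]]

    # scale a given range onto the full uint8 span:
    print(toUIntArray(img, dtype=np.uint8, range=(0, 300)))  # [[  0   0  85 255]]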
def toFloatArray(img):
    '''
    transform an unsigned integer array into a float array
    of matching precision
    '''
    _D = {1: np.float32,  # uint8
          2: np.float32,  # uint16
          4: np.float64,  # uint32
          8: np.float64}  # uint64
    return img.astype(_D[img.itemsize])
transform an unsigned integer array into a float array of matching precision
entailment
def toNoUintArray(arr):
    '''
    cast array to the next higher signed integer dtype
    if dtype is unsigned integer
    '''
    d = arr.dtype
    if d.kind == 'u':
        arr = arr.astype({1: np.int16,
                          2: np.int32,
                          4: np.int64}[d.itemsize])
    return arr
cast array to the next higher signed integer dtype if dtype is unsigned integer
entailment
def toGray(img):
    '''
    weights see
    https://en.wikipedia.org/wiki/Grayscale#Colorimetric_.28luminance-prese
    http://docs.opencv.org/2.4/modules/imgproc/doc/miscellaneous_transformations.html#cvtcolor
    '''
    return np.average(img, axis=-1, weights=(0.299,   # red
                                             0.587,   # green
                                             0.114)   # blue
                      ).astype(img.dtype)
weights see https://en.wikipedia.org/wiki/Grayscale#Colorimetric_.28luminance-prese http://docs.opencv.org/2.4/modules/imgproc/doc/miscellaneous_transformations.html#cvtcolor
entailment
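A one-line check of toGray() (assumes numpy as np and the function above in scope); a pure-red uint8 image maps to 0.299 * 255, roughly 76:

    import numpy as np

    rgb = np.zeros((2, 2, 3), dtype=np.uint8)
    rgb[..., 0] = 255            # pure red
    print(toGray(rgb))           # 76 everywhere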
def rgChromaticity(img):
    '''
    returns the normalized RGB space (RGB/intensity)
    see https://en.wikipedia.org/wiki/Rg_chromaticity
    '''
    out = _calc(img)
    if img.dtype == np.uint8:
        out = (255 * out).astype(np.uint8)
    return out
returns the normalized RGB space (RGB/intensity) see https://en.wikipedia.org/wiki/Rg_chromaticity
entailment
def monochromaticWavelength(img):
    '''
    TODO##########
    '''
    # peak wavelengths: https://en.wikipedia.org/wiki/RGB_color_model
    out = _calc(img)
    peakWavelengths = (570, 540, 440)  # (r,g,b)
    # s = sum(peakWavelengths)
    for n, p in enumerate(peakWavelengths):
        out[..., n] *= p
    return out.sum(axis=2)
TODO##########
entailment
def rot90(img):
    '''
    rotate one or multiple grayscale or color images 90 degrees
    '''
    s = img.shape
    if len(s) == 3:
        if s[2] in (3, 4):  # color image
            out = np.empty((s[1], s[0], s[2]), dtype=img.dtype)
            for i in range(s[2]):
                out[:, :, i] = np.rot90(img[:, :, i])
        else:  # multiple grayscale
            out = np.empty((s[0], s[2], s[1]), dtype=img.dtype)
            for i in range(s[0]):
                out[i] = np.rot90(img[i])
    elif len(s) == 2:  # one grayscale
        out = np.rot90(img)
    elif len(s) == 4 and s[3] in (3, 4):  # multiple color
        out = np.empty((s[0], s[2], s[1], s[3]), dtype=img.dtype)
        for i in range(s[0]):  # for each img
            for j in range(s[3]):  # for each channel
                out[i, :, :, j] = np.rot90(img[i, :, :, j])
    else:
        # the original evaluated the bare constant 'NotImplemented'
        # (a no-op) and then crashed on the undefined 'out';
        # raise properly instead:
        raise NotImplementedError('unsupported image shape %s' % (s,))
    return out
rotate one or multiple grayscale or color images 90 degrees
entailment
def applyColorMap(gray, cmap='flame'):
    '''
    like cv2.applyColorMap(im_gray, cv2.COLORMAP_*)
    but with different color maps
    '''
    # TODO: implement more cmaps
    if cmap != 'flame':
        # was: raise NotImplemented (a constant, not an exception):
        raise NotImplementedError
    # TODO: make better
    mx = 256  # if gray.dtype==np.uint8 else 65535
    lut = np.empty(shape=(256, 3))
    cmap = (
        # taken from pyqtgraph GradientEditorItem
        (0, (0, 0, 0)),
        (0.2, (7, 0, 220)),
        (0.5, (236, 0, 134)),
        (0.8, (246, 246, 0)),
        (1.0, (255, 255, 255)))
    # build lookup table:
    lastval, lastcol = cmap[0]
    for step, col in cmap[1:]:
        val = int(step * mx)
        for i in range(3):
            lut[lastval:val, i] = np.linspace(lastcol[i], col[i],
                                              val - lastval)
        lastcol = col
        lastval = val
    s0, s1 = gray.shape
    out = np.empty(shape=(s0, s1, 3), dtype=np.uint8)
    for i in range(3):
        out[..., i] = cv2.LUT(gray, lut[:, i])
    return out
like cv2.applyColorMap(im_gray, cv2.COLORMAP_*) but with different color maps
entailment
def _insertDateIndex(date, l):
    '''
    returns the index at which to insert the given date into a list
    (sorted newest first) where each item's first value is a date
    '''
    return next((i for i, n in enumerate(l) if n[0] < date), len(l))
returns the index at which to insert the given date into a list (sorted newest first) where each item's first value is a date
entailment
def _getFromDate(l, date):
    '''
    returns the entry of the given or best fitting date
    '''
    try:
        date = _toDate(date)
        i = _insertDateIndex(date, l) - 1
        if i == -1:
            return l[0]
        return l[i]
    except (ValueError, TypeError):
        # ValueError: date invalid / TypeError: date is None
        return l[0]
returns the entry of the given or best fitting date
entailment
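A sketch of the newest-first bookkeeping that _insertDateIndex() and the add* methods below assume (hypothetical entries; plain datetime objects stand in for whatever _toDate() returns):

    from datetime import datetime

    l = [[datetime(2017, 3, 1), 'newest'],
         [datetime(2016, 11, 30), 'older']]

    i = _insertDateIndex(datetime(2017, 1, 1), l)
    l.insert(i, [datetime(2017, 1, 1), 'middle'])
    print([entry[1] for entry in l])   # ['newest', 'middle', 'older']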
def dates(self, typ, light=None):
    '''
    Args:
        typ: type of calibration to look for.
             See .coeffs.keys() for all types available
        light (Optional[str]): restrict to calibrations done with
             the given light source
    Returns:
        list: All calibration dates available for given typ
    '''
    try:
        d = self._getDate(typ, light)
        return [self._toDateStr(c[0]) for c in d]
    except KeyError:
        return []
Args: typ: type of calibration to look for. See .coeffs.keys() for all types available light (Optional[str]): restrict to calibrations done with the given light source Returns: list: All calibration dates available for given typ
entailment
def infos(self, typ, light=None, date=None):
    '''
    Args:
        typ: type of calibration to look for.
             See .coeffs.keys() for all types available
        date (Optional[str]): date of calibration
    Returns:
        list: all infos available for given typ
    '''
    d = self._getDate(typ, light)
    if date is None:
        return [c[1] for c in d]
    # TODO: not struct time, but time in ms since epoch
    return _getFromDate(d, date)[1]
Args: typ: type of calibration to look for. See .coeffs.keys() for all types available date (Optional[str]): date of calibration Returns: list: all infos available for given typ
entailment
def overview(self):
    '''
    Returns:
        str: an overview covering all calibration infos and shapes
    '''
    c = self.coeffs
    out = 'camera name: %s' % c['name']
    out += '\nmax value: %s' % c['depth']
    out += '\nlight spectra: %s' % c['light spectra']
    out += '\ndark current:'
    for (date, info, (slope, intercept), error) in c['dark current']:
        out += '\n\t date: %s' % self._toDateStr(date)
        out += '\n\t\t info: %s; slope:%s, intercept:%s' % (
            info, slope.shape, intercept.shape)
    out += '\nflat field:'
    for light, vals in c['flat field'].items():
        out += '\n\t light: %s' % light
        for (date, info, arr, error) in vals:
            out += '\n\t\t date: %s' % self._toDateStr(date)
            out += '\n\t\t\t info: %s; array:%s' % (info, arr.shape)
    out += '\nlens:'
    for light, vals in c['lens'].items():
        out += '\n\t light: %s' % light
        for (date, info, coeffs) in vals:
            out += '\n\t\t date: %s' % self._toDateStr(date)
            out += '\n\t\t\t info: %s; coeffs:%s' % (info, coeffs)
    out += '\nnoise:'
    for (date, info, nlf_coeff, error) in c['noise']:
        out += '\n\t date: %s' % self._toDateStr(date)
        out += '\n\t\t info: %s; coeffs:%s' % (info, nlf_coeff)
    out += '\nPoint spread function:'
    for light, vals in c['psf'].items():
        out += '\n\t light: %s' % light
        for (date, info, psf) in vals:
            out += '\n\t\t date: %s' % self._toDateStr(date)
            out += '\n\t\t\t info: %s; shape:%s' % (info, psf.shape)
    return out
Returns: str: an overview covering all calibration infos and shapes
entailment
def setCamera(self, camera_name, bit_depth=16):
    '''
    Args:
        camera_name (str): Name of the camera
        bit_depth (int): depth (bit) of the camera sensor
    '''
    self.coeffs['name'] = camera_name
    self.coeffs['depth'] = bit_depth
Args: camera_name (str): Name of the camera bit_depth (int): depth (bit) of the camera sensor
entailment
def addDarkCurrent(self, slope, intercept=None, date=None, info='',
                   error=None):
    '''
    Args:
        slope (np.array): dPx/dExposureTime[sec]
        intercept (np.array)
        error (float): absolute error
        date (str): "DD Mon YY" e.g. "30 Nov 16"
    '''
    date = _toDate(date)
    self._checkShape(slope)
    self._checkShape(intercept)
    d = self.coeffs['dark current']
    if intercept is None:
        data = slope
    else:
        data = (slope, intercept)
    d.insert(_insertDateIndex(date, d), [date, info, data, error])
Args: slope (np.array): dPx/dExposureTime[sec] intercept (np.array) error (float): absolute error date (str): "DD Mon YY" e.g. "30 Nov 16"
entailment
def addNoise(self, nlf_coeff, date=None, info='', error=None):
    '''
    Args:
        nlf_coeff (list)
        error (float): absolute error
        info (str): additional information
        date (str): "DD Mon YY" e.g. "30 Nov 16"
    '''
    date = _toDate(date)
    d = self.coeffs['noise']
    d.insert(_insertDateIndex(date, d), [date, info, nlf_coeff, error])
Args: nlf_coeff (list) error (float): absolute info (str): additional information date (str): "DD Mon YY" e.g. "30 Nov 16"
entailment
def addPSF(self, psf, date=None, info='', light_spectrum='visible'):
    '''
    add a new point spread function
    '''
    self._registerLight(light_spectrum)
    date = _toDate(date)
    f = self.coeffs['psf']
    if light_spectrum not in f:
        f[light_spectrum] = []
    f[light_spectrum].insert(_insertDateIndex(date, f[light_spectrum]),
                             [date, info, psf])
add a new point spread function
entailment
def addFlatField(self, arr, date=None, info='', error=None,
                 light_spectrum='visible'):
    '''
    light_spectrum = light, IR ...
    '''
    self._registerLight(light_spectrum)
    self._checkShape(arr)
    date = _toDate(date)
    f = self.coeffs['flat field']
    if light_spectrum not in f:
        f[light_spectrum] = []
    f[light_spectrum].insert(_insertDateIndex(date, f[light_spectrum]),
                             [date, info, arr, error])
light_spectrum = light, IR ...
entailment
def addLens(self, lens, date=None, info='', light_spectrum='visible'):
    '''
    lens -> instance of LensDistortion or a saved file
    '''
    self._registerLight(light_spectrum)
    date = _toDate(date)
    if not isinstance(lens, LensDistortion):
        ld = LensDistortion()
        ld.readFromFile(lens)
        lens = ld
    f = self.coeffs['lens']
    if light_spectrum not in f:
        f[light_spectrum] = []
    f[light_spectrum].insert(_insertDateIndex(date, f[light_spectrum]),
                             [date, info, lens.coeffs])
lens -> instance of LensDistortion or saved file
entailment
def clearOldCalibrations(self, date=None):
    '''
    if no specific date is given, remove all except
    the youngest calibration
    '''
    self.coeffs['dark current'] = [self.coeffs['dark current'][-1]]
    self.coeffs['noise'] = [self.coeffs['noise'][-1]]
    for light in self.coeffs['flat field']:
        self.coeffs['flat field'][light] = [
            self.coeffs['flat field'][light][-1]]
    for light in self.coeffs['lens']:
        self.coeffs['lens'][light] = [self.coeffs['lens'][light][-1]]
if no specific date is given, remove all except the youngest calibration
entailment
def transpose(self):
    '''
    transpose all calibration arrays
    in case different array shape orders were used: (x,y) vs. (y,x)
    '''
    def _t(item):
        if type(item) == list:
            for n, it in enumerate(item):
                if type(it) == tuple:
                    it = list(it)
                    item[n] = it
                if type(it) == list:
                    _t(it)
                if isinstance(it, np.ndarray) and it.shape == s:
                    item[n] = it.T

    s = self.coeffs['shape']
    for item in self.coeffs.values():
        if type(item) == dict:
            for item2 in item.values():
                _t(item2)
        else:
            _t(item)
    self.coeffs['shape'] = s[::-1]
transpose all calibration arrays in case different array shape orders were used (x,y) vs. (y,x)
entailment
def correct(self, images, bgImages=None, exposure_time=None,
            light_spectrum=None, threshold=0.1, keep_size=True,
            date=None, deblur=False, denoise=False):
    '''
    exposure_time [s]

    date -> string e.g. '30. Nov 15' to get a calibration from that date
         -> {'dark current':'30. Nov 15',
             'flat field':'15. Nov 15',
             'lens':'14. Nov 15',
             'noise':'01. Nov 15'}
    '''
    print('CORRECT CAMERA ...')
    if isinstance(date, string_types) or date is None:
        date = {'dark current': date,
                'flat field': date,
                'lens': date,
                'noise': date,
                'psf': date}

    if light_spectrum is None:
        try:
            light_spectrum = self.coeffs['light spectra'][0]
        except IndexError:
            pass

    # do we have multiple images?
    if (type(images) in (list, tuple) or
            (isinstance(images, np.ndarray) and
             images.ndim == 3 and
             images.shape[-1] not in (3, 4)  # is color
             )):
        if len(images) > 1:
            # 0. NOISE
            n = self.coeffs['noise']
            if self.noise_level_function is None and len(n):
                n = _getFromDate(n, date['noise'])[2]
                self.noise_level_function = \
                    lambda x: NoiseLevelFunction.boundedFunction(x, *n)
            print('... remove single-time-effects from images')
            # 1. STE REMOVAL - ONLY IF >= 2 IMAGES ARE GIVEN:
            ste = SingleTimeEffectDetection(
                images, nStd=4,
                noise_level_function=self.noise_level_function)
            image = ste.noSTE
            if self.noise_level_function is None:
                self.noise_level_function = ste.noise_level_function
        else:
            image = np.asfarray(imread(images[0], dtype=float))
    else:
        image = np.asfarray(imread(images, dtype=float))

    self._checkShape(image)
    self.last_light_spectrum = light_spectrum
    self.last_img = image

    # 2. BACKGROUND REMOVAL
    try:
        self._correctDarkCurrent(image, exposure_time, bgImages,
                                 date['dark current'])
    except Exception as errm:
        print('Error: %s' % errm)

    # 3. VIGNETTING/SENSITIVITY CORRECTION:
    try:
        self._correctVignetting(image, light_spectrum,
                                date['flat field'])
    except Exception as errm:
        print('Error: %s' % errm)

    # 4. REPLACE DEFECTIVE PX WITH MEDIAN FILTERED VALUE
    if threshold > 0:
        print('... remove artefacts')
        try:
            image = self._correctArtefacts(image, threshold)
        except Exception as errm:
            print('Error: %s' % errm)

    # 5. DEBLUR
    if deblur:
        print('... remove blur')
        try:
            image = self._correctBlur(image, light_spectrum, date['psf'])
        except Exception as errm:
            print('Error: %s' % errm)

    # 6. LENS CORRECTION:
    try:
        image = self._correctLens(image, light_spectrum, date['lens'],
                                  keep_size)
    except TypeError:
        # the original left this message as a bare string (a no-op):
        print('Error: no lens calibration found')
    except Exception as errm:
        print('Error: %s' % errm)

    # 7. DENOISE
    if denoise:
        print('... denoise ... this might take some time')
        image = self._correctNoise(image)

    print('DONE')
    return image
exposure_time [s] date -> string e.g. '30. Nov 15' to get a calibration from that date -> {'dark current':'30. Nov 15', 'flat field':'15. Nov 15', 'lens':'14. Nov 15', 'noise':'01. Nov 15'}
entailment
def _correctNoise(self, image):
    '''
    denoise using non-local means with guessed best parameters
    '''
    from skimage.restoration import denoise_nl_means  # save startup time
    image[np.isnan(image)] = 0  # otherwise result = nan
    out = denoise_nl_means(image,
                           patch_size=7,
                           patch_distance=11,
                           # h=signalStd(image) * 0.1
                           )
    return out
denoise using non-local means with guessed best parameters
entailment
def _correctDarkCurrent(self, image, exposuretime, bgImages, date):
    '''
    open OR calculate a background image: f(t) = m*t + n
    '''
    # either exposureTime or bgImages has to be given
    print('... remove dark current')

    if bgImages is not None:
        if (type(bgImages) in (list, tuple) or
                (isinstance(bgImages, np.ndarray) and bgImages.ndim == 3)):
            if len(bgImages) > 1:
                # if multiple images are given: do STE removal:
                nlf = self.noise_level_function
                bg = SingleTimeEffectDetection(
                    bgImages, nStd=4, noise_level_function=nlf).noSTE
            else:
                bg = imread(bgImages[0])
        else:
            bg = imread(bgImages)
    else:
        bg = self.calcDarkCurrent(exposuretime, date)
    self.temp['bg'] = bg
    image -= bg
open OR calculate a background image: f(t)=m*t+n
entailment
def _correctArtefacts(self, image, threshold):
    '''
    Apply a thresholded median replacing high gradients
    and values beyond the boundaries
    '''
    image = np.nan_to_num(image)
    medianThreshold(image, threshold, copy=False)
    return image
Apply a thresholded median replacing high gradients and values beyond the boundaries
entailment
def getCoeff(self, name, light=None, date=None):
    '''
    try to get the calibration for the right light source,
    but use another one if none exists
    '''
    d = self.coeffs[name]
    try:
        c = d[light]
    except KeyError:
        try:
            k, i = next(iter(d.items()))
            if light is not None:
                print('no calibration found for [%s] - using [%s] instead'
                      % (light, k))
        except StopIteration:
            return None
        c = i
    except TypeError:
        # coeff not dependent on light source:
        c = d
    return _getFromDate(c, date)
try to get the calibration for the right light source, but use another one if none exists
entailment
def vignettingFromRandomSteps(imgs, bg, inPlane_scale_factor=None,
                              debugFolder=None, **kwargs):
    '''
    important: the first image should show most of the device,
    because it is used as reference
    '''
    # TODO: inPlane_scale_factor
    if debugFolder:
        debugFolder = PathStr(debugFolder)

    s = ObjectVignettingSeparation(imgs[0], bg, **kwargs)
    for img in imgs[1:]:
        fit = s.addImg(img)
        if debugFolder and fit is not False:
            imwrite(debugFolder.join('fit_%s.tiff' % len(s.fits)), fit)

    if debugFolder:
        imwrite(debugFolder.join('init.tiff'), s.flatField)

    smoothed_ff, mask, flatField, obj = s.separate()

    if debugFolder:
        imwrite(debugFolder.join('object.tiff'), obj)
        imwrite(debugFolder.join('flatfield.tiff'), flatField,
                dtype=float)
        imwrite(debugFolder.join('flatfield_smoothed.tiff'), smoothed_ff,
                dtype=float)
    return smoothed_ff, mask
important: the first image should show most of the device, because it is used as reference
entailment
def addImg(self, img, maxShear=0.015, maxRot=100, minMatches=12,
           borderWidth=3):  # borderWidth=100
    """
    Args:
        img (path or array): image containing the same object
            as in the reference image
    Kwargs:
        maxShear (float): In order to define a good fit, reject higher
            shear values between this and the reference image
        maxRot (float): Same for rotation
        minMatches (int): Minimum number of matching points found in
            both this and the reference image
    """
    try:
        fit, img, H, H_inv, nmatched = self._fitImg(img)
    except Exception as e:
        print(e)
        return

    # CHECK WHETHER FIT IS GOOD ENOUGH:
    (translation, rotation, scale, shear) = decompHomography(H)
    print('Homography ...\n\ttranslation: %s\n\trotation: %s\n\t'
          'scale: %s\n\tshear: %s' % (translation, rotation, scale, shear))
    if (nmatched > minMatches and
            abs(shear) < maxShear and
            abs(rotation) < maxRot):
        print('==> img added')
        # HOMOGRAPHY:
        self.Hs.append(H)
        # INVERSE HOMOGRAPHY:
        self.Hinvs.append(H_inv)
        # IMAGES WARPED TO THE BASE IMAGE:
        self.fits.append(fit)

        # ADD IMAGE TO THE INITIAL flatField ARRAY:
        i = img > self.signal_ranges[-1][0]
        # remove borders (that might have erroneous light):
        i = minimum_filter(i, borderWidth)
        self._ff_mma.update(img, i)

        # create fit img mask:
        mask = fit < self.signal_ranges[-1][0]
        mask = maximum_filter(mask, borderWidth)
        # IGNORE BORDER:
        r = self.remove_border_size
        if r:
            mask[:r, :] = 1
            mask[-r:, :] = 1
            mask[:, -r:] = 1
            mask[:, :r] = 1
        self._fit_masks.append(mask)
        # image added
        return fit
    return False
Args: img (path or array): image containing the same object as in the reference image Kwargs: maxShear (float): In order to define a good fit, reject higher shear values between this and the reference image maxRot (float): Same for rotation minMatches (int): Minimum number of matching points found in both this and the reference image
entailment
def error(self, nCells=15):
    '''
    calculate the standard deviation of all fitted images,
    averaged to a grid
    '''
    s0, s1 = self.fits[0].shape
    aR = s0 / s1
    if aR > 1:
        ss0 = int(nCells)
        ss1 = int(ss0 / aR)
    else:
        ss1 = int(nCells)
        ss0 = int(ss1 * aR)
    L = len(self.fits)
    arr = np.array(self.fits)
    arr[np.array(self._fit_masks)] = np.nan

    avg = np.tile(np.nanmean(arr, axis=0), (L, 1, 1))
    arr = (arr - avg) / avg

    out = np.empty(shape=(L, ss0, ss1))
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=RuntimeWarning)
        for n, f in enumerate(arr):
            out[n] = subCell2DFnArray(f, np.nanmean, (ss0, ss1))
    return np.nanmean(out**2)**0.5
calculate the standard deviation of all fitted images, averaged to a grid
entailment
def _fitImg(self, img):
    '''
    fit perspective and size of the input image to the reference image
    '''
    img = imread(img, 'gray')
    if self.bg is not None:
        img = cv2.subtract(img, self.bg)
    if self.lens is not None:
        img = self.lens.correct(img, keepSize=True)

    (H, _, _, _, _, _, _, n_matches) = self.findHomography(img)
    H_inv = self.invertHomography(H)
    s = self.obj_shape
    fit = cv2.warpPerspective(img, H_inv, (s[1], s[0]))
    return fit, img, H, H_inv, n_matches
fit perspective and size of the input image to the reference image
entailment
def _findObject(self, img):
    '''
    Create a bounding box around the object within an image
    '''
    from imgProcessor.imgSignal import signalMinimum
    # img is scaled already:
    i = img > signalMinimum(img)  # img.max()/2.5
    # filter noise, single-time-effects etc. from the mask:
    i = minimum_filter(i, 4)
    return boundingBox(i)
Create a bounding box around the object within an image
entailment
def filterVerticalLines(arr, min_line_length=4):
    """
    Remove vertical lines in a boolean array
    if line length >= min_line_length
    """
    gy = arr.shape[0]
    gx = arr.shape[1]
    mn = min_line_length - 1
    for i in range(gy):
        for j in range(gx):
            if arr[i, j]:
                # measure the run length below (i, j), staying in bounds
                # (the original indexed past the array end):
                for d in range(min_line_length):
                    if i + d >= gy or not arr[i + d, j]:
                        break
                if d == mn:
                    # line is long enough -> erase it:
                    d = 0
                    while i + d < gy and arr[i + d, j]:
                        arr[i + d, j] = 0
                        d += 1
Remove vertical lines in a boolean array if line length >= min_line_length
entailment
def vignetting(xy, f=100, alpha=0, rot=0, tilt=0, cx=50, cy=50):
    '''
    Vignetting equation using the KANG-WEISS-MODEL
    see http://research.microsoft.com/en-us/um/people/sbkang/publications/eccv00.pdf

    f     - focal length
    alpha - coefficient in the geometric vignetting factor
    tilt  - tilt angle of a planar scene
    rot   - rotation angle of a planar scene
    cx    - image center, x
    cy    - image center, y
    '''
    x, y = xy
    # distance to image center:
    dist = ((x - cx)**2 + (y - cy)**2)**0.5

    # OFF-AXIS ILLUMINATION FACTOR:
    A = 1.0 / (1 + (dist / f)**2)**2

    # GEOMETRIC FACTOR:
    if alpha != 0:
        G = (1 - alpha * dist)
    else:
        G = 1

    # TILT FACTOR:
    if tilt != 0:
        T = tiltFactor((x, y), f, tilt, rot, (cy, cx))
    else:
        T = 1

    return A * G * T
Vignetting equation using the KANG-WEISS-MODEL see http://research.microsoft.com/en-us/um/people/sbkang/publications/eccv00.pdf f - focal length alpha - coefficient in the geometric vignetting factor tilt - tilt angle of a planar scene rot - rotation angle of a planar scene cx - image center, x cy - image center, y
entailment
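Evaluating the Kang-Weiss model above over a pixel grid (assumes numpy as np and the function above in scope); the result is 1.0 at the optical centre and falls off towards the corners:

    import numpy as np

    y, x = np.mgrid[0:100, 0:100].astype(float)
    V = vignetting((x, y), f=100, alpha=0.001, cx=50, cy=50)

    print(V[50, 50])   # 1.0 at the centre
    print(V[0, 0])     # ~0.41 in the corner (A- and G-factor combined)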
def tiltFactor(xy, f, tilt, rot, center=None):
    '''
    this function only covers vignetting through perspective distortion

    f    - focal length [px]
    tilt - tilt angle of a planar scene [radian]
    rot  - rotation angle of a planar scene [radian]
    '''
    x, y = xy
    arr = np.cos(tilt) * (
        1 + (np.tan(tilt) / f) * (
            x * np.sin(rot) - y * np.cos(rot)))**3
    return arr
this function only covers vignetting through perspective distortion f - focal length [px] tilt - tilt angle of a planar scene [radian] rot - rotation angle of a planar scene [radian]
entailment
def imgAverage(images, copy=True):
    '''
    returns an image average

    works on many, also unloaded images
    minimises RAM usage
    '''
    i0 = images[0]
    out = imread(i0, dtype='float')
    if copy and id(i0) == id(out):
        out = out.copy()

    for i in images[1:]:
        out += imread(i, dtype='float')
    out /= len(images)
    return out
returns an image average works on many, also unloaded images minimises RAM usage
entailment
def offsetMeshgrid(offset, grid, shape):
    '''
    Imagine you have cell averages [grid] on an image.
    The top-left position of [grid] within the image can be variable
    [offset].

    offset(x,y)  e.g. (0,0) if no offset
    grid(nx,ny)  resolution of the smaller grid
    shape(x,y)   -> output shape

    returns a meshgrid to be used to upscale [grid]
    to [shape] resolution
    '''
    g0, g1 = grid
    s0, s1 = shape
    o0, o1 = offset
    # rescale to the small grid:
    o0 = -o0 / s0 * (g0 - 1)
    o1 = -o1 / s1 * (g1 - 1)
    xx, yy = np.meshgrid(np.linspace(o1, o1 + g1 - 1, s1),
                         np.linspace(o0, o0 + g0 - 1, s0))
    return yy, xx
Imagine you have cell averages [grid] on an image. the top-left position of [grid] within the image can be variable [offset] offset(x,y) e.g.(0,0) if no offset grid(nx,ny) resolution of smaller grid shape(x,y) -> output shape returns meshgrid to be used to upscale [grid] to [shape] resolution
entailment
def poisson(x, a, b, c, d=0):
    '''
    Poisson function
    a -> height of the curve's peak
    b -> position of the center of the peak
    c -> standard deviation
    d -> offset
    '''
    # scipy.misc.factorial was removed in newer SciPy versions;
    # it lives in scipy.special (imported here to save startup time):
    from scipy.special import factorial
    # NOTE: parameter b is not used in this implementation
    lamb = 1
    X = (x / (2 * c)).astype(int)
    return a * ((lamb**X / factorial(X)) * np.exp(-lamb)) + d
Poisson function a -> height of the curve's peak b -> position of the center of the peak c -> standard deviation d -> offset
entailment
def rotate(image, angle, interpolation=cv2.INTER_CUBIC,
           borderMode=cv2.BORDER_REFLECT, borderValue=0):
    '''
    angle [deg]
    '''
    s0, s1 = image.shape
    # OpenCV expects (x, y) order for the center and (width, height)
    # for the output size - the original passed both in (row, col) order:
    image_center = ((s1 - 1) / 2., (s0 - 1) / 2.)
    rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
    result = cv2.warpAffine(image, rot_mat, (s1, s0),
                            flags=interpolation,
                            borderMode=borderMode,
                            borderValue=borderValue)
    return result
angle [deg]
entailment
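A quick check of rotate() (assumes cv2 and numpy as np in scope, plus the function above); the output keeps the input shape, with borders filled by reflection:

    import numpy as np

    img = np.zeros((100, 200), dtype=np.uint8)
    img[40:60, 80:120] = 255
    rotated = rotate(img, 45)
    print(rotated.shape)   # (100, 200) - shape is preserved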
def adjustUncertToExposureTime(facExpTime, uncertMap, evtLenMap):
    '''
    Adjust image uncertainty (measured at exposure time t0)
    to a new exposure time

    facExpTime --> new exp. time / reference exp. time = (t/t0)
    uncertMap  --> 2d array mapping image uncertainty
    evtLenMap  --> 2d array mapping event duration within image [sec]
                   event duration is relative to exposure time,
                   e.g. duration = 2 means the event is 2x longer than
                   the exposure time

    More information can be found at ...
    ----
    K.Bedrich: Quantitative Electroluminescence Imaging, PhD Thesis, 2017
    Subsection 5.1.4.3: Exposure Time Dependency
    ----
    '''
    # fit parameters, obtained from [simulateUncertDependencyOnExpTime]:
    params = np.array(
        # a               facExpTime      f_0             f_inf
        [[2.63017121e+00, 3.05873627e-01, 1.00000000e+01, 2.78233309e-01],
         [2.26467931e+00, 2.86206621e-01, 8.01396977e+00, 2.04089232e-01],
         [1.27361168e+00, 5.18377189e-01, 3.04180084e+00, 2.61396338e-01],
         [7.34546040e-01, 7.34549823e-01, 1.86507345e+00, 2.77563156e-01],
         [3.82715618e-01, 9.32410141e-01, 1.34510254e+00, 2.91228149e-01],
         [1.71166071e-01, 1.14092885e+00, 1.11243702e+00, 3.07947386e-01],
         [6.13455410e-02, 1.43802520e+00, 1.02995065e+00, 3.93920802e-01],
         [1.65383071e-02, 1.75605076e+00, 1.00859395e+00, 5.02132321e-01],
         [4.55800114e-03, 1.99855711e+00, 9.98819118e-01, 5.99572776e-01]])

    # event duration relative to exposure time: (1/16 ... 16)
    dur = np.array([0.0625, 0.125, 0.25, 0.5, 1., 2., 4., 8., 16.])

    # get factors from interpolation:
    a = UnivariateSpline(dur, params[:, 0], k=3, s=0)
    b = UnivariateSpline(dur, params[:, 1], k=3, s=0)
    start = UnivariateSpline(dur, params[:, 2], k=3, s=0)
    end = UnivariateSpline(dur, params[:, 3], k=3, s=0)
    p0 = a(evtLenMap), b(evtLenMap), start(evtLenMap), end(evtLenMap)

    # uncertainty for the new exposure time:
    out = uncertMap * _fitfn(facExpTime, *p0)

    # everywhere where there ARE NO EVENTS --> scale uncertainty as if
    # it were normally distributed:
    i = evtLenMap == 0
    out[i] = uncertMap[i] * (1 / facExpTime)**0.5
    return out
Adjust image uncertainty (measured at exposure time t0) to new exposure time facExpTime --> new exp.time / reference exp.time =(t/t0) uncertMap --> 2d array mapping image uncertainty evtLen --> 2d array mapping event duration within image [sec] event duration is relative to exposure time e.g. duration = 2 means event is 2x longer than exposure time More information can be found at ... ---- K.Bedrich: Quantitative Electroluminescence Imaging, PhD Thesis, 2017 Subsection 5.1.4.3: Exposure Time Dependency ----
entailment
def gaussian(x, a, b, c, d=0):
    '''
    a -> height of the curve's peak
    b -> position of the center of the peak
    c -> standard deviation or Gaussian RMS width
    d -> offset
    '''
    return a * np.exp(-((x - b)**2) / (2 * c**2)) + d
a -> height of the curve's peak b -> position of the center of the peak c -> standard deviation or Gaussian RMS width d -> offset
entailment
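Since gaussian() has the standard (x, a, b, c, d) signature, it can be passed straight to scipy.optimize.curve_fit; a minimal round-trip sketch with synthetic data:

    import numpy as np
    from scipy.optimize import curve_fit

    x = np.linspace(-5, 5, 100)
    y = gaussian(x, a=2.0, b=0.5, c=1.2, d=0.1)
    y_noisy = y + np.random.normal(0, 0.02, x.size)

    popt, _ = curve_fit(gaussian, x, y_noisy, p0=(1, 0, 1, 0))
    print(popt)   # ~ [2.0, 0.5, 1.2, 0.1]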
def videoWrite(path, imgs, levels=None, shape=None, frames=15,
               annotate_names=None, lut=None, updateFn=None):
    '''
    TODO
    '''
    frames = int(frames)
    if annotate_names is not None:
        assert len(annotate_names) == len(imgs)

    if levels is None:
        if imgs[0].dtype == np.uint8:
            levels = 0, 255
        elif imgs[0].dtype == np.uint16:
            levels = 0, 2**16 - 1
        else:
            levels = np.min(imgs), np.max(imgs)

    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    h, w = imgs.shape[1:3]
    if shape and shape != (h, w):
        h, w = shape
        imgs = [cv2.resize(i, (w, h)) for i in imgs]

    assert path[-3:] in ('avi', 'png'), \
        'video export only supports *.avi or *.png'
    isVideo = path[-3:] == 'avi'
    if isVideo:
        # NOTE: the original also opened a webcam via cv2.VideoCapture(0)
        # here, which is not needed for writing and was removed:
        out = cv2.VideoWriter(path, fourcc, frames, (w, h), isColor=1)

    times = np.linspace(0, len(imgs) - 1, len(imgs) * frames)
    interpolator = LinearInterpolateImageStack(imgs)

    if lut is not None:
        lut = lut(imgs[0])

    for n, time in enumerate(times):
        if updateFn:
            # update progress:
            updateFn.emit(100 * n / len(times))
        image = interpolator(time)
        cimg = makeRGBA(image, lut=lut, levels=levels)[0]
        cimg = cv2.cvtColor(cimg, cv2.COLOR_RGBA2BGR)
        if annotate_names:
            text = annotate_names[n // frames]
            alpha = 0.5
            org = (0, cimg.shape[0])
            fontFace = cv2.FONT_HERSHEY_PLAIN
            fontScale = 2
            thickness = 3
            putTextAlpha(cimg, text, alpha, org, fontFace,
                         fontScale, (0, 255, 0), thickness)
        if isVideo:
            out.write(cimg)
        else:
            cv2.imwrite('%s_%i_%.3f.png' % (path[:-4], n, time), cimg)
    if isVideo:
        out.release()
TODO
entailment
def imread(img, color=None, dtype=None):
    '''
    dtype = 'noUint', uint8, float, 'float', ...
    '''
    COLOR2CV = {'gray': cv2.IMREAD_GRAYSCALE,
                'all': cv2.IMREAD_COLOR,
                None: cv2.IMREAD_ANYCOLOR}
    c = COLOR2CV[color]
    if callable(img):
        img = img()
    elif isinstance(img, string_types):
        # open with openCV
        # grey - 8 bit
        if dtype in (None, "noUint") or np.dtype(dtype) != np.uint8:
            c |= cv2.IMREAD_ANYDEPTH
        img2 = cv2.imread(img, c)
        if img2 is None:
            raise IOError("image '%s' does not exist" % img)
        img = img2
    elif color == 'gray' and img.ndim == 3:  # multi-channel img like rgb
        # cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) cannot handle float64:
        img = toGray(img)
    # transform array to uint8 array due to openCV restriction:
    if dtype is not None:
        if isinstance(img, np.ndarray):
            img = _changeArrayDType(img, dtype, cutHigh=False)
    return img
dtype = 'noUint', uint8, float, 'float', ...
entailment
def addImg(self, img, roi=None):
    '''
    img - background, flat field, ste corrected image
    roi - [(x1,y1),...,(x4,y4)] - boundaries where points are
    '''
    self.img = imread(img, 'gray')
    s0, s1 = self.img.shape
    if roi is None:
        roi = ((0, 0), (s0, 0), (s0, s1), (0, s1))

    k = self.kernel_size
    hk = k // 2

    # mask image:
    img2 = self.img.copy()
    mask = np.zeros(self.img.shape)
    cv2.fillConvexPoly(mask, np.asarray(roi, dtype=np.int32), color=1)
    mask = mask.astype(bool)
    im = img2[mask]
    bg = im.mean()  # assume image average within roi == background
    mask = ~mask
    img2[mask] = -1

    # find points from local maxima:
    self.points = np.zeros(shape=(self.max_points, 2), dtype=int)
    thresh = 0.8 * bg + 0.2 * im.max()
    _findPoints(img2, thresh, self.min_dist, self.points)
    self.points = self.points[:np.argmin(self.points, axis=0)[0]]

    # correct point positions, so that every point is over the max value:
    for n, p in enumerate(self.points):
        sub = self.img[p[1] - hk:p[1] + hk + 1,
                       p[0] - hk:p[0] + hk + 1]
        i, j = np.unravel_index(np.nanargmax(sub), sub.shape)
        self.points[n] += [j - hk, i - hk]

    # remove points that are too close to their neighbour or the border:
    mask = maximum_filter(mask, hk)
    i = np.ones(self.points.shape[0], dtype=bool)
    for n, p in enumerate(self.points):
        if mask[p[1], p[0]]:
            # too close to border:
            i[n] = False
        else:
            # too close to other points:
            for pp in self.points[n + 1:]:
                if norm(p - pp) < hk + 1:
                    i[n] = False
    isum = i.sum()
    ll = len(i) - isum
    print('found %s points' % isum)
    if ll:
        print('removed %s points '
              '(too close to border or other points)' % ll)
    self.points = self.points[i]

    # for shifting the peak:
    xx, yy = np.mgrid[0:k, 0:k]
    xx = xx.astype(float)
    yy = yy.astype(float)

    self.subs = []
    for i, p in enumerate(self.points):
        sub = self.img[p[1] - hk:p[1] + hk + 1,
                       p[0] - hk:p[0] + hk + 1].astype(float)
        sub2 = sub.copy()
        mean = sub2.mean()
        mx = sub2.max()
        sub2[sub2 < 0.5 * (mean + mx)] = 0  # only select the peak
        try:
            # SHIFT SUB ARRAY to align the peak maximum exactly
            # in the middle:
            c0, c1 = center_of_mass(sub2)
            coords = np.array([xx + (c0 - hk), yy + (c1 - hk)])
            # shift array:
            sub = map_coordinates(sub, coords,
                                  mode='nearest').reshape(k, k)
            # normalize:
            bg = 0.25 * (sub[0].mean() + sub[-1].mean() +
                         sub[:, 0].mean() + sub[:, -1].mean())
            sub -= bg
            sub /= sub.max()

            self._psf += sub
            if self.calc_std:
                self.subs.append(sub)
        except ValueError:
            pass
img - background, flat field, ste corrected image roi - [(x1,y1),...,(x4,y4)] - boundaries where points are
entailment
def interpolate2dStructuredFastIDW(grid, mask, kernel=15, power=2,
                                   minnvals=5):
    '''
    FASTER IMPLEMENTATION OF interpolate2dStructuredIDW

    replace all values in [grid] indicated by [mask]
    with the inverse distance weighted interpolation of all
    values within px +- kernel

    [power]    -> distance weighting factor: 1/distance**[power]
    [minnvals] -> minimum number of neighbour values to find
                  until interpolation stops
    '''
    indices, dist = growPositions(kernel)
    weights = 1 / dist**(0.5 * power)
    return _calc(grid, mask, indices, weights, minnvals - 1)
FASTER IMPLEMENTATION OF interpolate2dStructuredIDW replace all values in [grid] indicated by [mask] with the inverse distance weighted interpolation of all values within px+-kernel [power] -> distance weighting factor: 1/distance**[power] [minnvals] -> minimum number of neighbour values to find until interpolation stops
entailment
def linearBlend(img1, img2, overlap, backgroundColor=None):
    '''
    Stitch 2 images vertically together.
    Smooth the overlap area of both images with a linear fade
    from img1 to img2.

    @param img1: numpy.2dArray
    @param img2: numpy.2dArray of the same shape[1,2] as img1
    @param overlap: number of pixels both images overlap
    @returns: stitched image
    '''
    (sizex, sizey) = img1.shape[:2]
    overlapping = True
    if overlap < 0:
        overlapping = False
        overlap = -overlap

    # linear transparency change:
    alpha = np.tile(np.expand_dims(np.linspace(1, 0, overlap), 1), sizey)
    if len(img2.shape) == 3:  # multi-channel img like rgb
        # make alpha 3d with n channels:
        alpha = np.dstack([alpha for _ in range(img2.shape[2])])

    if overlapping:
        img1_cut = img1[sizex - overlap:sizex, :]
        img2_cut = img2[0:overlap, :]
    else:
        # take the average of the last 5 rows:
        img1_cut = np.tile(img1[-min(sizex, 5):, :].mean(axis=0),
                           (overlap, 1)).reshape(alpha.shape)
        img2_cut = np.tile(img2[:min(img2.shape[0], 5), :].mean(axis=0),
                           (overlap, 1)).reshape(alpha.shape)

    # fill the intermediate area as a mixture of both images:
    inter = (img1_cut * alpha + img2_cut * (1 - alpha)).astype(img1.dtype)

    # set background areas to the value of the respective other img:
    if backgroundColor is not None:
        mask = np.logical_and(img1_cut == backgroundColor,
                              img2_cut != backgroundColor)
        inter[mask] = img2_cut[mask]
        mask = np.logical_and(img2_cut == backgroundColor,
                              img1_cut != backgroundColor)
        inter[mask] = img1_cut[mask]

    if not overlapping:
        overlap = 0
    return np.vstack((img1[0:sizex - overlap, :],
                      inter,
                      img2[overlap:, :]))
Stitch 2 images vertically together. Smooth the overlap area of both images with a linear fade from img1 to img2 @param img1: numpy.2dArray @param img2: numpy.2dArray of the same shape[1,2] as img1 @param overlap: number of pixels both images overlap @returns: stitched-image
entailment
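A small sketch of linearBlend() (assumes numpy as np and the function above in scope); with a positive overlap the output height is len(img1) + len(img2) - overlap, and values inside the blend zone fade from img1 to img2:

    import numpy as np

    img1 = np.full((50, 30), 100.0)
    img2 = np.full((50, 30), 200.0)

    stitched = linearBlend(img1, img2, overlap=10)
    print(stitched.shape)    # (90, 30)
    print(stitched[45, 0])   # between 100 and 200 inside the blend zone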
def interpolate2dStructuredPointSpreadIDW(grid, mask, kernel=15, power=2,
                                          maxIter=1e5, copy=True):
    '''
    same as interpolate2dStructuredIDW but using the point spread method
    this is faster if there are bigger connected masked areas
    and the border length is smaller

    replace all values in [grid] indicated by [mask]
    with the inverse distance weighted interpolation of all
    values within px +- kernel

    [power] -> distance weighting factor: 1/distance**[power]
    [copy]  -> False: a bit faster, but modifies 'grid' and 'mask'
    '''
    assert grid.shape == mask.shape, 'grid and mask shape are different'
    # np.bool was removed in newer NumPy versions -> use the builtin:
    border = np.zeros(shape=mask.shape, dtype=bool)
    if copy:
        # copy the mask as well, because it will be modified later:
        mask = mask.copy()
        grid = grid.copy()
    return _calc(grid, mask, border, kernel, power, maxIter)
same as interpolate2dStructuredIDW but using the point spread method this is faster if there are bigger connected masked areas and the border length is smaller replace all values in [grid] indicated by [mask] with the inverse distance weighted interpolation of all values within px+-kernel [power] -> distance weighting factor: 1/distance**[power] [copy] -> False: a bit faster, but modifies 'grid' and 'mask'
entailment
def SNRaverage(snr, method='average', excludeBackground=True,
               checkBackground=True, backgroundLevel=None):
    '''
    average a signal-to-noise map

    :param method: ['average', 'X75', 'RMS', 'median']
                   - X75: this SNR will be exceeded by 75% of the signal
    :type method: str
    :param checkBackground: check whether there is actually a background
                            level to exclude
    :type checkBackground: bool
    :returns: averaged SNR as float
    '''
    if excludeBackground:
        # get the background level:
        if backgroundLevel is None:
            try:
                f = FitHistogramPeaks(snr).fitParams
                if checkBackground:
                    if not hasBackground(f):
                        excludeBackground = False
                if excludeBackground:
                    backgroundLevel = getSignalMinimum(f)
            except (ValueError, AssertionError):
                backgroundLevel = snr.min()
        if excludeBackground:
            snr = snr[snr >= backgroundLevel]

    if method == 'RMS':
        avg = (snr**2).mean()**0.5
    elif method == 'average':
        avg = snr.mean()
    elif method == 'median':
        avg = np.median(snr)
    elif method == 'X75':
        r = (snr.min(), snr.max())
        hist, bin_edges = np.histogram(snr, bins=2 * int(r[1] - r[0]),
                                       range=r)
        hist = np.asfarray(hist) / hist.sum()
        cdf = np.cumsum(hist)
        # the 25th percentile is exceeded by 75% of all values:
        i = np.argmax(cdf > 0.25)
        avg = bin_edges[i]
    else:
        # was: raise NotImplemented (a constant, not an exception):
        raise NotImplementedError("given SNR average doesn't exist")
    return avg
average a signal-to-noise map :param method: ['average','X75', 'RMS', 'median'] - X75: this SNR will be exceeded by 75% of the signal :type method: str :param checkBackground: check whether there is actually a background level to exclude :type checkBackground: bool :returns: averaged SNR as float
entailment
def maskedConvolve(arr, kernel, mask, mode='reflect'):
    '''
    same as scipy.ndimage.convolve, but only executed where mask==True
    ... which should speed up everything
    '''
    arr2 = extendArrayForConvolution(arr, kernel.shape,
                                     modex=mode, modey=mode)
    out = np.zeros_like(arr)
    return _calc(arr2, kernel, mask, out)
same as scipy.ndimage.convolve but is only executed on mask==True ... which should speed up everything
entailment
def SNR(img1, img2=None, bg=None, noise_level_function=None,
        constant_noise_level=False, imgs_to_be_averaged=False):
    '''
    Returns a signal-to-noise map
    uses the algorithm as described in BEDRICH 2016 JPV
    (not yet published)

    :param constant_noise_level: True, to assume noise to be constant
    :param imgs_to_be_averaged: True, if SNR is for average(img1, img2)
    '''
    # dark current subtraction:
    img1 = np.asfarray(img1)
    if bg is not None:
        img1 = img1 - bg

    # SIGNAL:
    if img2 is not None:
        img2_exists = True
        img2 = np.asfarray(img2)
        # the original subtracted bg unconditionally here,
        # which fails for bg=None:
        if bg is not None:
            img2 = img2 - bg
        # signal as average of both images:
        signal = 0.5 * (img1 + img2)
    else:
        img2_exists = False
        signal = img1
    # denoise:
    signal = median_filter(signal, 3)

    # NOISE:
    if constant_noise_level:
        # CONSTANT NOISE
        if img2_exists:
            d = img1 - img2
            # 0.5**0.5 because of the sum of variances:
            noise = 0.5**0.5 * np.mean(np.abs(d)) * F_RMS2AAD
        else:
            d = (img1 - signal) * F_NOISE_WITH_MEDIAN
            noise = np.mean(np.abs(d)) * F_RMS2AAD
    else:
        # NOISE LEVEL FUNCTION
        if noise_level_function is None:
            noise_level_function, _ = oneImageNLF(img1, img2, signal)
        noise = noise_level_function(signal)
        noise[noise < 1] = 1  # otherwise SNR could exceed the image value

    if imgs_to_be_averaged:
        # SNR will be higher if both given images are supposed
        # to be averaged:
        # factor of noise reduction for average(img1, img2):
        noise *= 0.5**0.5

    # BACKGROUND estimation and removal if background is not given:
    if bg is None:
        bg = getBackgroundLevel(img1)
        signal -= bg
    snr = signal / noise

    # limit to 1, saying that at these points signal == noise:
    snr[snr < 1] = 1
    return snr
Returns a signal-to-noise-map uses algorithm as described in BEDRICH 2016 JPV (not yet published) :param constant_noise_level: True, to assume noise to be constant :param imgs_to_be_averaged: True, if SNR is for average(img1, img2)
entailment
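A usage sketch with two synthetic captures of the same scene; constant noise is assumed so no noise level function needs to be fitted, but the module-level constants and the getBackgroundLevel helper must still be in scope:

import numpy as np

scene = np.tile(np.linspace(10., 200., 100), (100, 1))   # synthetic signal
img1 = scene + np.random.normal(0, 5, scene.shape)
img2 = scene + np.random.normal(0, 5, scene.shape)

snr_map = SNR(img1, img2, constant_noise_level=True)
print(snr_map.mean())   # higher values where the signal is brighter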
def sortCorners(corners):
    '''
    sort the corners of a given quadrilateral of the type
    corners : [ [xi,yi],... ]

    to an anti-clockwise order starting with the bottom left corner

    or (if plotted as image where y increases to the bottom):
    clockwise, starting top left
    '''
    corners = np.asarray(corners)
    # bring edges in order:
    corners2 = corners[ConvexHull(corners).vertices]
    if len(corners2) == 3:
        # sometimes ConvexHull misses one point because it lies
        # within the hull triangle
        # find the right position of that corner as the minimum perimeter
        # built with the point inserted at different indices
        for c in corners:
            # row-wise membership test ('c not in corners2' on a numpy
            # array would only check whether any single element matches):
            if not (corners2 == c).all(axis=1).any():
                break
        perimeter = []
        for n in range(0, 4):
            corners3 = np.insert(corners2, n, c, axis=0)
            perimeter.append(
                np.linalg.norm(
                    np.diff(
                        corners3,
                        axis=0),
                    axis=1).sum())
        n = np.argmin(perimeter)
        corners2 = np.insert(corners2, n, c, axis=0)
    # find the edge with the right angle to the quad middle:
    mn = corners2.mean(axis=0)
    d = (corners2 - mn)
    ascent = np.arctan2(d[:, 1], d[:, 0])
    bl = np.abs(BL_ANGLE + ascent).argmin()
    # build an index list starting with bl:
    i = list(range(bl, 4))
    i.extend(list(range(0, bl)))
    return corners2[i]
sort the corners of a given quadrilateral of the type corners : [ [xi,yi],... ] to an anti-clockwise order starting with the bottom left corner or (if plotted as image where y increases to the bottom): clockwise, starting top left
entailment
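A quick sketch; BL_ANGLE is a module constant (assumed to encode the bottom-left direction), so this only runs inside the module's context:

import numpy as np

quad = np.array([[90., 85.], [10., 90.], [95., 10.], [5., 15.]])  # shuffled
print(sortCorners(quad))   # starts bottom-left, anti-clockwise in x/y coords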
def closestDirectDistance(arr, ksize=30, dtype=np.uint16):
    '''
    return an array that contains the closest distance
    to the next positive value given in arr
    within a given kernel size
    '''
    out = np.zeros_like(arr, dtype=dtype)
    _calc(out, arr, ksize)
    return out
return an array that contains the closest distance to the next positive value given in arr within a given kernel size
entailment
def closestConnectedDistance(target, walls=None,
                             max_len_border_line=500,
                             max_n_path=100,
                             concentrate_every_n_pixel=1):
    '''
    returns an array that contains the closest distance from every pixel
    to the next position where target == 1

    [walls] binary 2darray - e.g. walls in a labyrinth that have to be
            surrounded in order to get to the target

    [target] binary 2darray - positions given by 1

    [concentrate_every_n_pixel] often the distance of neighbouring pixels
            is similar; to speed up the calculation set this value to
            e.g. 3 to calculate the distance only for every 3rd pixel and
            interpolate in between; recommended are values up to 3-5

    [max_len_border_line] this function calculates distances travelled
            using region growth, e.g.
            0123
            1123
            2223
            3333
            the last steps (e.g. for all steps 3: border_line=7) are
            stored in an array of limited length, defined in
            'max_len_border_line'

    [max_n_path] how many paths are possible between every pixel and
            the target; only needed if fast==False
    '''
    c = concentrate_every_n_pixel
    assert c >= 1

    if walls is None:
        walls = np.zeros_like(target, dtype=bool)

    s = target.shape
    dt = np.uint16
    if max(target.shape) < 200:
        dt = np.uint8
    out = np.zeros((s[0] // c, s[1] // c), dtype=dt)
    # temporary arrays:
    growth = np.zeros_like(target, dtype=dt)
    res = np.empty(shape=3, dtype=dt)
    steps = np.empty(shape=(max_len_border_line, 2), dtype=dt)
    new_steps = np.empty(shape=(max_len_border_line, 2), dtype=dt)
    # run calculation:
    _calc(growth, out, walls, target, steps, new_steps, res,
          concentrate_every_n_pixel)
    if c > 1:
        # if concentrate_every_n_pixel > 1
        # the resized output array
        # will have wrong values close to the wall
        # therefore substitute all wall values (-1)
        # with an average of their closest neighbours
        interpolate2dStructuredIDW(out, out == 0)
        out = cv2.resize(out, s[::-1])
        out[walls] = 0
    return out
returns an array that contains the closest distance from every pixel to the next position where target == 1 [walls] binary 2darray - e.g. walls in a labyrinth that have to be surrounded in order to get to the target [target] binary 2darray - positions given by 1 [concentrate_every_n_pixel] often the distance of neighbouring pixels is similar; to speed up the calculation set this value to e.g. 3 to calculate the distance only for every 3rd pixel and interpolate in between; recommended are values up to 3-5 [max_len_border_line] this function calculates distances travelled using region growth, e.g. 0123 1123 2223 3333 the last steps (e.g. for all steps 3: border_line=7) are stored in an array of limited length, defined in 'max_len_border_line' [max_n_path] how many paths are possible between every pixel and the target; only needed if fast==False
entailment
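A sketch with a single target pixel behind a wall; it relies on the module's compiled _calc and interpolate2dStructuredIDW helpers, so the exact numbers are an assumption:

import numpy as np

target = np.zeros((60, 60), dtype=bool)
target[30, 40] = True
walls = np.zeros_like(target)
walls[10:50, 20] = True                 # path has to go around this wall

dist = closestConnectedDistance(target, walls)
print(dist[30, 5])   # larger than the direct pixel distance of 35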
def _grow(growth, walls, target, i, j, steps, new_steps, res):
    '''
    fills [res] with [distance to next position where target == 1,
                      x coord., y coord. of that position in target]
    using region growth

    i,j -> pixel position
    growth -> a work array, needed to measure the distance
    steps, new_steps -> current and last positions of the region growth steps
            using this instead of looking for the right step position in
            [growth] should speed up the process
    '''
    # clean array:
    growth[:] = 0

    if target[i, j]:
        # pixel is in target
        res[0] = 1
        res[1] = i
        res[2] = j
        return
    step = 1
    s0, s1 = growth.shape

    step_len = 1
    new_step_ind = 0
    steps[new_step_ind, 0] = i
    steps[new_step_ind, 1] = j
    growth[i, j] = 1
    while True:
        for n in range(step_len):
            i, j = steps[n]
            for ii, jj in DIRECT_NEIGHBOURS:
                pi = i + ii
                pj = j + jj
                # if in image:
                if 0 <= pi < s0 and 0 <= pj < s1:
                    # if growth array is empty and there are no walls:
                    # fill growth with current step
                    if growth[pi, pj] == 0 and not walls[pi, pj]:
                        growth[pi, pj] = step
                        if target[pi, pj]:
                            # found destination
                            res[0] = 1
                            res[1] = pi
                            res[2] = pj
                            return

                        new_steps[new_step_ind, 0] = pi
                        new_steps[new_step_ind, 1] = pj
                        new_step_ind += 1

        if new_step_ind == 0:
            # couldn't populate any more because growth is full
            # and all possible steps are gone
            res[0] = 0
            return
        step += 1
        steps, new_steps = new_steps, steps
        step_len = new_step_ind
        new_step_ind = 0
fills [res] with [distance to next position where target == 1, x coord., y coord. of that position in target] using region growth i,j -> pixel position growth -> a work array, needed to measure the distance steps, new_steps -> current and last positions of the region growth steps using this instead of looking for the right step position in [growth] should speed up the process
entailment
def polylinesFromBinImage(img, minimum_cluster_size=6,
                          remove_small_obj_size=3,
                          reconnect_size=3,
                          max_n_contours=None, max_len_contour=None,
                          copy=True):
    '''
    return a list of arrays of un-branching contours

    img -> (boolean) array

    optional:
    ---------
    minimum_cluster_size -> minimum number of pixels connected together to
                            build a contour

    ##search_kernel_size -> TODO
    ##min_search_kernel_moment -> TODO

    numeric:
    -------------
    max_n_contours -> maximum number of possible contours in img
    max_len_contour -> maximum contour length

    '''
    assert minimum_cluster_size > 1
    assert reconnect_size % 2, 'ksize needs to be odd'

    # assume array size parameters, if not given:
    if max_n_contours is None:
        max_n_contours = max(img.shape)
    if max_len_contour is None:
        max_len_contour = sum(img.shape[:2])
    # array containing coord. of all contours:
    contours = np.zeros(shape=(max_n_contours, max_len_contour, 2),
                        dtype=np.uint16)

    if img.dtype != bool:  # np.bool is a deprecated alias of bool
        img = img.astype(bool)
    elif copy:
        img = img.copy()

    if remove_small_obj_size:
        remove_small_objects(img, remove_small_obj_size,
                             connectivity=2, in_place=True)
    if reconnect_size:
        # remove gaps
        maximum_filter(img, reconnect_size, output=img)
        # reduce contour width to 1
        img = skeletonize(img)

    n_contours = _populateContoursArray(img, contours, minimum_cluster_size)
    contours = contours[:n_contours]

    out = []
    for c in contours:
        ind = np.zeros(shape=len(c), dtype=bool)
        _getValidInd(c, ind)
        # remove all empty spaces:
        out.append(c[ind])
    return out
return a list of arrays of un-branching contours img -> (boolean) array optional: --------- minimum_cluster_size -> minimum number of pixels connected together to build a contour ##search_kernel_size -> TODO ##min_search_kernel_moment -> TODO numeric: ------------- max_n_contours -> maximum number of possible contours in img max_len_contour -> maximum contour length
entailment
def cdf(arr, pos=None):
    '''
    Return the cumulative distribution function (CDF) of a given array or
    its intensity at a given position (0-1)
    '''
    r = (arr.min(), arr.max())
    hist, bin_edges = np.histogram(arr, bins=2 * int(r[1] - r[0]), range=r)
    hist = np.asfarray(hist) / hist.sum()
    cdf = np.cumsum(hist)
    if pos is None:
        return cdf
    i = np.argmax(cdf > pos)
    return bin_edges[i]
Return the cumulative distribution function (CDF) of a given array or its intensity at a given position (0-1)
entailment
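A short example; with a normally distributed array the 0.5 position should fall close to the distribution mean:

import numpy as np

arr = np.random.normal(100., 10., (200, 200))
print(cdf(arr, 0.5))    # intensity below which ~50% of pixels fall (~100)
curve = cdf(arr)        # full curve; curve[-1] is ~1.0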
def subCell2DGenerator(arr, shape, d01=None, p01=None): '''Generator to access evenly sized sub-cells in a 2d array Args: shape (tuple): number of sub-cells in y,x e.g. (10,15) d01 (tuple, optional): cell size in y and x p01 (tuple, optional): position of top left edge Returns: int: 1st index int: 2nd index array: sub array Example: >>> a = np.array([[[0,1],[1,2]],[[2,3],[3,4]]]) >>> gen = subCell2DGenerator(a,(2,2)) >>> for i,j, sub in gen: print( i,j, sub ) 0 0 [[[0 1]]] 0 1 [[[1 2]]] 1 0 [[[2 3]]] 1 1 [[[3 4]]] ''' for i, j, s0, s1 in subCell2DSlices(arr, shape, d01, p01): yield i, j, arr[s0, s1]
Generator to access evenly sized sub-cells in a 2d array Args: shape (tuple): number of sub-cells in y,x e.g. (10,15) d01 (tuple, optional): cell size in y and x p01 (tuple, optional): position of top left edge Returns: int: 1st index int: 2nd index array: sub array Example: >>> a = np.array([[[0,1],[1,2]],[[2,3],[3,4]]]) >>> gen = subCell2DGenerator(a,(2,2)) >>> for i,j, sub in gen: print( i,j, sub ) 0 0 [[[0 1]]] 0 1 [[[1 2]]] 1 0 [[[2 3]]] 1 1 [[[3 4]]]
entailment
def subCell2DSlices(arr, shape, d01=None, p01=None):
    '''Generator to access evenly sized sub-cells in a 2d array

    Args:
        shape (tuple): number of sub-cells in y,x e.g. (10,15)
        d01 (tuple, optional): cell size in y and x
        p01 (tuple, optional): position of top left edge

    Returns:
        int: 1st index
        int: 2nd index
        slice: 1st dimension
        slice: 2nd dimension
    '''
    if p01 is not None:
        yinit, xinit = p01
    else:
        xinit, yinit = 0, 0
    x, y = xinit, yinit

    g0, g1 = shape
    s0, s1 = arr.shape[:2]
    if d01 is not None:
        d0, d1 = d01
    else:
        d0, d1 = s0 / g0, s1 / g1

    y1 = d0 + yinit
    for i in range(g0):
        for j in range(g1):
            x1 = x + d1
            yield (i, j, slice(max(0, _rint(y)),
                               max(0, _rint(y1))),
                   slice(max(0, _rint(x)),
                         max(0, _rint(x1))))
            x = x1
        y = y1
        y1 = y + d0
        x = xinit
Generator to access evenly sized sub-cells in a 2d array Args: shape (tuple): number of sub-cells in y,x e.g. (10,15) d01 (tuple, optional): cell size in y and x p01 (tuple, optional): position of top left edge Returns: int: 1st index int: 2nd index slice: 1st dimension slice: 2nd dimension
entailment
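A sketch iterating the slices directly (the _rint rounding helper is assumed to live in the same module):

import numpy as np

arr = np.arange(36).reshape(6, 6)
for i, j, s0, s1 in subCell2DSlices(arr, (2, 3)):
    print(i, j, arr[s0, s1].shape)   # six (3, 2) sub-cells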
def subCell2DCoords(*args, **kwargs): '''Same as subCell2DSlices but returning coordinates Example: g = subCell2DCoords(arr, shape) for x, y in g: plt.plot(x, y) ''' for _, _, s0, s1 in subCell2DSlices(*args, **kwargs): yield ((s1.start, s1.start, s1.stop), (s0.start, s0.stop, s0.stop))
Same as subCell2DSlices but returning coordinates Example: g = subCell2DCoords(arr, shape) for x, y in g: plt.plot(x, y)
entailment
def subCell2DFnArray(arr, fn, shape, dtype=None, **kwargs): ''' Return array where every cell is the output of a given cell function Args: fn (function): ...to be executed on all sub-arrays Returns: array: value of every cell equals result of fn(sub-array) Example: mx = subCell2DFnArray(myArray, np.max, (10,6) ) - -> here mx is a 2d array containing all cell maxima ''' sh = list(arr.shape) sh[:2] = shape out = np.empty(sh, dtype=dtype) for i, j, c in subCell2DGenerator(arr, shape, **kwargs): out[i, j] = fn(c) return out
Return array where every cell is the output of a given cell function Args: fn (function): ...to be executed on all sub-arrays Returns: array: value of every cell equals result of fn(sub-array) Example: mx = subCell2DFnArray(myArray, np.max, (10,6) ) - -> here mx is a 2d array containing all cell maxima
entailment
def defocusThroughDepth(u, uf, f, fn, k=2.355):
    '''
    return the defocus (mm std) through DOF

    u -> scene point (depth value)
    uf -> in-focus position (the distance at which the scene point
          should be placed in order to be focused)
    f -> focal length
    k -> camera dependent constant (transferring blur circle to PSF),
         2.355 would be the FWHM of a 2d Gaussian
    fn -> f-number (relative aperture)
    equation (3) taken from http://linkinghub.elsevier.com/retrieve/pii/S0031320312004736
    Pertuz et.al. "Analysis of focus measure operators for shape-from-focus"

    all parameters should be in the same physical unit [mm]

    !! assumes spatially invariant blur
    '''
    # A = f/fn
    return (k / fn) * (f**2 * abs(u - uf)) / (u * (uf - f))
return the defocus (mm std) through DOF u -> scene point (depth value) uf -> in-focus position (the distance at which the scene point should be placed in order to be focused) f -> focal length k -> camera dependent constant (transferring blur circle to PSF), 2.355 would be the FWHM of a 2d Gaussian fn -> f-number (relative aperture) equation (3) taken from http://linkinghub.elsevier.com/retrieve/pii/S0031320312004736 Pertuz et.al. "Analysis of focus measure operators for shape-from-focus" all parameters should be in the same physical unit [mm] !! assumes spatially invariant blur
entailment
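A worked example with illustrative numbers (all values are assumptions, in mm):

f = 16.0     # focal length
fn = 2.8     # f-number
uf = 500.0   # in-focus distance
u = 520.0    # actual scene depth

# (2.355/2.8) * (16**2 * 20) / (520 * 484) ~= 0.017 mm PSF std:
print(defocusThroughDepth(u, uf, f, fn))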
def extendArrayForConvolution(arr, kernelXY, modex='reflect', modey='reflect'):
    '''
    extend a given array with the right border handling for convolution

    --> in opposite to skimage and scipy this function allows choosing
        different modes ('reflect', 'wrap') in x and y direction

    only supports 'wrap' and 'reflect' at the moment
    '''
    (kx, ky) = kernelXY
    kx //= 2
    ky //= 2

    # indexing 0:-0 leads to empty arrays and not the whole thing
    # make it easy by assuming ksize=1 and removing the extra size later:
    nokx = kx == 0
    noky = ky == 0
    if nokx:
        kx = 1
    if noky:
        ky = 1

    s0, s1 = arr.shape
    assert ky < s0
    assert kx < s1

    arr2 = np.zeros((s0 + 2 * ky, s1 + 2 * kx), dtype=arr.dtype)
    arr2[ky:-ky, kx:-kx] = arr

    # original array:
    t = arr[:ky]                 # TOP
    rb = arr[-1:-ky - 1:-1]      # reversed BOTTOM
    rt = arr[ky - 1::-1]         # reversed TOP
    rr = arr[:, -1:-kx - 1:-1]   # reversed RIGHT
    l = arr[:, :kx]              # LEFT

    # extended array:
    tm2 = arr2[:ky, kx:-kx]      # TOP-MIDDLE
    bm2 = arr2[-ky:, kx:-kx]     # BOTTOM-MIDDLE
    tl2 = arr2[:ky, :kx]         # TOP-LEFT
    bl2 = arr2[-ky:, :kx]        # BOTTOM-LEFT
    tr2 = arr2[:ky, -kx:]        # TOP-RIGHT
    br2 = arr2[-ky:, -kx:]       # BOTTOM-RIGHT

    # fill edges ('warp' was used inconsistently for 'wrap' before):
    if modey == 'wrap':
        tm2[:] = t
        bm2[:] = rb
        tl2[:] = arr2[2 * ky:ky:-1, :kx]
        bl2[:] = arr2[-ky - 1:-2 * ky - 1:-1, :kx]
        # TODO: do other options!!!
    elif modey == 'reflect':
        tm2[:] = rt
        bm2[:] = rb

        if modex == 'reflect':
            tl2[:] = arr[ky - 1::-1, kx - 1::-1]
            bl2[:] = arr[-1:-ky - 1:-1, kx - 1::-1]
            tr2[:] = arr[:ky, -kx:][::-1, ::-1]
            br2[:] = arr[-ky:, -kx:][::-1, ::-1]
        else:  # wrap
            tl2[:] = arr[ky - 1::-1, -kx:]
            bl2[:] = arr[-1:-ky - 1:-1, -kx:]
            tr2[:] = arr[ky - 1::-1, :kx]
            br2[:] = arr[-1:-ky - 1:-1, :kx]
    else:
        raise Exception('modey not supported')

    if modex == 'wrap':
        arr2[ky:-ky, kx - 1::-1] = rr
        arr2[ky:-ky, -kx:] = l
    elif modex == 'reflect':
        arr2[ky:-ky, :kx] = l[:, ::-1]
        arr2[ky:-ky, -kx:] = rr
    else:
        raise Exception('modex not supported')

    if nokx:
        arr2 = arr2[:, 1:-1]
    if noky:
        arr2 = arr2[1:-1]

    return arr2
extends a given array with the right border handling for convolution --> in opposite to skimage and scipy this function allows choosing different modes ('reflect', 'wrap') in x and y direction; only supports 'wrap' and 'reflect' at the moment
entailment
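A sketch checking the pure-reflect case against numpy's symmetric padding, which should be equivalent for this configuration:

import numpy as np

arr = np.arange(16.).reshape(4, 4)
ext = extendArrayForConvolution(arr, (3, 3), modex='reflect', modey='reflect')
print(ext.shape)                                 # (6, 6) for a 3x3 kernel
assert np.array_equal(ext, np.pad(arr, 1, mode='symmetric'))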
def calibrate(self, board_size=(8, 6), method='Chessboard', images=[],
              max_images=100, sensorSize_mm=None, detect_sensible=True):
    '''
    sensorSize_mm - (width, height) [mm]
        Physical size of the sensor
    '''
    self._coeffs = {}
    self.opts = {'foundPattern': [],  # whether pattern could be found for image
                 'size': board_size,
                 'imgs': [],  # list of either np.arrays or img paths
                 # list of 2d coords. of found pattern features (e.g.
                 # chessboard corners)
                 'imgPoints': []
                 }

    self._detect_sensible = detect_sensible
    self.method = {'Chessboard': self._findChessboard,
                   'Symmetric circles': self._findSymmetricCircles,
                   'Asymmetric circles': self._findAsymmetricCircles,
                   'Manual': None
                   # TODO: 'Image grid': FindGridInImage
                   }[method]

    self.max_images = max_images
    self.findCount = 0
    self.apertureSize = sensorSize_mm

    self.objp = self._mkObjPoints(board_size)
    if method == 'Asymmetric circles':
        # this pattern has its points (every 2nd row) displaced, so:
        i = self.objp[:, 1] % 2 == 1
        self.objp[:, 0] *= 2
        self.objp[i, 0] += 1

    # Arrays to store object points and image points from all the images.
    self.objpoints = []  # 3d points in real world space

    self.mapx, self.mapy = None, None

    for n, i in enumerate(images):
        print('working on image %s' % n)
        if self.addImg(i):
            print('OK')
sensorSize_mm - (width, height) [mm] Physical size of the sensor
entailment
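A hypothetical calibration workflow; the class name LensDistortion and the image file names are assumptions:

cal = LensDistortion()   # hypothetical name of the class holding this method
cal.calibrate(board_size=(8, 6), method='Chessboard',
              images=['cb00.png', 'cb01.png', 'cb02.png'],
              sensorSize_mm=(22.3, 14.9))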
def addPoints(self, points, board_size=None): ''' add corner points directly instead of extracting them from image points = ( (0,1), (...),... ) [x,y] ''' self.opts['foundPattern'].append(True) self.findCount += 1 if board_size is not None: self.objpoints.append(self._mkObjPoints(board_size)) else: self.objpoints.append(self.objp) s0 = points.shape[0] self.opts['imgPoints'].append(np.asarray(points).reshape( s0, 1, 2).astype(np.float32))
add corner points directly instead of extracting them from image points = ( (0,1), (...),... ) [x,y]
entailment
def setImgShape(self, shape): ''' image shape must be known for calculating camera matrix if method==Manual and addPoints is used instead of addImg this method must be called before .coeffs are obtained ''' self.img = type('Dummy', (object,), {}) # if imgProcessor.ARRAYS_ORDER_IS_XY: # self.img.shape = shape[::-1] # else: self.img.shape = shape
image shape must be known for calculating camera matrix if method==Manual and addPoints is used instead of addImg this method must be called before .coeffs are obtained
entailment
def addImgStream(self, img):
    '''
    add images using a continuous stream
    - stop when the maximum number of images is reached
    '''
    if self.findCount > self.max_images:
        raise EnoughImages('have enough images')
    return self.addImg(img)
add images using a continuous stream - stop when the maximum number of images is reached
entailment
def addImg(self, img):
    '''
    add one chessboard image for detecting lens distortion
    '''
    self.img = imread(img, 'gray', 'uint8')

    didFindCorners, corners = self.method()
    self.opts['foundPattern'].append(didFindCorners)

    if didFindCorners:
        self.findCount += 1
        self.objpoints.append(self.objp)
        self.opts['imgPoints'].append(corners)
    return didFindCorners
add one chessboard image for detecting lens distortion
entailment
def getCoeffStr(self):
    '''
    get the distortion coeffs in a formatted string
    '''
    txt = ''
    for key, val in self.coeffs.items():
        txt += '%s = %s\n' % (key, val)
    return txt
get the distortion coeffs in a formatted string
entailment
def drawChessboard(self, img=None):
    '''
    draw a grid fitting to the last added image
    on this one or an extra image

    img == None  -> draw chessboard on the last added image
    img == False -> draw chessboard on an empty image
    else         -> draw chessboard on the given image
    '''
    assert self.findCount > 0, 'cannot draw chessboard if nothing found'
    if img is None:
        img = self.img
    elif isinstance(img, bool) and not img:
        img = np.zeros(shape=(self.img.shape), dtype=self.img.dtype)
    else:
        img = imread(img, dtype='uint8')
    gray = False
    if img.ndim == 2:
        gray = True
        # need a color 8 bit image
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    # Draw and display the corners
    cv2.drawChessboardCorners(img, self.opts['size'],
                              self.opts['imgPoints'][-1],
                              self.opts['foundPattern'][-1])
    if gray:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return img
draw a grid fitting to the last added image on this one or an extra image; img == None -> draw chessboard on the last added image; img == False -> draw chessboard on an empty image; else -> draw chessboard on the given image
entailment
def writeToFile(self, filename, saveOpts=False):
    '''
    write the distortion coeffs to file

    saveOpts --> whether to save calibration options
                 (and not just results)
    '''
    try:
        if not filename.endswith('.%s' % self.ftype):
            filename += '.%s' % self.ftype
        s = {'coeffs': self.coeffs}
        if saveOpts:
            s['opts'] = self.opts
        np.savez(filename, **s)
        return filename
    except AttributeError:
        raise Exception(
            'need to calibrate camera before calibration can be saved to file')
write the distortion coeffs to file saveOpts --> whether to save calibration options (and not just results)
entailment
def readFromFile(self, filename):
    '''
    read the distortion coeffs from file
    '''
    s = dict(np.load(filename))
    try:
        self.coeffs = s['coeffs'][()]
    except KeyError:
        # LEGACY - remove
        self.coeffs = s
    try:
        self.opts = s['opts'][()]
    except KeyError:
        pass
    return self.coeffs
read the distortion coeffs from file
entailment
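A save/load roundtrip sketch, continuing the hypothetical cal instance from above (class name and file type are assumptions; the instance must have been calibrated first):

path = cal.writeToFile('my_cam', saveOpts=True)   # appends self.ftype, e.g. '.npz'
cal2 = LensDistortion()                           # hypothetical class name
coeffs = cal2.readFromFile(path)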
def undistortPoints(self, points, keepSize=False):
    '''
    points --> list of (x,y) coordinates
    '''
    s = self.img.shape
    cam = self.coeffs['cameraMatrix']
    d = self.coeffs['distortionCoeffs']

    pts = np.asarray(points, dtype=np.float32)
    if pts.ndim == 2:
        pts = np.expand_dims(pts, axis=0)

    (newCameraMatrix, roi) = cv2.getOptimalNewCameraMatrix(cam,
                                                           d, s[::-1], 1,
                                                           s[::-1])
    if not keepSize:
        xx, yy = roi[:2]
        # shift all points by the ROI offset
        # (the previous 'pts[0, 0] -= xx' only moved the first point):
        pts[..., 0] -= xx
        pts[..., 1] -= yy

    return cv2.undistortPoints(pts, cam, d, P=newCameraMatrix)
points --> list of (x,y) coordinates
entailment
def correct(self, image, keepSize=False, borderValue=0): ''' remove lens distortion from given image ''' image = imread(image) (h, w) = image.shape[:2] mapx, mapy = self.getUndistortRectifyMap(w, h) self.img = cv2.remap(image, mapx, mapy, cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT, borderValue=borderValue ) if not keepSize: xx, yy, ww, hh = self.roi self.img = self.img[yy: yy + hh, xx: xx + ww] return self.img
remove lens distortion from given image
entailment
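Undistorting an image and matching coordinates with the same calibration (the cal instance and the file name are placeholders from the earlier sketch):

img_corr = cal.correct('distorted.png', keepSize=True)
pts = cal.undistortPoints([(120.5, 230.0), (640.0, 480.0)], keepSize=True)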
def distortImage(self, image): ''' opposite of 'correct' ''' image = imread(image) (imgHeight, imgWidth) = image.shape[:2] mapx, mapy = self.getDistortRectifyMap(imgWidth, imgHeight) return cv2.remap(image, mapx, mapy, cv2.INTER_LINEAR, borderValue=(0, 0, 0))
opposite of 'correct'
entailment
def getCameraParams(self): ''' value positions based on http://docs.opencv.org/modules/imgproc/doc/geometric_transformations.html#cv.InitUndistortRectifyMap ''' c = self.coeffs['cameraMatrix'] fx = c[0][0] fy = c[1][1] cx = c[0][2] cy = c[1][2] k1, k2, p1, p2, k3 = tuple(self.coeffs['distortionCoeffs'].tolist()[0]) return fx, fy, cx, cy, k1, k2, k3, p1, p2
value positions based on http://docs.opencv.org/modules/imgproc/doc/geometric_transformations.html#cv.InitUndistortRectifyMap
entailment
def standardUncertainties(self, sharpness=0.5):
    '''
    sharpness -> image sharpness // std of Gaussian PSF [px]

    returns the combined standard uncertainty for the x and y component,
    built from
    1. px-size changes (due to deflection)
    2. the reprojection error
    '''
    height, width = self.coeffs['shape']
    fx, fy = self.getDeflection(width, height)

    # RMSE of imgPoints - projectedPoints:
    r = self.coeffs['reprojectionError']
    t = (sharpness**2 + r**2)**0.5
    return fx * t, fy * t
sharpness -> image sharpness // std of Gaussian PSF [px] returns the combined standard uncertainty for the x and y component, built from 1. px-size changes (due to deflection) 2. the reprojection error
entailment
def edgesFromBoolImg(arr, dtype=None): ''' takes a binary image (usually a mask) and returns the edges of the object inside ''' out = np.zeros_like(arr, dtype=dtype) _calc(arr, out) _calc(arr.T, out.T) return out
takes a binary image (usually a mask) and returns the edges of the object inside
entailment
def draw_matches(img1, kp1, img2, kp2, matches, color=None, thickness=2, r=15):
    """Draws lines between matching keypoints of two images.
    Keypoints not in a matching pair are not drawn.

    Places the images side by side in a new image and draws circles
    around each keypoint, with line segments connecting matching pairs.
    You can tweak the r and thickness values as needed.

    Args:
        img1: An openCV image ndarray in a grayscale or color format.
        kp1: A list of cv2.KeyPoint objects for img1.
        img2: An openCV image ndarray of the same format and with the same
        element type as img1.
        kp2: A list of cv2.KeyPoint objects for img2.
        matches: A list of DMatch objects whose trainIdx attribute refers to
        img1 keypoints and whose queryIdx attribute refers to img2 keypoints.
        color: The color of the circles and connecting lines drawn on the images.
        A 3-tuple for color images, a scalar for grayscale images. If None, these
        values are randomly generated.
    """
    # We're drawing them side by side. Get dimensions accordingly.
    # Handle both color and grayscale images.
    if len(img1.shape) == 3:
        new_shape = (max(img1.shape[0], img2.shape[0]),
                     img1.shape[1] + img2.shape[1],
                     img1.shape[2])
    elif len(img1.shape) == 2:
        new_shape = (max(img1.shape[0], img2.shape[0]),
                     img1.shape[1] + img2.shape[1])
    new_img = np.zeros(new_shape, type(img1.flat[0]))
    # Place images onto the new image.
    new_img[0:img1.shape[0], 0:img1.shape[1]] = img1
    new_img[0:img2.shape[0],
            img1.shape[1]:img1.shape[1] + img2.shape[1]] = img2

    # Draw lines between matches.  Make sure to offset kp coords in second
    # image appropriately.
    if color:
        c = color
    for m in matches:
        # Generate random color for RGB/BGR and grayscale images as needed.
        if not color:
            c = np.random.randint(0, 256, 3) if len(
                img1.shape) == 3 else np.random.randint(0, 256)
        # The keypoint locs are stored as a tuple of floats; cv2.line(), like
        # most other things, wants locs as a tuple of ints.
        end1 = tuple(np.round(kp1[m.trainIdx].pt).astype(int))
        end2 = tuple(np.round(kp2[m.queryIdx].pt).astype(int) +
                     np.array([img1.shape[1], 0]))
        cv2.line(new_img, end1, end2, c, thickness)
        cv2.circle(new_img, end1, r, c, thickness)
        cv2.circle(new_img, end2, r, c, thickness)
    return new_img
Draws lines between matching keypoints of two images. Keypoints not in a matching pair are not drawn. Places the images side by side in a new image and draws circles around each keypoint, with line segments connecting matching pairs. You can tweak the r and thickness values as needed. Args: img1: An openCV image ndarray in a grayscale or color format. kp1: A list of cv2.KeyPoint objects for img1. img2: An openCV image ndarray of the same format and with the same element type as img1. kp2: A list of cv2.KeyPoint objects for img2. matches: A list of DMatch objects whose trainIdx attribute refers to img1 keypoints and whose queryIdx attribute refers to img2 keypoints. color: The color of the circles and connecting lines drawn on the images. A 3-tuple for color images, a scalar for grayscale images. If None, these values are randomly generated.
entailment
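A usage sketch with ORB features; the file names are placeholders. Note the argument order of BFMatcher.match, so that queryIdx indexes kp2 and trainIdx indexes kp1, as draw_matches expects:

import cv2

img1 = cv2.imread('scene.png', cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread('scene_shifted.png', cv2.IMREAD_GRAYSCALE)

orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)

bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = bf.match(des2, des1)   # query=img2 descs, train=img1 descs

vis = draw_matches(img1, kp1, img2, kp2, matches, color=255)  # scalar: grayscale
cv2.imwrite('matches.png', vis)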
def _scaleTo8bit(self, img):
    '''
    The pattern comparator needs images to be 8 bit
    -> find the range of the signal and scale the image
    '''
    r = scaleSignalCutParams(img, 0.02)  # , nSigma=3)
    self.signal_ranges.append(r)
    return toUIntArray(img, dtype=np.uint8, range=r)
The pattern comparator needs images to be 8 bit -> find the range of the signal and scale the image
entailment
def findHomography(self, img, drawMatches=False):
    '''
    Find homography of the image through pattern
    comparison with the base image
    '''
    print("\t Finding points...")
    # Find points in the next frame
    img = self._prepareImage(img)
    features, descs = self.detector.detectAndCompute(img, None)

    ######################
    # TODO: CURRENTLY BROKEN IN OPENCV3.1 - WAITING FOR NEW RELEASE 3.2
    # matches = self.matcher.knnMatch(descs, self.base_descs, k=3)
    # matches_subset = self._filterMatches(matches)
    # a working alternative (for now):
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches_subset = bf.match(descs, self.base_descs)
    ######################

    if not len(matches_subset):
        raise Exception('no matches found')
    print("\t Filtered Match Count: ", len(matches_subset))

    distance = sum([m.distance for m in matches_subset])
    print("\t Distance from Key Image: ", distance)

    averagePointDistance = distance / len(matches_subset)
    print("\t Average Distance: ", averagePointDistance)

    kp1 = []
    kp2 = []
    for match in matches_subset:
        kp1.append(self.base_features[match.trainIdx])
        kp2.append(features[match.queryIdx])

    p1 = np.array([k.pt for k in kp1])
    p2 = np.array([k.pt for k in kp2])

    H, status = cv2.findHomography(p1, p2,
                                   cv2.RANSAC,  # method
                                   5.0  # max reprojection error (1...10)
                                   )
    if status is None:
        raise Exception('no homography found')
    else:
        inliers = np.sum(status)
        print('%d / %d inliers/matched' % (inliers, len(status)))

        inlierRatio = inliers / len(status)
        if self.minInlierRatio > inlierRatio or inliers < self.minInliers:
            raise Exception('bad fit!')

    # scale with _fH, if image was resized
    # see http://answers.opencv.org/question/26173/the-relationship-between-homography-matrix-and-scaling-images/
    s = np.eye(3, 3)
    s[0, 0] = 1 / self._fH
    s[1, 1] = 1 / self._fH
    H = s.dot(H).dot(np.linalg.inv(s))

    if drawMatches:
        img = draw_matches(self.base8bit, self.base_features, img,
                           features, matches_subset[:20],
                           thickness=5)

    return (H, inliers, inlierRatio, averagePointDistance,
            img, features, descs, len(matches_subset))
Find homography of the image through pattern comparison with the base image
entailment
def patCircles(s0): '''make circle array''' arr = np.zeros((s0,s0), dtype=np.uint8) col = 255 for rad in np.linspace(s0,s0/7.,10): cv2.circle(arr, (0,0), int(round(rad)), color=col, thickness=-1, lineType=cv2.LINE_AA ) if col: col = 0 else: col = 255 return arr.astype(float)
make circle array
entailment
def patCrossLines(s0): '''make line pattern''' arr = np.zeros((s0,s0), dtype=np.uint8) col = 255 t = int(s0/100.) for pos in np.logspace(0.01,1,10): pos = int(round((pos-0.5)*s0/10.)) cv2.line(arr, (0,pos), (s0,pos), color=col, thickness=t, lineType=cv2.LINE_AA ) cv2.line(arr, (pos,0), (pos,s0), color=col, thickness=t, lineType=cv2.LINE_AA ) return arr.astype(float)
make line pattern
entailment
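A short sketch rendering both test patterns to disk (the file names are placeholders):

import cv2

cv2.imwrite('pattern_circles.png', patCircles(512).astype('uint8'))
cv2.imwrite('pattern_lines.png', patCrossLines(512).astype('uint8'))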