Dataset schema (column name, type, observed min/max per field):

repository_name             string   7 - 55 chars
func_path_in_repository     string   4 - 223 chars
func_name                   string   1 - 134 chars
whole_func_string           string   75 - 104k chars
language                    string   1 distinct value
func_code_string            string   75 - 104k chars
func_code_tokens            list     19 - 28.4k items
func_documentation_string   string   1 - 46.9k chars
func_documentation_tokens   list     1 - 1.97k items
split_name                  string   1 distinct value
func_code_url               string   87 - 315 chars
radjkarl/imgProcessor
imgProcessor/generate/patterns.py
patStarLines
import numpy as np
import cv2


def patStarLines(s0):
    '''make line pattern'''
    arr = np.zeros((s0, s0), dtype=np.uint8)
    col = 255
    t = int(s0 / 100.)  # line thickness scales with image size
    for pos in np.linspace(0, np.pi / 2, 15):
        p0 = int(round(np.sin(pos) * s0 * 2))
        p1 = int(round(np.cos(pos) * s0 * 2))
        cv2.line(arr, (0, 0), (p0, p1),
                 color=col, thickness=t,
                 lineType=cv2.LINE_AA)
    return arr.astype(float)
python
make line pattern
[ "make", "line", "pattern" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/generate/patterns.py#L68-L80
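A minimal usage sketch for this row's function (the import path is inferred from func_path_in_repository; the size is an arbitrary example):

from imgProcessor.generate.patterns import patStarLines

pattern = patStarLines(512)          # 512x512 float image with radiating lines
print(pattern.shape, pattern.max())  # (512, 512) 255.0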
radjkarl/imgProcessor
imgProcessor/generate/patterns.py
patSiemensStar
import numpy as np
import cv2


def patSiemensStar(s0, n=72, vhigh=255, vlow=0, antiasing=False):
    '''make Siemens star pattern'''
    arr = np.full((s0, s0), vlow, dtype=np.uint8)
    c = int(round(s0 / 2.))
    s = 2 * np.pi / (2 * n)  # angular width of one spoke
    step = 0
    for i in range(2 * n):
        p0 = round(c + np.sin(step) * 2 * s0)
        p1 = round(c + np.cos(step) * 2 * s0)
        step += s
        p2 = round(c + np.sin(step) * 2 * s0)
        p3 = round(c + np.cos(step) * 2 * s0)
        # fill alternating triangular spokes radiating from the center:
        pts = np.array(((c, c), (p0, p1), (p2, p3)), dtype=int)
        cv2.fillConvexPoly(arr, pts,
                           color=vhigh if i % 2 else vlow,
                           lineType=cv2.LINE_AA if antiasing else 0)
    arr[c, c] = 0
    return arr.astype(float)
python
make Siemens star pattern
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/generate/patterns.py#L83-L107
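A usage sketch (inferred import path; parameter values are arbitrary):

from imgProcessor.generate.patterns import patSiemensStar

star = patSiemensStar(512, n=36, antiasing=True)  # 36 spoke pairs, antialiased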
radjkarl/imgProcessor
imgProcessor/generate/patterns.py
patText
import numpy as np
import cv2


def patText(s0):
    '''make text pattern'''
    arr = np.zeros((s0, s0), dtype=np.uint8)
    s = int(round(s0 / 100.))  # font scale and thickness follow image size
    p1 = 0
    pp1 = int(round(s0 / 10.))
    for pos0 in np.linspace(0, s0, 10):
        cv2.putText(arr, 'helloworld', (p1, int(round(pos0))),
                    cv2.FONT_HERSHEY_COMPLEX_SMALL,
                    fontScale=s, color=255, thickness=s,
                    lineType=cv2.LINE_AA)
        # alternate the x offset of every text row:
        if p1:
            p1 = 0
        else:
            p1 = pp1
    return arr.astype(float)
python
make text pattern
[ "make", "text", "pattern" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/generate/patterns.py#L140-L155
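A usage sketch (inferred import path; s0 should be large enough that the derived font scale s0/100 is at least 1):

from imgProcessor.generate.patterns import patText

txt = patText(1024)  # 1024x1024 float image tiled with 'helloworld'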
radjkarl/imgProcessor
imgProcessor/filters/removeSinglePixels.py
removeSinglePixels
def removeSinglePixels(img):
    '''
    img - boolean array
    remove all pixels that have no neighbour
    '''
    gx = img.shape[0]
    gy = img.shape[1]
    for i in range(gx):
        for j in range(gy):
            if img[i, j]:
                found_neighbour = False
                for ii in range(max(0, i - 1), min(gx, i + 2)):
                    for jj in range(max(0, j - 1), min(gy, j + 2)):
                        if ii == i and jj == j:
                            continue
                        if img[ii, jj]:
                            found_neighbour = True
                            break
                    if found_neighbour:
                        break
                if not found_neighbour:
                    img[i, j] = 0
python
img - boolean array remove all pixels that have no neighbour
[ "img", "-", "boolean", "array", "remove", "all", "pixels", "that", "have", "no", "neighbour" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/filters/removeSinglePixels.py#L5-L33
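A small self-contained usage sketch (the import path is inferred from the row's file path):

import numpy as np
from imgProcessor.filters.removeSinglePixels import removeSinglePixels

img = np.zeros((5, 5), dtype=bool)
img[0, 0] = True               # isolated pixel -> removed
img[2, 2] = img[2, 3] = True   # pixels with a neighbour -> kept
removeSinglePixels(img)        # modifies [img] in place
print(img.sum())               # 2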
radjkarl/imgProcessor
imgProcessor/interpolate/interpolateCircular2dStructuredIDW.py
interpolateCircular2dStructuredIDW
import numpy as np
from math import atan2


def interpolateCircular2dStructuredIDW(grid, mask, kernel=15, power=2,
                                       fr=1, fphi=1, cx=0, cy=0):
    '''
    same as interpolate2dStructuredIDW but calculating the distance
    to the neighbours using polar coordinates

    fr, fphi --> weight factors for radius and angle differences
    cx,cy -> polar center of the array e.g. middle->(sx//2+1,sy//2+1)
    '''
    gx = grid.shape[0]
    gy = grid.shape[1]  # the original used shape[0] here, a bug for non-square grids
    # FOR EVERY PIXEL
    for i in range(gx):
        for j in range(gy):
            if mask[i, j]:
                # clamp the kernel window to the array borders
                # (the original clamped ymx against gx, another square-grid bug):
                xmn = max(0, i - kernel)
                xmx = min(gx, i + kernel)
                ymn = max(0, j - kernel)
                ymx = min(gy, j + kernel)
                sumWi = 0.0
                value = 0.0
                # radius and angle to polar center:
                R = ((i - cx)**2 + (j - cy)**2)**0.5
                PHI = atan2(j - cy, i - cx)
                # FOR EVERY NEIGHBOUR IN KERNEL
                for xi in range(xmn, xmx):
                    for yi in range(ymn, ymx):
                        if (xi != i or yi != j) and not mask[xi, yi]:
                            nR = ((xi - cx)**2 + (yi - cy)**2)**0.5
                            dr = R - nR
                            # average radius between both points:
                            midR = 0.5 * (R + nR)
                            # angle of neighbour point:
                            nphi = atan2(yi - cy, xi - cx)
                            # relative angle between both points,
                            # scaled to an arc length:
                            dphi = min((2 * np.pi) - abs(PHI - nphi),
                                       abs(PHI - nphi))
                            dphi *= midR
                            dist = ((fr * dr)**2 + (fphi * dphi)**2)**2
                            wi = 1 / dist**(0.5 * power)
                            sumWi += wi
                            value += wi * grid[xi, yi]
                if sumWi:
                    grid[i, j] = value / sumWi
    return grid
python
same as interpolate2dStructuredIDW but calculating the distance to the neighbours using polar coordinates fr, fphi --> weight factors for radius and angle differences cx,cy -> polar center of the array e.g. middle->(sx//2+1,sy//2+1)
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/interpolate/interpolateCircular2dStructuredIDW.py#L8-L69
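A usage sketch (inferred import path; grid, mask and the polar center are arbitrary examples):

import numpy as np
from imgProcessor.interpolate.interpolateCircular2dStructuredIDW import \
    interpolateCircular2dStructuredIDW

grid = np.random.rand(64, 64)
mask = np.zeros_like(grid, dtype=bool)
mask[30:34, 30:34] = True   # True marks cells to be filled
filled = interpolateCircular2dStructuredIDW(grid, mask, kernel=10,
                                            cx=32, cy=32)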
radjkarl/imgProcessor
imgProcessor/interpolate/interpolate2dStructuredCrossAvg.py
interpolate2dStructuredCrossAvg
import numpy as np


def interpolate2dStructuredCrossAvg(grid, mask, kernel=15, power=2):
    '''
    useful if large empty areas need to be filled
    '''
    # preallocated buffers for the 4 cross directions
    # (_calc is a helper defined in the same module):
    vals = np.empty(shape=4, dtype=grid.dtype)
    dist = np.empty(shape=4, dtype=np.uint16)
    weights = np.empty(shape=4, dtype=np.float32)
    valid = np.empty(shape=4, dtype=bool)

    return _calc(grid, mask, power, kernel,
                 vals, dist, weights, valid)
python
useful if large empty areas need to be filled
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/interpolate/interpolate2dStructuredCrossAvg.py#L7-L19
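A usage sketch (inferred import path; the kernel must be large enough to bridge the empty block):

import numpy as np
from imgProcessor.interpolate.interpolate2dStructuredCrossAvg import \
    interpolate2dStructuredCrossAvg

grid = np.random.rand(100, 100).astype(np.float32)
mask = np.zeros_like(grid, dtype=bool)
mask[20:60, 20:60] = True   # a large empty block to fill
filled = interpolate2dStructuredCrossAvg(grid, mask, kernel=45)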
radjkarl/imgProcessor
imgProcessor/utils/growPositions.py
growPositions
import numpy as np


def growPositions(ksize):
    '''
    return all positions around central point (0,0) for a given kernel size
    positions grow from smallest to biggest distances

    returns [positions] and [distances] from central cell
    '''
    i = ksize * 2 + 1
    kk = np.ones((i, i), dtype=bool)
    x, y = np.where(kk)
    pos = np.empty(shape=(i, i, 2), dtype=int)
    pos[:, :, 0] = x.reshape(i, i) - ksize
    pos[:, :, 1] = y.reshape(i, i) - ksize

    dist = np.fromfunction(
        lambda x, y: ((x - ksize)**2 + (y - ksize)**2)**0.5, (i, i))

    # sort all kernel positions by distance; [1:] drops the central cell:
    pos = np.dstack(
        np.unravel_index(np.argsort(dist.ravel()), (i, i)))[0, 1:]

    pos0 = pos[:, 0]
    pos1 = pos[:, 1]
    return pos - ksize, dist[pos0, pos1]
python
return all positions around central point (0,0) for a given kernel size positions grow from smallest to biggest distances returns [positions] and [distances] from central cell
[ "return", "all", "positions", "around", "central", "point", "(", "0", "0", ")", "for", "a", "given", "kernel", "size", "positions", "grow", "from", "smallest", "to", "biggest", "distances", "returns", "[", "positions", "]", "and", "[", "distances", "]", "from", "central", "cell" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/utils/growPositions.py#L5-L31
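A usage sketch (inferred import path):

from imgProcessor.utils.growPositions import growPositions

pos, dist = growPositions(2)   # offsets within a 5x5 kernel
print(len(pos))                # 24 - the central cell is excluded
print(dist[:4])                # [1. 1. 1. 1.] - nearest neighbours come first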
radjkarl/imgProcessor
imgProcessor/reader/qImageToArray.py
qImageToArray
import numpy as np
# a Qt binding providing QtGui.QImage is required, e.g.:
# from PyQt5 import QtGui


def qImageToArray(qimage, dtype='array'):
    """Convert QImage to numpy.ndarray.  The dtype defaults to uint8
    for QImage.Format_Indexed8 or `bgra_dtype` (i.e. a record array)
    for 32bit color images.  You can pass a different dtype to use, or
    'array' to get a 3D uint8 array for color images."""
    result_shape = (qimage.height(), qimage.width())
    temp_shape = (qimage.height(),
                  qimage.bytesPerLine() * 8 // qimage.depth())
    if qimage.format() in (QtGui.QImage.Format_ARGB32_Premultiplied,
                           QtGui.QImage.Format_ARGB32,
                           QtGui.QImage.Format_RGB32):
        if dtype == 'rec':
            dtype = np.dtype({'b': (np.uint8, 0),
                              'g': (np.uint8, 1),
                              'r': (np.uint8, 2),
                              'a': (np.uint8, 3)})
        elif dtype == 'array':
            dtype = np.uint8
            result_shape += (4, )
            temp_shape += (4, )
    elif qimage.format() == QtGui.QImage.Format_Indexed8:
        dtype = np.uint8
    else:
        raise ValueError("qimage2numpy only supports 32bit and 8bit images")
    # FIXME: raise error if alignment does not match
    buf = qimage.bits().asstring(qimage.byteCount())
    result = np.frombuffer(buf, dtype).reshape(temp_shape)
    if result_shape != temp_shape:
        result = result[:, :result_shape[1]]
    if qimage.format() == QtGui.QImage.Format_RGB32 and dtype == np.uint8:
        # case byteorder == 'little'
        result = result[..., :3]
        # byteorder == 'big' -> get ARGB
        result = result[..., ::-1]
    return result
python
Convert QImage to numpy.ndarray. The dtype defaults to uint8 for QImage.Format_Indexed8 or `bgra_dtype` (i.e. a record array) for 32bit color images. You can pass a different dtype to use, or 'array' to get a 3D uint8 array for color images.
[ "Convert", "QImage", "to", "numpy", ".", "ndarray", ".", "The", "dtype", "defaults", "to", "uint8", "for", "QImage", ".", "Format_Indexed8", "or", "bgra_dtype", "(", "i", ".", "e", ".", "a", "record", "array", ")", "for", "32bit", "color", "images", ".", "You", "can", "pass", "a", "different", "dtype", "to", "use", "or", "array", "to", "get", "a", "3D", "uint8", "array", "for", "color", "images", "." ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/reader/qImageToArray.py#L8-L43
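A usage sketch assuming PyQt5 as the Qt binding (the package itself may use a different one):

from PyQt5 import QtGui
from imgProcessor.reader.qImageToArray import qImageToArray

qimg = QtGui.QImage(64, 48, QtGui.QImage.Format_RGB32)
qimg.fill(QtGui.QColor(255, 0, 0))
arr = qImageToArray(qimg)   # -> (48, 64, 3) uint8 array
print(arr[0, 0])            # [255   0   0]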
radjkarl/imgProcessor
imgProcessor/filters/varYSizeGaussianFilter.py
varYSizeGaussianFilter
import numpy as np
from scipy.ndimage import gaussian_filter


def varYSizeGaussianFilter(arr, stdyrange, stdx=0,
                           modex='wrap', modey='reflect'):
    '''
    applies gaussian_filter on input array
    but allowing variable ksize in y

    stdyrange(int) -> maximum ksize - ksizes will increase from 0 to given value
    stdyrange(tuple,list) -> minimum and maximum size as (mn,mx)
    stdyrange(np.array) -> all different ksizes in y
    '''
    assert arr.ndim == 2, 'only works on 2d arrays at the moment'

    s0 = arr.shape[0]
    # create stdys:
    if isinstance(stdyrange, np.ndarray):
        assert len(stdyrange) == s0, \
            '[stdyrange] needs to have same length as [arr]'
        stdys = stdyrange
        mx = stdys.max()  # the original left [mx] undefined on this branch
    else:
        if type(stdyrange) not in (list, tuple):
            stdyrange = (0, stdyrange)
        mn, mx = stdyrange
        stdys = np.linspace(mn, mx, s0)

    # prepare array for convolution (kernel sizes made odd);
    # extendArrayForConvolution and _2dConvolutionYdependentKernel
    # are helpers from the same package:
    kx = int(stdx * 2.5)
    kx += 1 - kx % 2
    ky = int(mx * 2.5)
    ky += 1 - ky % 2
    arr2 = extendArrayForConvolution(arr, (kx, ky), modex, modey)

    # create one convolution kernel per row:
    inp = np.zeros((ky, kx))
    inp[ky // 2, kx // 2] = 1
    kernels = np.empty((s0, ky, kx))
    for i in range(s0):
        stdy = stdys[i]
        kernels[i] = gaussian_filter(inp, (stdy, stdx))

    out = np.empty_like(arr)
    _2dConvolutionYdependentKernel(arr2, out, kernels)
    return out
python
applies gaussian_filter on input array but allowing variable ksize in y stdyrange(int) -> maximum ksize - ksizes will increase from 0 to given value stdyrange(tuple,list) -> minimum and maximum size as (mn,mx) stdyrange(np.array) -> all different ksizes in y
[ "applies", "gaussian_filter", "on", "input", "array", "but", "allowing", "variable", "ksize", "in", "y", "stdyrange", "(", "int", ")", "-", ">", "maximum", "ksize", "-", "ksizes", "will", "increase", "from", "0", "to", "given", "value", "stdyrange", "(", "tuple", "list", ")", "-", ">", "minimum", "and", "maximum", "size", "as", "(", "mn", "mx", ")", "stdyrange", "(", "np", ".", "array", ")", "-", ">", "all", "different", "ksizes", "in", "y" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/filters/varYSizeGaussianFilter.py#L9-L50
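A usage sketch (inferred import path; array size and std values are arbitrary):

import numpy as np
from imgProcessor.filters.varYSizeGaussianFilter import varYSizeGaussianFilter

arr = np.random.rand(100, 100)
# blur in y grows from std 0 at the top row to std 5 at the bottom row:
out = varYSizeGaussianFilter(arr, 5, stdx=2)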
radjkarl/imgProcessor
imgProcessor/equations/numbaGaussian2d.py
numbaGaussian2d
from math import exp


def numbaGaussian2d(psf, sy, sx):
    '''
    2d Gaussian to be used in numba code
    '''
    ps0, ps1 = psf.shape
    c0, c1 = ps0 // 2, ps1 // 2
    ssx = 2 * sx**2
    ssy = 2 * sy**2
    for i in range(ps0):
        for j in range(ps1):
            psf[i, j] = exp(-((i - c0)**2 / ssy
                              + (j - c1)**2 / ssx))
    # normalise to unit sum:
    psf /= psf.sum()
python
2d Gaussian to be used in numba code
[ "2d", "Gaussian", "to", "be", "used", "in", "numba", "code" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/equations/numbaGaussian2d.py#L10-L22
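A usage sketch (inferred import path; kernel shape and sigmas are arbitrary):

import numpy as np
from imgProcessor.equations.numbaGaussian2d import numbaGaussian2d

psf = np.empty((9, 9))
numbaGaussian2d(psf, sy=2, sx=1)   # fills [psf] in place
print(psf.sum())                   # ~1.0 (normalised)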
radjkarl/imgProcessor
imgProcessor/measure/SNR/estimateBackgroundLevel.py
estimateBackgroundLevel
import numpy as np


def estimateBackgroundLevel(img, image_is_artefact_free=False,
                            min_rel_size=0.05, max_abs_size=11):
    '''
    estimate background level through finding the most homogeneous area
    and taking its average

    min_rel_size - relative size of the examined area
    '''
    s0, s1 = img.shape[:2]
    s = min(max_abs_size, int(max(s0, s1) * min_rel_size))
    arr = np.zeros(shape=(s0 - 2 * s, s1 - 2 * s), dtype=img.dtype)
    # fill arr (_spatialStd is a helper from the same module):
    _spatialStd(img, arr, s)
    # most homogeneous area:
    i, j = np.unravel_index(arr.argmin(), arr.shape)
    sub = img[int(i + 0.5 * s):int(i + s * 1.5),
              int(j + s * 0.5):int(j + s * 1.5)]
    return np.median(sub)
python
estimate background level through finding the most homogeneous area and taking its average min_rel_size - relative size of the examined area
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/measure/SNR/estimateBackgroundLevel.py#L11-L31
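A usage sketch (inferred import path; the synthetic image is an arbitrary example):

import numpy as np
from imgProcessor.measure.SNR.estimateBackgroundLevel import \
    estimateBackgroundLevel

img = np.random.normal(100, 5, (200, 200))
img[50:150, 50:150] += 500            # bright foreground object
print(estimateBackgroundLevel(img))   # close to 100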
radjkarl/imgProcessor
imgProcessor/physics/emissivity_vs_angle.py
EL_Si_module
import numpy as np


def EL_Si_module():
    '''
    returns angular dependent EL emissivity of a PV module

    calculated from nanmedian(persp-corrected EL module/reference module)

    published in K. Bedrich: Quantitative Electroluminescence Measurement
    on PV devices, PhD Thesis, 2017
    '''
    arr = np.array([
        [2.5, 1.00281], [7.5, 1.00238], [12.5, 1.00174],
        [17.5, 1.00204], [22.5, 1.00054], [27.5, 0.998255],
        [32.5, 0.995351], [37.5, 0.991246], [42.5, 0.985304],
        [47.5, 0.975338], [52.5, 0.960455], [57.5, 0.937544],
        [62.5, 0.900607], [67.5, 0.844636], [72.5, 0.735028],
        [77.5, 0.57492], [82.5, 0.263214], [87.5, 0.123062]])
    angles = arr[:, 0]
    vals = arr[:, 1]
    # clip emissivities slightly above 1 (measurement noise):
    vals[vals > 1] = 1
    return angles, vals
python
returns angular dependent EL emissivity of a PV module calculated from nanmedian(persp-corrected EL module/reference module) published in K. Bedrich: Quantitative Electroluminescence Measurement on PV devices, PhD Thesis, 2017
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/physics/emissivity_vs_angle.py#L8-L42
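A usage sketch (inferred import path):

from imgProcessor.physics.emissivity_vs_angle import EL_Si_module

angles, emissivity = EL_Si_module()   # angles in degrees, emissivity <= 1
# emissivity stays near 1 up to ~50 deg and drops steeply beyond ~70 deg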
radjkarl/imgProcessor
imgProcessor/physics/emissivity_vs_angle.py
TG_glass
import numpy as np


def TG_glass():
    '''
    reflected temperature for 250DEG Glass
    published in
    IEC 62446-3 TS: Photovoltaic (PV) systems
    - Requirements for testing, documentation and maintenance
    - Part 3: Outdoor infrared thermography of photovoltaic
      modules and plants
    Page 12
    '''
    vals = np.array([(80, 0.88), (75, 0.88), (70, 0.88), (65, 0.88),
                     (60, 0.88), (55, 0.88), (50, 0.87), (45, 0.86),
                     (40, 0.85), (35, 0.83), (30, 0.80), (25, 0.76),
                     (20, 0.7), (15, 0.60), (10, 0.44)])
    # invert angle reference:
    vals[:, 0] = 90 - vals[:, 0]
    # make emissivity relative:
    vals[:, 1] /= vals[0, 1]
    return vals[:, 0], vals[:, 1]
python
reflected temperature for 250DEG Glass published in IEC 62446-3 TS: Photovoltaic (PV) systems - Requirements for testing, documentation and maintenance - Part 3: Outdoor infrared thermography of photovoltaic modules and plants, Page 12
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/physics/emissivity_vs_angle.py#L45-L72
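A usage sketch (inferred import path):

from imgProcessor.physics.emissivity_vs_angle import TG_glass

angles, rel_emissivity = TG_glass()
# angles are converted to 90 minus the tabulated values; emissivities are
# normalised to the first entry (10 deg from normal after conversion)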
radjkarl/imgProcessor
imgProcessor/camera/flatField/sensitivity.py
sensitivity
import numpy as np
from scipy.ndimage import median_filter


def sensitivity(imgs, bg=None):
    '''
    Extract pixel sensitivity from a set of homogeneously illuminated images

    This method is detailed in Section 5 of:
    ---
    K.Bedrich, M.Bokalic et al.:
    ELECTROLUMINESCENCE IMAGING OF PV DEVICES:
    ADVANCED FLAT FIELD CALIBRATION, 2017
    ---
    '''
    # getBackground, imread and fastMean are helpers from the same package:
    bg = getBackground(bg)
    for n, i in enumerate(imgs):
        i = imread(i, dtype=float)
        i -= bg
        # normalise by a smoothed version to isolate per-pixel deviations:
        smooth = fastMean(median_filter(i, 3))
        i /= smooth
        if n == 0:
            out = i
        else:
            out += i
    out /= (n + 1)
    return out
python
Extract pixel sensitivity from a set of homogeneously illuminated images This method is detailed in Section 5 of: --- K.Bedrich, M.Bokalic et al.: ELECTROLUMINESCENCE IMAGING OF PV DEVICES: ADVANCED FLAT FIELD CALIBRATION,2017 ---
[ "Extract", "pixel", "sensitivity", "from", "a", "set", "of", "homogeneously", "illuminated", "images", "This", "method", "is", "detailed", "in", "Section", "5", "of", ":", "---", "K", ".", "Bedrich", "M", ".", "Bokalic", "et", "al", ".", ":", "ELECTROLUMINESCENCE", "IMAGING", "OF", "PV", "DEVICES", ":", "ADVANCED", "FLAT", "FIELD", "CALIBRATION", "2017", "---" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/flatField/sensitivity.py#L7-L30
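A usage sketch with placeholder file names (imread, getBackground and fastMean resolve inside the package; the default bg=None is used here):

from imgProcessor.camera.flatField.sensitivity import sensitivity

sens = sensitivity(['flat0.tif', 'flat1.tif', 'flat2.tif'])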
radjkarl/imgProcessor
imgProcessor/simulate/navierStokes.py
navierStokes2d
import numpy as np


def navierStokes2d(u, v, p, dt, nt, rho, nu,
                   boundaryConditionUV, boundardConditionP, nit=100):
    '''
    solves the Navier-Stokes equation for incompressible flow
    on a regular 2d grid

    u,v,p --> initial velocity(u,v) and pressure(p) maps
    dt --> time step
    nt --> number of time steps to calculate
    rho, nu --> material constants
    nit --> number of iterations to solve the pressure field
    '''
    # next u, v, p maps:
    un = np.empty_like(u)
    vn = np.empty_like(v)
    pn = np.empty_like(p)
    # poisson equation ==> laplace term = b[source term]
    b = np.zeros_like(p)
    ny, nx = p.shape
    # cell size:
    dx = 2 / (nx - 1)
    dy = 2 / (ny - 1)
    # time stepping (_buildB, _pressurePoisson and _calcUV are
    # helpers from the same module):
    for _ in range(nt):
        un[:] = u
        vn[:] = v
        # pressure
        _buildB(b, rho, dt, u, v, dx, dy)
        for _ in range(nit):
            _pressurePoisson(p, pn, dx, dy, b)
            boundardConditionP(p)
        # UV
        _calcUV(u, v, un, p, vn, dt, dx, dy, rho, nu)
        boundaryConditionUV(u, v)
    return u, v, p
python
solves the Navier-Stokes equation for incompressible flow on a regular 2d grid u,v,p --> initial velocity(u,v) and pressure(p) maps dt --> time step nt --> number of time steps to calculate rho, nu --> material constants nit --> number of iterations to solve the pressure field
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/simulate/navierStokes.py#L8-L49
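A usage sketch; the lid-driven-cavity boundary conditions below are a standard textbook choice for this kind of solver, not something prescribed by this row:

import numpy as np
from imgProcessor.simulate.navierStokes import navierStokes2d

def bcUV(u, v):
    # no-slip walls, top lid moving with u=1:
    u[0] = u[:, 0] = u[:, -1] = 0
    u[-1] = 1
    v[0] = v[-1] = v[:, 0] = v[:, -1] = 0

def bcP(p):
    # dp/dn = 0 at the walls, p = 0 at the lid:
    p[:, 0] = p[:, 1]
    p[:, -1] = p[:, -2]
    p[0] = p[1]
    p[-1] = 0

u = np.zeros((41, 41))
v = np.zeros((41, 41))
p = np.zeros((41, 41))
u, v, p = navierStokes2d(u, v, p, dt=0.001, nt=100, rho=1., nu=0.1,
                         boundaryConditionUV=bcUV, boundardConditionP=bcP)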
radjkarl/imgProcessor
imgProcessor/simulate/navierStokes.py
shiftImage
import numpy as np
import cv2


def shiftImage(u, v, t, img, interpolation=cv2.INTER_LANCZOS4):
    '''
    remap an image using velocity field
    '''
    ny, nx = u.shape
    # sample coordinates, shifted by the velocity field scaled with t:
    sy, sx = np.mgrid[:float(ny):1, :float(nx):1]
    sx += u * t
    sy += v * t
    return cv2.remap(img.astype(np.float32),
                     sx.astype(np.float32),
                     sy.astype(np.float32), interpolation)
python
remap an image using velocity field
[ "remap", "an", "image", "using", "velocity", "field" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/simulate/navierStokes.py#L52-L62
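A usage sketch (inferred import path; a uniform velocity field is the simplest test case):

import numpy as np
from imgProcessor.simulate.navierStokes import shiftImage

img = np.random.rand(50, 50)
u = np.full((50, 50), 2.0)   # x velocity [px per time unit]
v = np.zeros((50, 50))
warped = shiftImage(u, v, t=1.5, img=img)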
radjkarl/imgProcessor
imgProcessor/filters/addNoise.py
addNoise
import numpy as np


def addNoise(img, snr=25, rShot=0.5):
    '''
    adds Gaussian (thermal) and shot noise to [img]

    [img] is assumed to be noise free
    [rShot] - shot noise ratio relative to all noise
    '''
    s0, s1 = img.shape[:2]
    m = img.mean()
    if np.isnan(m):
        m = np.nanmean(img)
    assert m != 0, 'image mean cannot be zero'
    img = img / m
    noise = np.random.normal(size=s0 * s1).reshape(s0, s1)
    if rShot > 0:
        # shot noise scales with the square root of the signal:
        noise *= (rShot * img**0.5 + 1)
    noise /= np.nanstd(noise)
    noise[np.isnan(noise)] = 0
    return m * (img + noise / snr)
python
adds Gaussian (thermal) and shot noise to [img] [img] is assumed to be noise free [rShot] - shot noise ratio relative to all noise
[ "adds", "Gaussian", "(", "thermal", ")", "and", "shot", "noise", "to", "[", "img", "]", "[", "img", "]", "is", "assumed", "to", "be", "noise", "free", "[", "rShot", "]", "-", "shot", "noise", "ratio", "relative", "to", "all", "noise" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/filters/addNoise.py#L4-L25
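A usage sketch (inferred import path):

import numpy as np
from imgProcessor.filters.addNoise import addNoise

clean = np.full((100, 100), 1000.)
noisy = addNoise(clean, snr=25)
print(noisy.std() / noisy.mean())   # roughly 1/25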
radjkarl/imgProcessor
imgProcessor/filters/coarseMaximum.py
coarseMaximum
import numpy as np


def coarseMaximum(arr, shape):
    '''
    return an array of [shape]
    where every cell equals the localised maximum of the given array
    [arr] at the same (scaled) position
    '''
    ss0, ss1 = shape
    s0, s1 = arr.shape
    # linspace2 and _calc are helpers from the same package:
    pos0 = linspace2(0, s0, ss0, dtype=int)
    pos1 = linspace2(0, s1, ss1, dtype=int)
    k0 = pos0[0]
    k1 = pos1[0]
    out = np.empty(shape, dtype=arr.dtype)
    _calc(arr, out, pos0, pos1, k0, k1, ss0, ss1)
    return out
python
return an array of [shape] where every cell equals the localised maximum of the given array [arr] at the same (scaled) position
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/filters/coarseMaximum.py#L8-L25
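A usage sketch (inferred import path; linspace2 and _calc resolve inside the package):

import numpy as np
from imgProcessor.filters.coarseMaximum import coarseMaximum

arr = np.random.rand(100, 100)
small = coarseMaximum(arr, (10, 10))   # 10x10 map of localised maxima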
radjkarl/imgProcessor
imgProcessor/equations/angleOfView.py
angleOfView
import numpy as np


def angleOfView(XY, shape=None, a=None, f=None, D=None, center=None):
    '''
    Another vignetting equation from:
    M. Koentges, M. Siebert, and D. Hinken, "Quantitative analysis of
    PV-modules by electroluminescence images for quality control" 2009

    f --> Focal length
    D --> Diameter of the aperture
    BOTH, D AND f NEED TO HAVE SAME UNIT [PX, mm ...]
    a --> Angular aperture
    center -> optical center [y,x]
    '''
    if a is None:
        assert f is not None and D is not None
        # https://en.wikipedia.org/wiki/Angular_aperture
        a = 2 * np.arctan2(D / 2, f)
    x, y = XY
    try:
        c0, c1 = center
    except (TypeError, ValueError):  # center not given -> use mid point
        s0, s1 = shape
        c0, c1 = s0 / 2, s1 / 2
    rx = (x - c0)**2
    ry = (y - c1)**2
    return 1 / (1 + np.tan(a) * ((rx + ry) / c0))**0.5
python
Another vignetting equation from: M. Koentges, M. Siebert, and D. Hinken, "Quantitative analysis of PV-modules by electroluminescence images for quality control" 2009 f --> Focal length D --> Diameter of the aperture BOTH, D AND f NEED TO HAVE SAME UNIT [PX, mm ...] a --> Angular aperture center -> optical center [y,x]
[ "Another", "vignetting", "equation", "from", ":", "M", ".", "Koentges", "M", ".", "Siebert", "and", "D", ".", "Hinken", "Quantitative", "analysis", "of", "PV", "-", "modules", "by", "electroluminescence", "images", "for", "quality", "control", "2009", "f", "--", ">", "Focal", "length", "D", "--", ">", "Diameter", "of", "the", "aperture", "BOTH", "D", "AND", "f", "NEED", "TO", "HAVE", "SAME", "UNIT", "[", "PX", "mm", "...", "]", "a", "--", ">", "Angular", "aperture", "center", "-", ">", "optical", "center", "[", "y", "x", "]" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/equations/angleOfView.py#L12-L40
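A usage sketch (inferred import path; focal length and aperture values are arbitrary but must share one unit):

import numpy as np
from imgProcessor.equations.angleOfView import angleOfView

y, x = np.mgrid[:480, :640]
vig = angleOfView((x, y), shape=(480, 640), f=50., D=10.)  # peak value is 1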
radjkarl/imgProcessor
imgProcessor/equations/angleOfView.py
angleOfView2
import numpy as np


def angleOfView2(x, y, b, x0=None, y0=None):
    '''
    Corrected AngleOfView equation by Koentges (via mail from 14/02/2017)

    b --> distance between the camera and the module in m
    x0 --> viewable width in the module plane of the camera in m
    y0 --> viewable height in the module plane of the camera in m
    x,y --> pixel position [m] from top left
    '''
    if x0 is None:
        x0 = x[-1, -1]
    if y0 is None:
        y0 = y[-1, -1]
    return np.cos(np.arctan(np.sqrt((x - x0 / 2)**2
                                    + (y - y0 / 2)**2) / b))
python
Corrected AngleOfView equation by Koentges (via mail from 14/02/2017) b --> distance between the camera and the module in m x0 --> viewable width in the module plane of the camera in m y0 --> viewable height in the module plane of the camera in m x,y --> pixel position [m] from top left
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/equations/angleOfView.py#L43-L56
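A usage sketch of angleOfView2 on a pixel grid, numpy only; the module extents and camera distance are made-up example values:

import numpy as np

x0, y0, b = 1.6, 1.0, 3.0          # module width/height [m], camera distance [m]

# pixel positions mapped to metres on a 200x320 px grid spanning the module:
yy, xx = np.mgrid[0:200, 0:320]
x = xx * x0 / 319.0
y = yy * y0 / 199.0

aov = np.cos(np.arctan(np.sqrt((x - x0 / 2) ** 2 + (y - y0 / 2) ** 2) / b))
print(aov.max(), aov.min())        # ~1.0 on the optical axis, smaller at edges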
radjkarl/imgProcessor
imgProcessor/utils/gridLinesFromVertices.py
gridLinesFromVertices
def gridLinesFromVertices(edges, nCells, subgrid=None, dtype=float): """ ###TODO REDO TXT OPTIONAL: subgrid = ([x],[y]) --> relative positions e.g. subgrid = ( (0.3,0.7), () ) --> two subgrid lines in x - nothing in y Returns: horiz,vert -> arrays of (x,y) poly-lines if subgrid != None, Returns: horiz,vert, subhoriz, subvert ####### creates a regular 2d grid from given edge points (4*(x0,y0)) and number of cells in x and y Returns: tuple(4lists): horizontal and vertical lines as (x0,y0,x1,y1) """ nx, ny = nCells y, x = np.mgrid[0.:ny + 1, 0.:nx + 1] src = np.float32([[0, 0], [nx, 0], [nx, ny], [0, ny]]) dst = sortCorners(edges).astype(np.float32) homography = cv2.getPerspectiveTransform(src, dst) pts = np.float32((x.flatten(), y.flatten())).T pts = pts.reshape(1, *pts.shape) pts2 = cv2.perspectiveTransform(pts, homography)[0] horiz = pts2.reshape(ny + 1, nx + 1, 2) vert = np.swapaxes(horiz, 0, 1) subh, subv = [], [] if subgrid is not None: sh, sv = subgrid if len(sh): subh = np.empty(shape=(ny * len(sh), nx + 1, 2), dtype=np.float32) last_si = 0 for n, si in enumerate(sh): spts = pts[:, :-(nx + 1)] spts[..., 1] += si - last_si last_si = si spts2 = cv2.perspectiveTransform(spts, homography)[0] subh[n::len(sh)] = spts2.reshape(ny, nx + 1, 2) if len(sv): subv = np.empty(shape=(ny + 1, nx * len(sv), 2), dtype=np.float32) last_si = 0 sspts = pts.reshape(1, ny + 1, nx + 1, 2) sspts = sspts[:, :, :-1] sspts = sspts.reshape(1, (ny + 1) * nx, 2) for n, si in enumerate(sv): sspts[..., 0] += si - last_si last_si = si spts2 = cv2.perspectiveTransform(sspts, homography)[0] subv[:, n::len(sv)] = spts2.reshape(ny + 1, nx, 2) subv = np.swapaxes(subv, 0, 1) return [horiz, vert, subh, subv]
python
def gridLinesFromVertices(edges, nCells, subgrid=None, dtype=float): """ ###TODO REDO TXT OPTIONAL: subgrid = ([x],[y]) --> relative positions e.g. subgrid = ( (0.3,0.7), () ) --> two subgrid lines in x - nothing in y Returns: horiz,vert -> arrays of (x,y) poly-lines if subgrid != None, Returns: horiz,vert, subhoriz, subvert ####### creates a regular 2d grid from given edge points (4*(x0,y0)) and number of cells in x and y Returns: tuple(4lists): horizontal and vertical lines as (x0,y0,x1,y1) """ nx, ny = nCells y, x = np.mgrid[0.:ny + 1, 0.:nx + 1] src = np.float32([[0, 0], [nx, 0], [nx, ny], [0, ny]]) dst = sortCorners(edges).astype(np.float32) homography = cv2.getPerspectiveTransform(src, dst) pts = np.float32((x.flatten(), y.flatten())).T pts = pts.reshape(1, *pts.shape) pts2 = cv2.perspectiveTransform(pts, homography)[0] horiz = pts2.reshape(ny + 1, nx + 1, 2) vert = np.swapaxes(horiz, 0, 1) subh, subv = [], [] if subgrid is not None: sh, sv = subgrid if len(sh): subh = np.empty(shape=(ny * len(sh), nx + 1, 2), dtype=np.float32) last_si = 0 for n, si in enumerate(sh): spts = pts[:, :-(nx + 1)] spts[..., 1] += si - last_si last_si = si spts2 = cv2.perspectiveTransform(spts, homography)[0] subh[n::len(sh)] = spts2.reshape(ny, nx + 1, 2) if len(sv): subv = np.empty(shape=(ny + 1, nx * len(sv), 2), dtype=np.float32) last_si = 0 sspts = pts.reshape(1, ny + 1, nx + 1, 2) sspts = sspts[:, :, :-1] sspts = sspts.reshape(1, (ny + 1) * nx, 2) for n, si in enumerate(sv): sspts[..., 0] += si - last_si last_si = si spts2 = cv2.perspectiveTransform(sspts, homography)[0] subv[:, n::len(sv)] = spts2.reshape(ny + 1, nx, 2) subv = np.swapaxes(subv, 0, 1) return [horiz, vert, subh, subv]
[ "def", "gridLinesFromVertices", "(", "edges", ",", "nCells", ",", "subgrid", "=", "None", ",", "dtype", "=", "float", ")", ":", "nx", ",", "ny", "=", "nCells", "y", ",", "x", "=", "np", ".", "mgrid", "[", "0.", ":", "ny", "+", "1", ",", "0.", ":", "nx", "+", "1", "]", "src", "=", "np", ".", "float32", "(", "[", "[", "0", ",", "0", "]", ",", "[", "nx", ",", "0", "]", ",", "[", "nx", ",", "ny", "]", ",", "[", "0", ",", "ny", "]", "]", ")", "dst", "=", "sortCorners", "(", "edges", ")", ".", "astype", "(", "np", ".", "float32", ")", "homography", "=", "cv2", ".", "getPerspectiveTransform", "(", "src", ",", "dst", ")", "pts", "=", "np", ".", "float32", "(", "(", "x", ".", "flatten", "(", ")", ",", "y", ".", "flatten", "(", ")", ")", ")", ".", "T", "pts", "=", "pts", ".", "reshape", "(", "1", ",", "*", "pts", ".", "shape", ")", "pts2", "=", "cv2", ".", "perspectiveTransform", "(", "pts", ",", "homography", ")", "[", "0", "]", "horiz", "=", "pts2", ".", "reshape", "(", "ny", "+", "1", ",", "nx", "+", "1", ",", "2", ")", "vert", "=", "np", ".", "swapaxes", "(", "horiz", ",", "0", ",", "1", ")", "subh", ",", "subv", "=", "[", "]", ",", "[", "]", "if", "subgrid", "is", "not", "None", ":", "sh", ",", "sv", "=", "subgrid", "if", "len", "(", "sh", ")", ":", "subh", "=", "np", ".", "empty", "(", "shape", "=", "(", "ny", "*", "len", "(", "sh", ")", ",", "nx", "+", "1", ",", "2", ")", ",", "dtype", "=", "np", ".", "float32", ")", "last_si", "=", "0", "for", "n", ",", "si", "in", "enumerate", "(", "sh", ")", ":", "spts", "=", "pts", "[", ":", ",", ":", "-", "(", "nx", "+", "1", ")", "]", "spts", "[", "...", ",", "1", "]", "+=", "si", "-", "last_si", "last_si", "=", "si", "spts2", "=", "cv2", ".", "perspectiveTransform", "(", "spts", ",", "homography", ")", "[", "0", "]", "subh", "[", "n", ":", ":", "len", "(", "sh", ")", "]", "=", "spts2", ".", "reshape", "(", "ny", ",", "nx", "+", "1", ",", "2", ")", "if", "len", "(", "sv", ")", ":", "subv", "=", "np", ".", "empty", "(", "shape", "=", "(", "ny", "+", "1", ",", "nx", "*", "len", "(", "sv", ")", ",", "2", ")", ",", "dtype", "=", "np", ".", "float32", ")", "last_si", "=", "0", "sspts", "=", "pts", ".", "reshape", "(", "1", ",", "ny", "+", "1", ",", "nx", "+", "1", ",", "2", ")", "sspts", "=", "sspts", "[", ":", ",", ":", ",", ":", "-", "1", "]", "sspts", "=", "sspts", ".", "reshape", "(", "1", ",", "(", "ny", "+", "1", ")", "*", "nx", ",", "2", ")", "for", "n", ",", "si", "in", "enumerate", "(", "sv", ")", ":", "sspts", "[", "...", ",", "0", "]", "+=", "si", "-", "last_si", "last_si", "=", "si", "spts2", "=", "cv2", ".", "perspectiveTransform", "(", "sspts", ",", "homography", ")", "[", "0", "]", "subv", "[", ":", ",", "n", ":", ":", "len", "(", "sv", ")", "]", "=", "spts2", ".", "reshape", "(", "ny", "+", "1", ",", "nx", ",", "2", ")", "subv", "=", "np", ".", "swapaxes", "(", "subv", ",", "0", ",", "1", ")", "return", "[", "horiz", ",", "vert", ",", "subh", ",", "subv", "]" ]
###TODO REDO TXT OPTIONAL: subgrid = ([x],[y]) --> relative positions e.g. subgrid = ( (0.3,0.7), () ) --> two subgrid lines in x - nothing in y Returns: horiz,vert -> arrays of (x,y) poly-lines if subgrid != None, Returns: horiz,vert, subhoriz, subvert ####### creates a regular 2d grid from given edge points (4*(x0,y0)) and number of cells in x and y Returns: tuple(4lists): horizontal and vertical lines as (x0,y0,x1,y1)
[ "###TODO", "REDO", "TXT", "OPTIONAL", ":", "subgrid", "=", "(", "[", "x", "]", "[", "y", "]", ")", "--", ">", "relative", "positions", "e", ".", "g", ".", "subgrid", "=", "(", "(", "0", ".", "3", "0", ".", "7", ")", "()", ")", "--", ">", "two", "subgrid", "lines", "in", "x", "-", "nothing", "in", "y", "Returns", ":", "horiz", "vert", "-", ">", "arrays", "of", "(", "x", "y", ")", "poly", "-", "lines", "if", "subgrid", "!", "=", "None", "Returns", ":", "horiz", "vert", "subhoriz", "subvert", "#######", "creates", "a", "regular", "2d", "grid", "from", "given", "edge", "points", "(", "4", "*", "(", "x0", "y0", "))", "and", "number", "of", "cells", "in", "x", "and", "y", "Returns", ":", "tuple", "(", "4lists", ")", ":", "horizontal", "and", "vertical", "lines", "as", "(", "x0", "y0", "x1", "y1", ")" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/utils/gridLinesFromVertices.py#L39-L107
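The heart of the routine above is a single homography that maps a unit-cell grid onto four measured corner points; a minimal sketch of that step with cv2 and numpy, using invented corner coordinates that are already ordered top-left, top-right, bottom-right, bottom-left (so sortCorners is not needed):

import cv2
import numpy as np

nx, ny = 4, 3                                   # number of cells in x and y
y, x = np.mgrid[0.:ny + 1, 0.:nx + 1]           # regular grid nodes

src = np.float32([[0, 0], [nx, 0], [nx, ny], [0, ny]])
dst = np.float32([[10, 12], [90, 15], [95, 70], [8, 65]])   # example corners

H = cv2.getPerspectiveTransform(src, dst)

pts = np.float32((x.flatten(), y.flatten())).T.reshape(1, -1, 2)
warped = cv2.perspectiveTransform(pts, H)[0]

horiz = warped.reshape(ny + 1, nx + 1, 2)       # rows of the warped grid
vert = np.swapaxes(horiz, 0, 1)                 # columns of the warped grid
print(horiz[0, 0], horiz[-1, -1])               # [10. 12.] and [95. 70.]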
radjkarl/imgProcessor
imgProcessor/measure/sharpness/_base.py
SharpnessBase.MTF50
def MTF50(self): ''' return object resolution as [line pairs/mm] where MTF=50% see http://www.imatest.com/docs/sharpness/ ''' #MTF(px_per_mm) has to be called beforehand (it requires the px_per_mm argument): assert self.mtf_x is not None, 'call MTF(px_per_mm) first' f = UnivariateSpline(self.mtf_x, self.mtf_y-0.5) return f.roots()[0]
python
def MTF50(self): ''' return object resolution as [line pairs/mm] where MTF=50% see http://www.imatest.com/docs/sharpness/ ''' #MTF(px_per_mm) has to be called beforehand (it requires the px_per_mm argument): assert self.mtf_x is not None, 'call MTF(px_per_mm) first' f = UnivariateSpline(self.mtf_x, self.mtf_y-0.5) return f.roots()[0]
[ "def", "MTF50", "(", "self", ")", ":", "#MTF(px_per_mm) has to be called beforehand (it requires the px_per_mm argument):\r", "assert", "self", ".", "mtf_x", "is", "not", "None", ",", "'call MTF(px_per_mm) first'", "f", "=", "UnivariateSpline", "(", "self", ".", "mtf_x", ",", "self", ".", "mtf_y", "-", "0.5", ")", "return", "f", ".", "roots", "(", ")", "[", "0", "]" ]
return object resolution as [line pairs/mm] where MTF=50% see http://www.imatest.com/docs/sharpness/
[ "return", "object", "resolution", "as", "[", "line", "pairs", "/", "mm", "]", "where", "MTF", "=", "50%", "see", "http", ":", "//", "www", ".", "imatest", ".", "com", "/", "docs", "/", "sharpness", "/" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/measure/sharpness/_base.py#L21-L30
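A self-contained sketch of the MTF50 lookup via spline roots, assuming scipy; the exponential MTF curve is synthetic:

import numpy as np
from scipy.interpolate import UnivariateSpline

freq = np.linspace(0, 10, 50)          # spatial frequency [line pairs/mm]
mtf = np.exp(-0.3 * freq)              # synthetic MTF curve

# the root of (MTF - 0.5) is the frequency where contrast drops to 50%:
f = UnivariateSpline(freq, mtf - 0.5, s=0)
mtf50 = f.roots()[0]
print(mtf50)                           # ~ln(2)/0.3 = 2.31 lp/mm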
radjkarl/imgProcessor
imgProcessor/measure/sharpness/_base.py
SharpnessBase.MTF
def MTF(self, px_per_mm): ''' px_per_mm = cam_resolution / image_size ''' res = 100 #numeric resolution r = 4 #range +-r*std #size of 1 px: px_size = 1 / px_per_mm #standard deviation of the point-spread-function (PSF) as normal distributed: std = self.std*px_size #transform standard deviation from [px] to [mm] x = np.linspace(-r*std,r*std, res) #line spread function: lsf = self.gaussian1d(x, 1, 0, std) #MTF defined as Fourier transform of the line spread function: #abs() because result is complex y = abs(np.fft.fft(lsf)) #normalize fft so that max = 1 y /= np.max(y) #step length between xn and xn+1 dstep = r*std/res # Fourier frequencies - here: line pairs(cycles) per mm freq = np.fft.fftfreq(lsf.size, dstep) #limit mtf between [0-px_per_mm]: i = np.argmax(freq>px_per_mm) self.mtf_x = freq[:i] self.mtf_y = y[:i] return self.mtf_x, self.mtf_y
python
def MTF(self, px_per_mm): ''' px_per_mm = cam_resolution / image_size ''' res = 100 #numeric resolution r = 4 #range +-r*std #size of 1 px: px_size = 1 / px_per_mm #standard deviation of the point-spread-function (PSF) as normal distributed: std = self.std*px_size #transform standard deviation from [px] to [mm] x = np.linspace(-r*std,r*std, res) #line spread function: lsf = self.gaussian1d(x, 1, 0, std) #MTF defined as Fourier transform of the line spread function: #abs() because result is complex y = abs(np.fft.fft(lsf)) #normalize fft so that max = 1 y /= np.max(y) #step length between xn and xn+1 dstep = r*std/res # Fourier frequencies - here: line pairs(cycles) per mm freq = np.fft.fftfreq(lsf.size, dstep) #limit mtf between [0-px_per_mm]: i = np.argmax(freq>px_per_mm) self.mtf_x = freq[:i] self.mtf_y = y[:i] return self.mtf_x, self.mtf_y
[ "def", "MTF", "(", "self", ",", "px_per_mm", ")", ":", "res", "=", "100", "#numeric resolution\r", "r", "=", "4", "#range +-r*std\r", "#size of 1 px:\r", "px_size", "=", "1", "/", "px_per_mm", "#standard deviation of the point-spread-function (PSF) as normal distributed:\r", "std", "=", "self", ".", "std", "*", "px_size", "#transform standard deviation from [px] to [mm]\r", "x", "=", "np", ".", "linspace", "(", "-", "r", "*", "std", ",", "r", "*", "std", ",", "res", ")", "#line spread function:\r", "lsf", "=", "self", ".", "gaussian1d", "(", "x", ",", "1", ",", "0", ",", "std", ")", "#MTF defined as Fourier transform of the line spread function:\r", "#abs() because result is complex\r", "y", "=", "abs", "(", "np", ".", "fft", ".", "fft", "(", "lsf", ")", ")", "#normalize fft so that max = 1\r", "y", "/=", "np", ".", "max", "(", "y", ")", "#step length between xn and xn+1\r", "dstep", "=", "r", "*", "std", "/", "res", "# Fourier frequencies - here: line pairs(cycles) per mm\r", "freq", "=", "np", ".", "fft", ".", "fftfreq", "(", "lsf", ".", "size", ",", "dstep", ")", "#limit mtf between [0-px_per_mm]:\r", "i", "=", "np", ".", "argmax", "(", "freq", ">", "px_per_mm", ")", "self", ".", "mtf_x", "=", "freq", "[", ":", "i", "]", "self", ".", "mtf_y", "=", "y", "[", ":", "i", "]", "return", "self", ".", "mtf_x", ",", "self", ".", "mtf_y" ]
px_per_mm = cam_resolution / image_size
[ "px_per_mm", "=", "cam_resolution", "/", "image_size" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/measure/sharpness/_base.py#L33-L62
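A standalone sketch of the same LSF-to-MTF computation with numpy, using illustrative values for the PSF width and the sensor resolution:

import numpy as np

px_per_mm = 10.0                  # hypothetical camera resolution / image size
std = 0.05                        # example PSF standard deviation in mm
res, r = 100, 4                   # numeric resolution and +-r*std range

x = np.linspace(-r * std, r * std, res)
lsf = np.exp(-x ** 2 / (2 * std ** 2))       # Gaussian line spread function

y = np.abs(np.fft.fft(lsf))                  # MTF = |FFT(LSF)|
y /= y.max()                                 # normalize so MTF(0) = 1

freq = np.fft.fftfreq(lsf.size, d=r * std / res)   # line pairs per mm
i = np.argmax(freq > px_per_mm)              # keep [0 ... px_per_mm] only
mtf_x, mtf_y = freq[:i], y[:i]
print(mtf_x[-1], mtf_y[-1])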
radjkarl/imgProcessor
imgProcessor/measure/sharpness/_base.py
SharpnessBase.uncertaintyMap
def uncertaintyMap(self, psf, method='convolve', fitParams=None): ''' return the intensity based uncertainty due to the unsharpness of the image as standard deviation method = ['convolve' , 'unsupervised_wiener'] latter one also returns the reconstructed image (deconvolution) ''' #ignore background: #img[img<0]=0 ###noise should not influence sharpness uncertainty: ##img = median_filter(img, 3) # decrease noise in order not to overestimate result: img = scaleSignal(self.img, fitParams=fitParams) if method == 'convolve': #print 'convolve' blurred = convolve2d(img, psf, 'same') m = abs(img-blurred) / abs(img + blurred) m = np.nan_to_num(m) m*=self.std**2 m[m>1]=1 self.blur_distortion = m #np.save('blurred', blurred) # debug output, disabled return m else: restored = unsupervised_wiener(img, psf)[0] m = abs(img-restored) / abs(img + restored) m = np.nan_to_num(m) m*=self.std**2 m[m>1]=1 self.blur_distortion = m return m, restored
python
def uncertaintyMap(self, psf, method='convolve', fitParams=None): ''' return the intensity based uncertainty due to the unsharpness of the image as standard deviation method = ['convolve' , 'unsupervised_wiener'] latter one also returns the reconstructed image (deconvolution) ''' #ignore background: #img[img<0]=0 ###noise should not influence sharpness uncertainty: ##img = median_filter(img, 3) # decrease noise in order not to overestimate result: img = scaleSignal(self.img, fitParams=fitParams) if method == 'convolve': #print 'convolve' blurred = convolve2d(img, psf, 'same') m = abs(img-blurred) / abs(img + blurred) m = np.nan_to_num(m) m*=self.std**2 m[m>1]=1 self.blur_distortion = m #np.save('blurred', blurred) # debug output, disabled return m else: restored = unsupervised_wiener(img, psf)[0] m = abs(img-restored) / abs(img + restored) m = np.nan_to_num(m) m*=self.std**2 m[m>1]=1 self.blur_distortion = m return m, restored
[ "def", "uncertaintyMap", "(", "self", ",", "psf", ",", "method", "=", "'convolve'", ",", "fitParams", "=", "None", ")", ":", "#ignore background:\r", "#img[img<0]=0\r", "###noise should not influence sharpness uncertainty:\r", "##img = median_filter(img, 3)\r", "# decrease noise in order not to overestimate result:\r", "img", "=", "scaleSignal", "(", "self", ".", "img", ",", "fitParams", "=", "fitParams", ")", "if", "method", "==", "'convolve'", ":", "#print 'convolve'\r", "blurred", "=", "convolve2d", "(", "img", ",", "psf", ",", "'same'", ")", "m", "=", "abs", "(", "img", "-", "blurred", ")", "/", "abs", "(", "img", "+", "blurred", ")", "m", "=", "np", ".", "nan_to_num", "(", "m", ")", "m", "*=", "self", ".", "std", "**", "2", "m", "[", "m", ">", "1", "]", "=", "1", "self", ".", "blur_distortion", "=", "m", "#np.save('blurred', blurred) # debug output, disabled\r", "return", "m", "else", ":", "restored", "=", "unsupervised_wiener", "(", "img", ",", "psf", ")", "[", "0", "]", "m", "=", "abs", "(", "img", "-", "restored", ")", "/", "abs", "(", "img", "+", "restored", ")", "m", "=", "np", ".", "nan_to_num", "(", "m", ")", "m", "*=", "self", ".", "std", "**", "2", "m", "[", "m", ">", "1", "]", "=", "1", "self", ".", "blur_distortion", "=", "m", "return", "m", ",", "restored" ]
return the intensity based uncertainty due to the unsharpness of the image as standard deviation method = ['convolve' , 'unsupervised_wiener'] latter one also returns the reconstructed image (deconvolution)
[ "return", "the", "intensity", "based", "uncertainty", "due", "to", "the", "unsharpness", "of", "the", "image", "as", "standard", "deviation", "method", "=", "[", "convolve", "unsupervised_wiener", "]", "latter", "one", "also", "returns", "the", "reconstructed", "image", "(", "deconvolution", ")" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/measure/sharpness/_base.py#L66-L100
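A minimal sketch of the 'convolve' branch above with scipy and a synthetic image/PSF; note that the class method additionally scales the map by its fitted PSF standard deviation squared:

import numpy as np
from scipy.signal import convolve2d

rng = np.random.default_rng(0)
img = rng.random((64, 64)) + 0.5             # synthetic, strictly positive image

# small normalized Gaussian PSF:
g = np.exp(-(np.arange(-3, 4) ** 2) / 2.0)
psf = np.outer(g, g)
psf /= psf.sum()

blurred = convolve2d(img, psf, 'same')
m = np.abs(img - blurred) / np.abs(img + blurred)   # relative blur error
m = np.nan_to_num(m)
m = np.clip(m, 0, 1)
print(m.mean(), m.max())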
radjkarl/imgProcessor
imgProcessor/measure/sharpness/_base.py
SharpnessBase.stdDev
def stdDev(self): ''' get the standard deviation of the PSF, which is evaluated as a 2d Gaussian ''' if self._corrPsf is None: self.psf() p = self._corrPsf.copy() mn = p.min() p[p<0.05*p.max()] = mn p-=mn p/=p.sum() x,y = self._psfGridCoords() x = x.flatten() y = y.flatten() guess = (1,1,0) param, _ = curve_fit(self._fn, (x,y), p.flatten(), guess) self._fitParam = param stdx,stdy = param[:2] self._std = (stdx+stdy) / 2 return self._std
python
def stdDev(self): ''' get the standard deviation of the PSF, which is evaluated as a 2d Gaussian ''' if self._corrPsf is None: self.psf() p = self._corrPsf.copy() mn = p.min() p[p<0.05*p.max()] = mn p-=mn p/=p.sum() x,y = self._psfGridCoords() x = x.flatten() y = y.flatten() guess = (1,1,0) param, _ = curve_fit(self._fn, (x,y), p.flatten(), guess) self._fitParam = param stdx,stdy = param[:2] self._std = (stdx+stdy) / 2 return self._std
[ "def", "stdDev", "(", "self", ")", ":", "if", "self", ".", "_corrPsf", "is", "None", ":", "self", ".", "psf", "(", ")", "p", "=", "self", ".", "_corrPsf", ".", "copy", "(", ")", "mn", "=", "p", ".", "min", "(", ")", "p", "[", "p", "<", "0.05", "*", "p", ".", "max", "(", ")", "]", "=", "mn", "p", "-=", "mn", "p", "/=", "p", ".", "sum", "(", ")", "x", ",", "y", "=", "self", ".", "_psfGridCoords", "(", ")", "x", "=", "x", ".", "flatten", "(", ")", "y", "=", "y", ".", "flatten", "(", ")", "guess", "=", "(", "1", ",", "1", ",", "0", ")", "param", ",", "_", "=", "curve_fit", "(", "self", ".", "_fn", ",", "(", "x", ",", "y", ")", ",", "p", ".", "flatten", "(", ")", ",", "guess", ")", "self", ".", "_fitParam", "=", "param", "stdx", ",", "stdy", "=", "param", "[", ":", "2", "]", "self", ".", "_std", "=", "(", "stdx", "+", "stdy", ")", "/", "2", "return", "self", ".", "_std" ]
get the standard deviation of the PSF, which is evaluated as a 2d Gaussian
[ "get", "the", "standard", "deviation", "of", "the", "PSF", "which", "is", "evaluated", "as", "a", "2d", "Gaussian" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/measure/sharpness/_base.py#L160-L185
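A sketch of the Gaussian fit behind stdDev, assuming scipy; since the record does not show self._fn, an axis-aligned 2d Gaussian with an amplitude parameter stands in for it:

import numpy as np
from scipy.optimize import curve_fit

def gauss2d(xy, stdx, stdy, amp):
    x, y = xy
    return amp * np.exp(-(x ** 2 / (2 * stdx ** 2) + y ** 2 / (2 * stdy ** 2)))

y, x = np.mgrid[-5.:6., -5.:6.]                       # PSF grid coordinates
psf = gauss2d((x.ravel(), y.ravel()), 1.4, 2.1, 1.0)  # synthetic PSF samples

param, _ = curve_fit(gauss2d, (x.ravel(), y.ravel()), psf, p0=(1, 1, 0.5))
stdx, stdy = param[:2]
print((stdx + stdy) / 2)       # averaged std, as returned by SharpnessBase.stdDev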
radjkarl/imgProcessor
imgProcessor/interpolate/interpolate2dStructuredIDW.py
interpolate2dStructuredIDW
def interpolate2dStructuredIDW(grid, mask, kernel=15, power=2, fx=1, fy=1): ''' replace all values in [grid] indicated by [mask] with the inverse distance weighted interpolation of all values within px+-kernel [power] -> distance weighting factor: 1/distance**[power] ''' weights = np.empty(shape=((2*kernel+1,2*kernel+1))) for xi in range(-kernel,kernel+1): for yi in range(-kernel,kernel+1): dist = ((fx*xi)**2+(fy*yi)**2) if dist: weights[xi+kernel,yi+kernel] = 1 / dist**(0.5*power) return _calc(grid, mask, kernel, weights)
python
def interpolate2dStructuredIDW(grid, mask, kernel=15, power=2, fx=1, fy=1): ''' replace all values in [grid] indicated by [mask] with the inverse distance weighted interpolation of all values within px+-kernel [power] -> distance weighting factor: 1/distance**[power] ''' weights = np.empty(shape=((2*kernel+1,2*kernel+1))) for xi in range(-kernel,kernel+1): for yi in range(-kernel,kernel+1): dist = ((fx*xi)**2+(fy*yi)**2) if dist: weights[xi+kernel,yi+kernel] = 1 / dist**(0.5*power) return _calc(grid, mask, kernel, weights)
[ "def", "interpolate2dStructuredIDW", "(", "grid", ",", "mask", ",", "kernel", "=", "15", ",", "power", "=", "2", ",", "fx", "=", "1", ",", "fy", "=", "1", ")", ":", "weights", "=", "np", ".", "empty", "(", "shape", "=", "(", "(", "2", "*", "kernel", "+", "1", ",", "2", "*", "kernel", "+", "1", ")", ")", ")", "for", "xi", "in", "range", "(", "-", "kernel", ",", "kernel", "+", "1", ")", ":", "for", "yi", "in", "range", "(", "-", "kernel", ",", "kernel", "+", "1", ")", ":", "dist", "=", "(", "(", "fx", "*", "xi", ")", "**", "2", "+", "(", "fy", "*", "yi", ")", "**", "2", ")", "if", "dist", ":", "weights", "[", "xi", "+", "kernel", ",", "yi", "+", "kernel", "]", "=", "1", "/", "dist", "**", "(", "0.5", "*", "power", ")", "return", "_calc", "(", "grid", ",", "mask", ",", "kernel", ",", "weights", ")" ]
replace all values in [grid] indicated by [mask] with the inverse distance weighted interpolation of all values within px+-kernel [power] -> distance weighting factor: 1/distance**[power]
[ "replace", "all", "values", "in", "[", "grid", "]", "indicated", "by", "[", "mask", "]", "with", "the", "inverse", "distance", "weighted", "interpolation", "of", "all", "values", "within", "px", "+", "-", "kernel", "[", "power", "]", "-", ">", "distance", "weighting", "factor", ":", "1", "/", "distance", "**", "[", "power", "]" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/interpolate/interpolate2dStructuredIDW.py#L8-L23
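The compiled helper _calc is not shown in the record; below is a plain-Python stand-in that builds the same 1/distance**power weight kernel and fills masked cells from their valid neighbours, numpy only:

import numpy as np

def idw_fill(grid, mask, kernel=3, power=2):
    # weight kernel: 1/distance**power, the center weight stays 0
    yi, xi = np.mgrid[-kernel:kernel + 1, -kernel:kernel + 1]
    dist2 = (xi ** 2 + yi ** 2).astype(float)
    weights = np.zeros_like(dist2)
    nz = dist2 > 0
    weights[nz] = 1.0 / dist2[nz] ** (0.5 * power)

    out = grid.copy()
    g = np.pad(grid, kernel, mode='edge')
    valid = np.pad(~mask, kernel, mode='constant')   # False outside the grid
    for i, j in zip(*np.nonzero(mask)):
        win = g[i:i + 2 * kernel + 1, j:j + 2 * kernel + 1]
        w = weights * valid[i:i + 2 * kernel + 1, j:j + 2 * kernel + 1]
        if w.sum():
            out[i, j] = (win * w).sum() / w.sum()
    return out

grid = np.arange(36, dtype=float).reshape(6, 6)
mask = np.zeros_like(grid, dtype=bool)
mask[2, 3] = True                        # one cell to replace
print(idw_fill(grid, mask)[2, 3])        # close to the original value 15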
radjkarl/imgProcessor
imgProcessor/uncertainty/temporalSignalStability.py
temporalSignalStability
def temporalSignalStability(imgs, times, down_scale_factor=1): ''' (Electroluminescence) signal is not stable over time especially next to cracks. This function takes a set of images and returns parameters, needed to transform uncertainty to other exposure times using [adjustUncertToExposureTime] return [signal uncertainty] obtained from linear fit to [imgs] [average event length] [ascent],[offset] of linear fit -------- [imgs] --> corrected EL images captured in sequence [times] --> absolute measurement times of all [imgs] e.g. every image was taken every 60 sec, then times=60,120,180... [down_scale_factor] --> down scale [imgs] to speed up process ------- More information can be found at ... ---- K.Bedrich: Quantitative Electroluminescence Imaging, PhD Thesis, 2017 Subsection 5.1.4.3: Exposure Time Dependency ---- ''' imgs = np.asarray(imgs) s0, s1, s2 = imgs.shape #down scale imgs to speed up process: if down_scale_factor > 1: s1 //= down_scale_factor s2 //= down_scale_factor imgs2 = np.empty(shape=(s0, s1, s2)) for n, c in enumerate(imgs): imgs2[n] = cv2.resize(c, (s2, s1), interpolation=cv2.INTER_AREA) imgs = imgs2 # linear fit for every point in image set: ascent, offset, error = linRegressUsingMasked2dArrays( times, imgs, calcError=True) # functionally obtained [imgs]: fn_imgs = np.array([offset + t * ascent for t in times]) #difference between [imgs] for fit result: diff = imgs - fn_imgs diff = median_filter(diff, 5) error_t = np.tile(error, (s0, 1, 1)) # find events: evt = (np.abs(diff) > 0.5 * error_t) # calc average event length: avlen = _calcAvgLen(evt, np.empty(shape=evt.shape[1:])) #cannot calc event length smaller exposure time, so: i = avlen == 0 avlen = maskedFilter(avlen, mask=i, fn='mean', ksize=7, fill_mask=False) # remove single px: i = maximum_filter(i, 3) avlen[i] = 0 avlen = maximum_filter(avlen, 3) i = avlen == 0 avlen = median_filter(avlen, 3) avlen[i] = 0 return error, avlen, ascent, offset
python
def temporalSignalStability(imgs, times, down_scale_factor=1): ''' (Electroluminescence) signal is not stable over time especially next to cracks. This function takes a set of images and returns parameters, needed to transform uncertainty to other exposure times using [adjustUncertToExposureTime] return [signal uncertainty] obtained from linear fit to [imgs] [average event length] [ascent],[offset] of linear fit -------- [imgs] --> corrected EL images captured in sequence [times] --> absolute measurement times of all [imgs] e.g. every image was taken every 60 sec, then times=60,120,180... [down_scale_factor] --> down scale [imgs] to speed up process ------- More information can be found at ... ---- K.Bedrich: Quantitative Electroluminescence Imaging, PhD Thesis, 2017 Subsection 5.1.4.3: Exposure Time Dependency ---- ''' imgs = np.asarray(imgs) s0, s1, s2 = imgs.shape #down scale imgs to speed up process: if down_scale_factor > 1: s1 //= down_scale_factor s2 //= down_scale_factor imgs2 = np.empty(shape=(s0, s1, s2)) for n, c in enumerate(imgs): imgs2[n] = cv2.resize(c, (s2, s1), interpolation=cv2.INTER_AREA) imgs = imgs2 # linear fit for every point in image set: ascent, offset, error = linRegressUsingMasked2dArrays( times, imgs, calcError=True) # functionally obtained [imgs]: fn_imgs = np.array([offset + t * ascent for t in times]) #difference between [imgs] for fit result: diff = imgs - fn_imgs diff = median_filter(diff, 5) error_t = np.tile(error, (s0, 1, 1)) # find events: evt = (np.abs(diff) > 0.5 * error_t) # calc average event length: avlen = _calcAvgLen(evt, np.empty(shape=evt.shape[1:])) #cannot calc event length smaller exposure time, so: i = avlen == 0 avlen = maskedFilter(avlen, mask=i, fn='mean', ksize=7, fill_mask=False) # remove single px: i = maximum_filter(i, 3) avlen[i] = 0 avlen = maximum_filter(avlen, 3) i = avlen == 0 avlen = median_filter(avlen, 3) avlen[i] = 0 return error, avlen, ascent, offset
[ "def", "temporalSignalStability", "(", "imgs", ",", "times", ",", "down_scale_factor", "=", "1", ")", ":", "imgs", "=", "np", ".", "asarray", "(", "imgs", ")", "s0", ",", "s1", ",", "s2", "=", "imgs", ".", "shape", "#down scale imgs to speed up process:\r", "if", "down_scale_factor", ">", "1", ":", "s1", "//=", "down_scale_factor", "s2", "//=", "down_scale_factor", "imgs2", "=", "np", ".", "empty", "(", "shape", "=", "(", "s0", ",", "s1", ",", "s2", ")", ")", "for", "n", ",", "c", "in", "enumerate", "(", "imgs", ")", ":", "imgs2", "[", "n", "]", "=", "cv2", ".", "resize", "(", "c", ",", "(", "s2", ",", "s1", ")", ",", "interpolation", "=", "cv2", ".", "INTER_AREA", ")", "imgs", "=", "imgs2", "# linear fit for every point in image set:\r", "ascent", ",", "offset", ",", "error", "=", "linRegressUsingMasked2dArrays", "(", "times", ",", "imgs", ",", "calcError", "=", "True", ")", "# functionally obtained [imgs]:\r", "fn_imgs", "=", "np", ".", "array", "(", "[", "offset", "+", "t", "*", "ascent", "for", "t", "in", "times", "]", ")", "#difference between [imgs] for fit result:\r", "diff", "=", "imgs", "-", "fn_imgs", "diff", "=", "median_filter", "(", "diff", ",", "5", ")", "error_t", "=", "np", ".", "tile", "(", "error", ",", "(", "s0", ",", "1", ",", "1", ")", ")", "# find events: \r", "evt", "=", "(", "np", ".", "abs", "(", "diff", ")", ">", "0.5", "*", "error_t", ")", "# calc average event length:\r", "avlen", "=", "_calcAvgLen", "(", "evt", ",", "np", ".", "empty", "(", "shape", "=", "evt", ".", "shape", "[", "1", ":", "]", ")", ")", "#cannot calc event length smaller exposure time, so:\r", "i", "=", "avlen", "==", "0", "avlen", "=", "maskedFilter", "(", "avlen", ",", "mask", "=", "i", ",", "fn", "=", "'mean'", ",", "ksize", "=", "7", ",", "fill_mask", "=", "False", ")", "# remove single px:\r", "i", "=", "maximum_filter", "(", "i", ",", "3", ")", "avlen", "[", "i", "]", "=", "0", "avlen", "=", "maximum_filter", "(", "avlen", ",", "3", ")", "i", "=", "avlen", "==", "0", "avlen", "=", "median_filter", "(", "avlen", ",", "3", ")", "avlen", "[", "i", "]", "=", "0", "return", "error", ",", "avlen", ",", "ascent", ",", "offset" ]
(Electroluminescence) signal is not stable over time especially next to cracks. This function takes a set of images and returns parameters, needed to transform uncertainty to other exposure times using [adjustUncertToExposureTime] return [signal uncertainty] obtained from linear fit to [imgs] [average event length] [ascent],[offset] of linear fit -------- [imgs] --> corrected EL images captured in sequence [times] --> absolute measurement times of all [imgs] e.g. every image was taken every 60 sec, then times=60,120,180... [down_scale_factor] --> down scale [imgs] to speed up process ------- More information can be found at ... ---- K.Bedrich: Quantitative Electroluminescence Imaging, PhD Thesis, 2017 Subsection 5.1.4.3: Exposure Time Dependency ----
[ "(", "Electroluminescence", ")", "signal", "is", "not", "stable", "over", "time", "especially", "next", "to", "cracks", ".", "This", "function", "takes", "a", "set", "of", "images", "and", "returns", "parameters", "needed", "to", "transform", "uncertainty", "to", "other", "exposure", "times", "using", "[", "adjustUncertToExposureTime", "]", "return", "[", "signal", "uncertainty", "]", "obtained", "from", "linear", "fit", "to", "[", "imgs", "]", "[", "average", "event", "length", "]", "[", "ascent", "]", "[", "offset", "]", "of", "linear", "fit", "--------", "[", "imgs", "]", "--", ">", "corrected", "EL", "images", "captured", "in", "sequence", "[", "times", "]", "--", ">", "absolute", "measurement", "times", "of", "all", "[", "imgs", "]", "e", ".", "g", ".", "every", "image", "was", "taken", "every", "60", "sec", "then", "times", "=", "60", "120", "180", "...", "[", "down_scale_factor", "]", "--", ">", "down", "scale", "[", "imgs", "]", "to", "speed", "up", "process", "-------", "More", "information", "can", "be", "found", "at", "...", "----", "K", ".", "Bedrich", ":", "Quantitative", "Electroluminescence", "Imaging", "PhD", "Thesis", "2017", "Subsection", "5", ".", "1", ".", "4", ".", "3", ":", "Exposure", "Time", "Dependency", "----" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/uncertainty/temporalSignalStability.py#L12-L80
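A compact numpy sketch of the per-pixel linear fit and residual step at the core of the routine above (linRegressUsingMasked2dArrays itself is not shown in the record); the image stack below is synthetic:

import numpy as np

times = np.array([60., 120., 180., 240.])           # measurement times [s]
rng = np.random.default_rng(1)

# synthetic image stack: per-pixel linear trend plus noise
ascent_true = rng.random((8, 8))
offset_true = 10 * rng.random((8, 8))
imgs = offset_true + times[:, None, None] * ascent_true
imgs += rng.normal(0, 0.1, imgs.shape)

# np.polyfit fits one least-squares line per flattened pixel column:
coeff = np.polyfit(times, imgs.reshape(len(times), -1), 1)
ascent = coeff[0].reshape(8, 8)
offset = coeff[1].reshape(8, 8)

fn_imgs = offset + times[:, None, None] * ascent    # functionally obtained stack
error = np.std(imgs - fn_imgs, axis=0)              # fit residual per pixel
print(np.abs(ascent - ascent_true).max(), error.mean())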
radjkarl/imgProcessor
imgProcessor/camera/flatField/vignettingFromSpotAverage.py
vignettingFromSpotAverage
def vignettingFromSpotAverage( images, bgImages=None, averageSpot=True, thresh=None): ''' [images] --> list of images containing small bright spots generated by the same device images at different positions within the image plane depending on the calibrated waveband the device can be an LCD display or PV 1-cell mini module This method is referred to as 'Method B' in --- K.Bedrich, M.Bokalic et al.: ELECTROLUMINESCENCE IMAGING OF PV DEVICES: ADVANCED FLAT FIELD CALIBRATION,2017 --- Args: averageSpot(bool): True: take only the average intensity of each spot thresh(float): marks the minimum spot value (estimated with Otsu's method otherwise) Returns: * array to be post processed * image mask containing valid positions ''' fitimg, mask = None, None mx = 0 for c, img in enumerate(images): print('%s/%s' % (c + 1, len(images))) if c == 0: avgBg = getBackground2(bgImages, img) img = imread(img, dtype=float) img -= avgBg # init: if fitimg is None: fitimg = np.zeros_like(img) mask = np.zeros_like(img, dtype=bool) # find spot: if thresh is None: t = threshold_otsu(img) else: t = thresh # take brightest spot spots, n = label(minimum_filter(img > t, 3), background=0, return_num=True) spot_sizes = [(spots == i).sum() for i in range(1, n + 1)] try: spot = (spots == np.argmax(spot_sizes) + 1) except ValueError: print("couldn't find spot in image") continue if averageSpot: spot = np.rint(center_of_mass(spot)).astype(int) mx2 = img[spot].max() else: mx2 = img[spot].mean() fitimg[spot] = img[spot] mask[spot] = 1 if mx2 > mx: mx = mx2 # scale [0...1]: fitimg /= mx return fitimg, mask
python
def vignettingFromSpotAverage( images, bgImages=None, averageSpot=True, thresh=None): ''' [images] --> list of images containing small bright spots generated by the same device images at different positions within the image plane depending on the calibrated waveband the device can be an LCD display or PV 1-cell mini module This method is referred to as 'Method B' in --- K.Bedrich, M.Bokalic et al.: ELECTROLUMINESCENCE IMAGING OF PV DEVICES: ADVANCED FLAT FIELD CALIBRATION,2017 --- Args: averageSpot(bool): True: take only the average intensity of each spot thresh(float): marks the minimum spot value (estimated with Otsu's method otherwise) Returns: * array to be post processed * image mask containing valid positions ''' fitimg, mask = None, None mx = 0 for c, img in enumerate(images): print('%s/%s' % (c + 1, len(images))) if c == 0: avgBg = getBackground2(bgImages, img) img = imread(img, dtype=float) img -= avgBg # init: if fitimg is None: fitimg = np.zeros_like(img) mask = np.zeros_like(img, dtype=bool) # find spot: if thresh is None: t = threshold_otsu(img) else: t = thresh # take brightest spot spots, n = label(minimum_filter(img > t, 3), background=0, return_num=True) spot_sizes = [(spots == i).sum() for i in range(1, n + 1)] try: spot = (spots == np.argmax(spot_sizes) + 1) except ValueError: print("couldn't find spot in image") continue if averageSpot: spot = np.rint(center_of_mass(spot)).astype(int) mx2 = img[spot].max() else: mx2 = img[spot].mean() fitimg[spot] = img[spot] mask[spot] = 1 if mx2 > mx: mx = mx2 # scale [0...1]: fitimg /= mx return fitimg, mask
[ "def", "vignettingFromSpotAverage", "(", "images", ",", "bgImages", "=", "None", ",", "averageSpot", "=", "True", ",", "thresh", "=", "None", ")", ":", "fitimg", ",", "mask", "=", "None", ",", "None", "mx", "=", "0", "for", "c", ",", "img", "in", "enumerate", "(", "images", ")", ":", "print", "(", "'%s/%s'", "%", "(", "c", "+", "1", ",", "len", "(", "images", ")", ")", ")", "if", "c", "==", "0", ":", "avgBg", "=", "getBackground2", "(", "bgImages", ",", "img", ")", "img", "=", "imread", "(", "img", ",", "dtype", "=", "float", ")", "img", "-=", "avgBg", "# init:\r", "if", "fitimg", "is", "None", ":", "fitimg", "=", "np", ".", "zeros_like", "(", "img", ")", "mask", "=", "np", ".", "zeros_like", "(", "img", ",", "dtype", "=", "bool", ")", "# find spot:\r", "if", "thresh", "is", "None", ":", "t", "=", "threshold_otsu", "(", "img", ")", "else", ":", "t", "=", "thresh", "# take brightest spot\r", "spots", ",", "n", "=", "label", "(", "minimum_filter", "(", "img", ">", "t", ",", "3", ")", ",", "background", "=", "0", ",", "return_num", "=", "True", ")", "spot_sizes", "=", "[", "(", "spots", "==", "i", ")", ".", "sum", "(", ")", "for", "i", "in", "range", "(", "1", ",", "n", "+", "1", ")", "]", "try", ":", "spot", "=", "(", "spots", "==", "np", ".", "argmax", "(", "spot_sizes", ")", "+", "1", ")", "except", "ValueError", ":", "print", "(", "\"couldn't find spot in image\"", ")", "continue", "if", "averageSpot", ":", "spot", "=", "np", ".", "rint", "(", "center_of_mass", "(", "spot", ")", ")", ".", "astype", "(", "int", ")", "mx2", "=", "img", "[", "spot", "]", ".", "max", "(", ")", "else", ":", "mx2", "=", "img", "[", "spot", "]", ".", "mean", "(", ")", "fitimg", "[", "spot", "]", "=", "img", "[", "spot", "]", "mask", "[", "spot", "]", "=", "1", "if", "mx2", ">", "mx", ":", "mx", "=", "mx2", "# scale [0...1]:\r", "fitimg", "/=", "mx", "return", "fitimg", ",", "mask" ]
[images] --> list of images containing small bright spots generated by the same device images at different positions within the image plane depending on the calibrated waveband the device can be an LCD display or PV 1-cell mini module This method is referred to as 'Method B' in --- K.Bedrich, M.Bokalic et al.: ELECTROLUMINESCENCE IMAGING OF PV DEVICES: ADVANCED FLAT FIELD CALIBRATION,2017 --- Args: averageSpot(bool): True: take only the average intensity of each spot thresh(float): marks the minimum spot value (estimated with Otsu's method otherwise) Returns: * array to be post processed * image mask containing valid positions
[ "[", "images", "]", "--", ">", "list", "of", "images", "containing", "small", "bright", "spots", "generated", "by", "the", "same", "device", "images", "at", "different", "positions", "within", "the", "image", "plane", "depending", "on", "the", "calibrated", "waveband", "the", "device", "can", "be", "an", "LCD", "display", "or", "PV", "1", "-", "cell", "mini", "module", "This", "method", "is", "referred", "to", "as", "Method", "B", "in", "---", "K", ".", "Bedrich", "M", ".", "Bokalic", "et", "al", ".", ":", "ELECTROLUMINESCENCE", "IMAGING", "OF", "PV", "DEVICES", ":", "ADVANCED", "FLAT", "FIELD", "CALIBRATION", "2017", "---", "Args", ":", "averageSpot", "(", "bool", ")", ":", "True", ":", "take", "only", "the", "average", "intensity", "of", "each", "spot", "thresh", "(", "float", ")", ":", "marks", "the", "minimum", "spot", "value", "(", "estimated", "with", "Otsu's", "method", "otherwise", ")", "Returns", ":", "*", "array", "to", "be", "post", "processed", "*", "image", "mask", "containing", "valid", "positions" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/flatField/vignettingFromSpotAverage.py#L15-L85
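A minimal sketch of the spot-detection step used per image above (Otsu threshold, erode, label, keep the largest region), assuming scipy and scikit-image, on a synthetic frame with one bright blob:

import numpy as np
from scipy.ndimage import minimum_filter, center_of_mass
from skimage.filters import threshold_otsu
from skimage.measure import label

rng = np.random.default_rng(2)
img = rng.normal(10, 1, (80, 80))
img[30:40, 50:60] += 100                     # one bright spot

t = threshold_otsu(img)
spots, n = label(minimum_filter(img > t, 3), background=0, return_num=True)
sizes = [(spots == i).sum() for i in range(1, n + 1)]
spot = spots == (np.argmax(sizes) + 1)       # largest connected region

cy, cx = np.rint(center_of_mass(spot)).astype(int)
print((cy, cx), img[cy, cx])                 # center of the detected spot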
radjkarl/imgProcessor
imgProcessor/camera/lens/estimateSystematicErrorLensCorrection.py
simulateSytematicError
def simulateSytematicError(N_SAMPLES=5, N_IMAGES=10, SHOW_DETECTED_PATTERN=True, # GRAYSCALE=False, HEIGHT=500, PLOT_RESULTS=True, PLOT_ERROR_ARRAY=True, CAMERA_PARAM=None, PERSPECTIVE=True, ROTATION=True, RELATIVE_PATTERN_SIZE=0.5, POSITION=True, NOISE=25, BLUR=(3, 3), PATTERNS=None): ''' Simulates a lens calibration using synthetic images * images are rendered under the given HEIGHT resolution * noise and smoothing is applied * perspective and position errors are applied * images are deformed using the given CAMERA_PARAM * the detected camera parameters are used to calculate the error to the given ones simulation ----------- N_IMAGES -> number of images to take for a camera calibration N_SAMPLES -> number of camera calibrations of each pattern type output -------- SHOW_DETECTED_PATTERN: print each image and detected pattern to screen PLOT_RESULTS: plot boxplots of the mean error and std of the camera parameters PLOT_ERROR_ARRAY: plot position error for the lens correction pattern -------- this simulation tests the openCV standard patterns: chess board, asymmetric and symmetric circles GRAYSCALE: whether to load the pattern as gray scale RELATIVE_PATTERN_SIZE: the relative size of the pattern within the image (0.4->40%) PERSPECTIVE: [True] -> enable perspective distortion ROTATION: [True] -> enable rotation of the pattern BLUR: False or (sizex,sizey), like (3,3) CAMERA_PARAM: camera calibration parameters as [fx,fy,cx,cy,k1,k2,k3,p1,p2] ''' print( 'calculate systematic error of the implemented calibration algorithms') # LOCATION OF PATTERN IMAGES folder = MEDIA_PATH if PATTERNS is None: PATTERNS = ('Chessboard', 'Asymmetric circles', 'Symmetric circles') patterns = OrderedDict(( # n of inner corners ('Chessboard', ((6, 9), 'chessboard_pattern_a3.svg')), ('Asymmetric circles', ((4, 11), 'acircles_pattern_a3.svg')), ('Symmetric circles', ((8, 11), 'circles_pattern_a3.svg')), )) # REMOVE PATTERNS THAT ARE NOT TO BE TESTED: [patterns.pop(key) for key in patterns if key not in PATTERNS] if SHOW_DETECTED_PATTERN: cv2.namedWindow('Pattern', cv2.WINDOW_NORMAL) # number of positive detected patterns: success = [] # list[N_SAMPLES] of random camera parameters fx, fy, cx, cy, k1, k2, k3, p1, p2 = [], [], [], [], [], [], [], [], [] # list[Method, N_SAMPLES] of given-detected parameters: errl, fxl, fyl, cxl, cyl, k1l, k2l, k3l, p1l, p2l = [ ], [], [], [], [], [], [], [], [], [] # list[Method, N_SAMPLES] of magnitude(difference of displacement vector # array): dxl = [] dyl = [] # maintain aspect ratio of din a4, a3...: aspect_ratio_DIN = 2.0**0.5 width = int(round(HEIGHT / aspect_ratio_DIN)) if CAMERA_PARAM is None: CAMERA_PARAM = [ HEIGHT, HEIGHT, HEIGHT / 2, width / 2, 0.0, 0.01, 0.1, 0.01, 0.001] # ???CREATE N DIFFERENT RANDOM LENS ERRORS: for n in range(N_SAMPLES): # TODO: RANDOMIZE CAMERA ERROR?? 
fx.append(CAMERA_PARAM[0]) # * np.random.uniform(1, 2) ) fy.append(CAMERA_PARAM[1]) # * np.random.uniform(1, 2) ) cx.append(CAMERA_PARAM[2]) # * np.random.uniform(0.9, 1.1) ) cy.append(CAMERA_PARAM[3]) # * np.random.uniform(0.9, 1.1) ) k1.append(CAMERA_PARAM[4]) # + np.random.uniform(-1, 1)*0.1) k2.append(CAMERA_PARAM[5]) # + np.random.uniform(-1, 1)*0.01) p1.append(CAMERA_PARAM[6]) # + np.random.uniform(0, 1)*0.1) p2.append(CAMERA_PARAM[7]) # + np.random.uniform(0, 1)*0.01) k3.append(CAMERA_PARAM[8]) # + np.random.uniform(0, 1)*0.001) L = LensDistortion() # FOR EVERY METHOD: for method, (board_size, filename) in patterns.items(): f = folder.join(filename) # LOAD THE SVG FILE, AND SAVE IT WITH NEW RESOLUTION: svg = QtSvg.QSvgRenderer(f) image = QtGui.QImage(width * 4, HEIGHT * 4, QtGui.QImage.Format_ARGB32) image.fill(QtCore.Qt.white) # Get QPainter that paints to the image painter = QtGui.QPainter(image) svg.render(painter) # Save, image format based on file extension # f = "rendered.png" # image.save(f) # # if GRAYSCALE: # img = cv2.imread(f, cv2.IMREAD_GRAYSCALE) # else: # img = cv2.imread(f) img = qImageToArray(image) success.append([]) fxl.append([]) errl.append([]) fyl.append([]) cxl.append([]) cyl.append([]) k1l.append([]) k2l.append([]) k3l.append([]) p1l.append([]) p2l.append([]) dxl.append([]) dyl.append([]) imgHeight, imgWidth = img.shape[0], img.shape[1] for n in range(N_SAMPLES): L.calibrate(board_size, method) print('SET PARAMS:', fx[n], fy[n], cx[n], cy[n], k1[n], k2[n], k3[n], p1[n], p2[n]) L.setCameraParams( fx[n], fy[n], cx[n], cy[n], k1[n], k2[n], k3[n], p1[n], p2[n]) L._coeffs['shape'] = (imgHeight, imgWidth) hw = imgWidth * 0.5 hh = imgHeight * 0.5 for m in range(N_IMAGES): pts1 = np.float32([[hw, hh + 100], [hw - 100, hh - 100], [hw + 100, hh - 100]]) pts2 = pts1.copy() if ROTATION: rotatePolygon(pts2, np.random.randint(0, 2 * np.pi)) if PERSPECTIVE: # CREATE A RANDOM PERSPECTIVE: pts2 += np.random.randint(-hw * 0.05, hh * 0.05, size=(3, 2)) # MAKE SURE THAT THE PATTERN IS FULLY WITHIN THE IMAGE: pts2 *= RELATIVE_PATTERN_SIZE # MOVE TO THE CENTER pts2[:, 0] += hw * (1 - RELATIVE_PATTERN_SIZE) pts2[:, 1] += hh * (1 - RELATIVE_PATTERN_SIZE) if POSITION: f = ((2 * np.random.rand(2)) - 1) pts2[:, 0] += hw * 0.7 * f[0] * (1 - RELATIVE_PATTERN_SIZE) pts2[:, 1] += hh * 0.7 * f[1] * (1 - RELATIVE_PATTERN_SIZE) # EXEC PERSPECTICE, POSITION, ROTATION: M = cv2.getAffineTransform(pts1, pts2) img_warped = cv2.warpAffine( img, M, (imgWidth, imgHeight), borderValue=(230, 230, 230)) # DOWNSCALE IMAGE AGAIN - UPSCALING AND DOWNSCALING SHOULD BRING THE ERRROR # WARPING DOWN img_warped = cv2.resize(img_warped, (width, HEIGHT)) # CREATE THE LENS DISTORTION: mapx, mapy = L.getDistortRectifyMap(width, HEIGHT) # print 664, mapx.shape img_distorted = cv2.remap( img_warped, mapx, mapy, cv2.INTER_LINEAR, borderValue=(230, 230, 230)) # img_distorted[img_distorted==0]=20 # img_distorted[img_distorted>100]=230 if BLUR: img_distorted = cv2.blur(img_distorted, BLUR) if NOISE: # soften, black and white more gray, and add noise img_distorted = img_distorted.astype(np.int16) img_distorted += (np.random.rand(*img_distorted.shape) * NOISE).astype(img_distorted.dtype) img_distorted = np.clip( img_distorted, 0, 255).astype(np.uint8) # plt.imshow(img_distorted) # plt.show() found = L.addImg(img_distorted) if SHOW_DETECTED_PATTERN and found: img_distorted = L.drawChessboard(img_distorted) cv2.imshow('Pattern', img_distorted) cv2.waitKey(1) success[-1].append(L.findCount) try: L._coeffs = None 
errl[-1].append(L.coeffs['reprojectionError']) L.correct(img_distorted) c = L.getCameraParams() print('GET PARAMS:', c) fxl[-1].append(fx[n] - c[0]) fyl[-1].append(fy[n] - c[1]) cxl[-1].append(cx[n] - c[2]) cyl[-1].append(cy[n] - c[3]) k1l[-1].append(k1[n] - c[4]) k2l[-1].append(k2[n] - c[5]) k3l[-1].append(k3[n] - c[6]) p1l[-1].append(p1[n] - c[7]) p2l[-1].append(p2[n] - c[8]) if PLOT_ERROR_ARRAY: dx = (mapx - L.mapx) / 2 dy = (mapy - L.mapy) / 2 dxl[-1].append(dx) dyl[-1].append(dy) except NothingFound: print( "Couldn't create a calibration because no patterns were detected") del painter # AVERAGE SAMPLES AND GET STD dx_std, dx_mean = [], [] dy_std, dy_mean = [], [] mag = [] std = [] for patterndx, patterndy in zip(dxl, dyl): x = np.mean(patterndx, axis=0) dx_mean.append(x) y = np.mean(patterndy, axis=0) dy_mean.append(y) x = np.std(patterndx, axis=0) mag.append((x**2 + y**2)**0.5) dx_std.append(x) y = np.std(patterndy, axis=0) dy_std.append(y) std.append((x**2 + y**2)**0.5) # PLOT p = len(patterns) if PLOT_RESULTS: fig, axs = plt.subplots(nrows=2, ncols=5) axs = np.array(axs).ravel() for ax, typ, tname in zip(axs, (success, fxl, fyl, cxl, cyl, k1l, k2l, k3l, p1l, p2l), ('Success rate', 'fx', 'fy', 'cx', 'cy', 'k1', 'k2', 'k3', 'p1', 'p2') ): ax.set_title(tname) # , showmeans=True, meanline=True)#labels=patterns.keys()) ax.boxplot(typ, notch=0, sym='+', vert=1, whis=1.5) # , ha=ha[n]) ax.set_xticklabels(patterns.keys(), rotation=40, fontsize=8) if PLOT_ERROR_ARRAY: mmin = np.min(mag) mmax = np.max(mag) smin = np.min(std) smax = np.max(std) plt.figure() for n, pattern in enumerate(patterns.keys()): plt.subplot(int('2%s%s' % (p, n + 1)), axisbg='g') plt.title(pattern) plt.imshow(mag[n], origin='upper', vmin=mmin, vmax=mmax) if n == p - 1: plt.colorbar(label='Average') plt.subplot(int('2%s%s' % (p, n + p + 1)), axisbg='g') plt.title(pattern) plt.imshow(std[n], origin='upper', vmin=smin, vmax=smax) if n == p - 1: plt.colorbar(label='Standard deviation') fig = plt.figure() fig.suptitle('Individually scaled') for n, pattern in enumerate(patterns.keys()): # downscale - show max 30 arrows each dimension sy, sx = dx_mean[n].shape ix = int(sx / 15) if ix < 1: ix = 1 iy = int(sy / 15) if iy < 1: iy = 1 Y, X = np.meshgrid(np.arange(0, sy, iy), np.arange(0, sx, ix)) plt.subplot(int('2%s%s' % (p, n + 1)), axisbg='g') plt.title(pattern) plt.imshow(mag[n], origin='upper') plt.colorbar() plt.quiver( X, Y, dy_mean[n][::ix, ::iy] * 20, dx_mean[n][::ix, ::iy] * 20) plt.subplot(int('2%s%s' % (p, n + p + 1)), axisbg='g') plt.title(pattern) plt.imshow(std[n], origin='upper') plt.colorbar() # plt.quiver(X,Y,dx_std[n][::ix,::iy]*50, dy_std[n][::ix,::iy]*10) ############################################# fig = plt.figure() fig.suptitle('Spatial uncertainty + deflection') for n, pattern in enumerate(patterns.keys()): L.calibrate(board_size, method) # there is alot of additional calc thats not necassary: L.setCameraParams( fx[0], fy[0], cx[0], cy[0], k1[0], k2[0], k3[0], p1[0], p2[0]) L._coeffs['shape'] = (imgHeight, imgWidth) L._coeffs['reprojectionError'] = np.mean(errl[n]) # deflection_x, deflection_y = L.getDeflection(width, HEIGHT) # deflection_x += dx_mean[n] # deflection_y += dy_mean[n] ux, uy = L.standardUncertainties() plt.subplot(int('2%s%s' % (p, n + 1)), axisbg='g') plt.title(pattern) plt.imshow(mag[n], origin='upper') plt.colorbar() # DEFLECTION plt.subplot(int('2%s%s' % (p, n + p + 1)), axisbg='g') plt.title(pattern) plt.imshow(np.linalg.norm([ux, uy], axis=0), origin='upper') plt.colorbar() # DEFL: 
VECTORS # downscale - show max 30 arrows each dimension sy, sx = dx_mean[n].shape ix = int(sx / 15) if ix < 1: ix = 1 iy = int(sy / 15) if iy < 1: iy = 1 Y, X = np.meshgrid(np.arange(0, sy, iy), np.arange(0, sx, ix)) plt.quiver(X, Y, ux[::ix, ::iy] * 20, uy[::ix, ::iy] * 20) if PLOT_ERROR_ARRAY or PLOT_RESULTS: plt.show() return dx_mean, dy_mean
python
def simulateSytematicError(N_SAMPLES=5, N_IMAGES=10, SHOW_DETECTED_PATTERN=True, # GRAYSCALE=False, HEIGHT=500, PLOT_RESULTS=True, PLOT_ERROR_ARRAY=True, CAMERA_PARAM=None, PERSPECTIVE=True, ROTATION=True, RELATIVE_PATTERN_SIZE=0.5, POSITION=True, NOISE=25, BLUR=(3, 3), PATTERNS=None): ''' Simulates a lens calibration using synthetic images * images are rendered under the given HEIGHT resolution * noise and smoothing is applied * perspective and position errors are applied * images are deformed using the given CAMERA_PARAM * the detected camera parameters are used to calculate the error to the given ones simulation ----------- N_IMAGES -> number of images to take for a camera calibration N_SAMPLES -> number of camera calibrations of each pattern type output -------- SHOW_DETECTED_PATTERN: print each image and detected pattern to screen PLOT_RESULTS: plot boxplots of the mean error and std of the camera parameters PLOT_ERROR_ARRAY: plot position error for the lens correction pattern -------- this simulation tests the openCV standard patterns: chess board, asymmetric and symmetric circles GRAYSCALE: whether to load the pattern as gray scale RELATIVE_PATTERN_SIZE: the relative size of the pattern within the image (0.4->40%) PERSPECTIVE: [True] -> enable perspective distortion ROTATION: [True] -> enable rotation of the pattern BLUR: False or (sizex,sizey), like (3,3) CAMERA_PARAM: camera calibration parameters as [fx,fy,cx,cy,k1,k2,k3,p1,p2] ''' print( 'calculate systematic error of the implemented calibration algorithms') # LOCATION OF PATTERN IMAGES folder = MEDIA_PATH if PATTERNS is None: PATTERNS = ('Chessboard', 'Asymmetric circles', 'Symmetric circles') patterns = OrderedDict(( # n of inner corners ('Chessboard', ((6, 9), 'chessboard_pattern_a3.svg')), ('Asymmetric circles', ((4, 11), 'acircles_pattern_a3.svg')), ('Symmetric circles', ((8, 11), 'circles_pattern_a3.svg')), )) # REMOVE PATTERNS THAT ARE NOT TO BE TESTED: [patterns.pop(key) for key in patterns if key not in PATTERNS] if SHOW_DETECTED_PATTERN: cv2.namedWindow('Pattern', cv2.WINDOW_NORMAL) # number of positive detected patterns: success = [] # list[N_SAMPLES] of random camera parameters fx, fy, cx, cy, k1, k2, k3, p1, p2 = [], [], [], [], [], [], [], [], [] # list[Method, N_SAMPLES] of given-detected parameters: errl, fxl, fyl, cxl, cyl, k1l, k2l, k3l, p1l, p2l = [ ], [], [], [], [], [], [], [], [], [] # list[Method, N_SAMPLES] of magnitude(difference of displacement vector # array): dxl = [] dyl = [] # maintain aspect ratio of din a4, a3...: aspect_ratio_DIN = 2.0**0.5 width = int(round(HEIGHT / aspect_ratio_DIN)) if CAMERA_PARAM is None: CAMERA_PARAM = [ HEIGHT, HEIGHT, HEIGHT / 2, width / 2, 0.0, 0.01, 0.1, 0.01, 0.001] # ???CREATE N DIFFERENT RANDOM LENS ERRORS: for n in range(N_SAMPLES): # TODO: RANDOMIZE CAMERA ERROR?? 
fx.append(CAMERA_PARAM[0]) # * np.random.uniform(1, 2) ) fy.append(CAMERA_PARAM[1]) # * np.random.uniform(1, 2) ) cx.append(CAMERA_PARAM[2]) # * np.random.uniform(0.9, 1.1) ) cy.append(CAMERA_PARAM[3]) # * np.random.uniform(0.9, 1.1) ) k1.append(CAMERA_PARAM[4]) # + np.random.uniform(-1, 1)*0.1) k2.append(CAMERA_PARAM[5]) # + np.random.uniform(-1, 1)*0.01) p1.append(CAMERA_PARAM[6]) # + np.random.uniform(0, 1)*0.1) p2.append(CAMERA_PARAM[7]) # + np.random.uniform(0, 1)*0.01) k3.append(CAMERA_PARAM[8]) # + np.random.uniform(0, 1)*0.001) L = LensDistortion() # FOR EVERY METHOD: for method, (board_size, filename) in patterns.items(): f = folder.join(filename) # LOAD THE SVG FILE, AND SAVE IT WITH NEW RESOLUTION: svg = QtSvg.QSvgRenderer(f) image = QtGui.QImage(width * 4, HEIGHT * 4, QtGui.QImage.Format_ARGB32) image.fill(QtCore.Qt.white) # Get QPainter that paints to the image painter = QtGui.QPainter(image) svg.render(painter) # Save, image format based on file extension # f = "rendered.png" # image.save(f) # # if GRAYSCALE: # img = cv2.imread(f, cv2.IMREAD_GRAYSCALE) # else: # img = cv2.imread(f) img = qImageToArray(image) success.append([]) fxl.append([]) errl.append([]) fyl.append([]) cxl.append([]) cyl.append([]) k1l.append([]) k2l.append([]) k3l.append([]) p1l.append([]) p2l.append([]) dxl.append([]) dyl.append([]) imgHeight, imgWidth = img.shape[0], img.shape[1] for n in range(N_SAMPLES): L.calibrate(board_size, method) print('SET PARAMS:', fx[n], fy[n], cx[n], cy[n], k1[n], k2[n], k3[n], p1[n], p2[n]) L.setCameraParams( fx[n], fy[n], cx[n], cy[n], k1[n], k2[n], k3[n], p1[n], p2[n]) L._coeffs['shape'] = (imgHeight, imgWidth) hw = imgWidth * 0.5 hh = imgHeight * 0.5 for m in range(N_IMAGES): pts1 = np.float32([[hw, hh + 100], [hw - 100, hh - 100], [hw + 100, hh - 100]]) pts2 = pts1.copy() if ROTATION: # draw a continuous angle in radians (randint would quantize it): rotatePolygon(pts2, np.random.uniform(0, 2 * np.pi)) if PERSPECTIVE: # CREATE A RANDOM PERSPECTIVE: pts2 += np.random.randint(-hw * 0.05, hh * 0.05, size=(3, 2)) # MAKE SURE THAT THE PATTERN IS FULLY WITHIN THE IMAGE: pts2 *= RELATIVE_PATTERN_SIZE # MOVE TO THE CENTER pts2[:, 0] += hw * (1 - RELATIVE_PATTERN_SIZE) pts2[:, 1] += hh * (1 - RELATIVE_PATTERN_SIZE) if POSITION: f = ((2 * np.random.rand(2)) - 1) pts2[:, 0] += hw * 0.7 * f[0] * (1 - RELATIVE_PATTERN_SIZE) pts2[:, 1] += hh * 0.7 * f[1] * (1 - RELATIVE_PATTERN_SIZE) # EXEC PERSPECTIVE, POSITION, ROTATION: M = cv2.getAffineTransform(pts1, pts2) img_warped = cv2.warpAffine( img, M, (imgWidth, imgHeight), borderValue=(230, 230, 230)) # DOWNSCALE IMAGE AGAIN - UPSCALING AND DOWNSCALING SHOULD BRING THE ERROR # FROM WARPING DOWN img_warped = cv2.resize(img_warped, (width, HEIGHT)) # CREATE THE LENS DISTORTION: mapx, mapy = L.getDistortRectifyMap(width, HEIGHT) # print 664, mapx.shape img_distorted = cv2.remap( img_warped, mapx, mapy, cv2.INTER_LINEAR, borderValue=(230, 230, 230)) # img_distorted[img_distorted==0]=20 # img_distorted[img_distorted>100]=230 if BLUR: img_distorted = cv2.blur(img_distorted, BLUR) if NOISE: # soften; make black and white more gray, and add noise img_distorted = img_distorted.astype(np.int16) img_distorted += (np.random.rand(*img_distorted.shape) * NOISE).astype(img_distorted.dtype) img_distorted = np.clip( img_distorted, 0, 255).astype(np.uint8) # plt.imshow(img_distorted) # plt.show() found = L.addImg(img_distorted) if SHOW_DETECTED_PATTERN and found: img_distorted = L.drawChessboard(img_distorted) cv2.imshow('Pattern', img_distorted) cv2.waitKey(1) success[-1].append(L.findCount) try: L._coeffs = None
errl[-1].append(L.coeffs['reprojectionError']) L.correct(img_distorted) c = L.getCameraParams() print('GET PARAMS:', c) fxl[-1].append(fx[n] - c[0]) fyl[-1].append(fy[n] - c[1]) cxl[-1].append(cx[n] - c[2]) cyl[-1].append(cy[n] - c[3]) k1l[-1].append(k1[n] - c[4]) k2l[-1].append(k2[n] - c[5]) k3l[-1].append(k3[n] - c[6]) p1l[-1].append(p1[n] - c[7]) p2l[-1].append(p2[n] - c[8]) if PLOT_ERROR_ARRAY: dx = (mapx - L.mapx) / 2 dy = (mapy - L.mapy) / 2 dxl[-1].append(dx) dyl[-1].append(dy) except NothingFound: print( "Couldn't create a calibration because no patterns were detected") del painter # AVERAGE SAMPLES AND GET STD dx_std, dx_mean = [], [] dy_std, dy_mean = [], [] mag = [] std = [] for patterndx, patterndy in zip(dxl, dyl): x = np.mean(patterndx, axis=0) dx_mean.append(x) y = np.mean(patterndy, axis=0) dy_mean.append(y) mag.append((x**2 + y**2)**0.5) x = np.std(patterndx, axis=0) dx_std.append(x) y = np.std(patterndy, axis=0) dy_std.append(y) std.append((x**2 + y**2)**0.5) # PLOT p = len(patterns) if PLOT_RESULTS: fig, axs = plt.subplots(nrows=2, ncols=5) axs = np.array(axs).ravel() for ax, typ, tname in zip(axs, (success, fxl, fyl, cxl, cyl, k1l, k2l, k3l, p1l, p2l), ('Success rate', 'fx', 'fy', 'cx', 'cy', 'k1', 'k2', 'k3', 'p1', 'p2') ): ax.set_title(tname) # , showmeans=True, meanline=True)#labels=patterns.keys()) ax.boxplot(typ, notch=0, sym='+', vert=1, whis=1.5) # , ha=ha[n]) ax.set_xticklabels(patterns.keys(), rotation=40, fontsize=8) if PLOT_ERROR_ARRAY: mmin = np.min(mag) mmax = np.max(mag) smin = np.min(std) smax = np.max(std) plt.figure() for n, pattern in enumerate(patterns.keys()): plt.subplot(int('2%s%s' % (p, n + 1)), facecolor='g') plt.title(pattern) plt.imshow(mag[n], origin='upper', vmin=mmin, vmax=mmax) if n == p - 1: plt.colorbar(label='Average') plt.subplot(int('2%s%s' % (p, n + p + 1)), facecolor='g') plt.title(pattern) plt.imshow(std[n], origin='upper', vmin=smin, vmax=smax) if n == p - 1: plt.colorbar(label='Standard deviation') fig = plt.figure() fig.suptitle('Individually scaled') for n, pattern in enumerate(patterns.keys()): # downscale - show max 30 arrows each dimension sy, sx = dx_mean[n].shape ix = int(sx / 15) if ix < 1: ix = 1 iy = int(sy / 15) if iy < 1: iy = 1 Y, X = np.meshgrid(np.arange(0, sy, iy), np.arange(0, sx, ix)) plt.subplot(int('2%s%s' % (p, n + 1)), facecolor='g') plt.title(pattern) plt.imshow(mag[n], origin='upper') plt.colorbar() plt.quiver( X, Y, dy_mean[n][::ix, ::iy] * 20, dx_mean[n][::ix, ::iy] * 20) plt.subplot(int('2%s%s' % (p, n + p + 1)), facecolor='g') plt.title(pattern) plt.imshow(std[n], origin='upper') plt.colorbar() # plt.quiver(X,Y,dx_std[n][::ix,::iy]*50, dy_std[n][::ix,::iy]*10) ############################################# fig = plt.figure() fig.suptitle('Spatial uncertainty + deflection') for n, pattern in enumerate(patterns.keys()): L.calibrate(board_size, method) # there is a lot of additional calculation here that is not necessary: L.setCameraParams( fx[0], fy[0], cx[0], cy[0], k1[0], k2[0], k3[0], p1[0], p2[0]) L._coeffs['shape'] = (imgHeight, imgWidth) L._coeffs['reprojectionError'] = np.mean(errl[n]) # deflection_x, deflection_y = L.getDeflection(width, HEIGHT) # deflection_x += dx_mean[n] # deflection_y += dy_mean[n] ux, uy = L.standardUncertainties() plt.subplot(int('2%s%s' % (p, n + 1)), facecolor='g') plt.title(pattern) plt.imshow(mag[n], origin='upper') plt.colorbar() # DEFLECTION plt.subplot(int('2%s%s' % (p, n + p + 1)), facecolor='g') plt.title(pattern) plt.imshow(np.linalg.norm([ux, uy], axis=0), origin='upper') plt.colorbar() # DEFL:
VECTORS # downscale - show max 30 arrows each dimension sy, sx = dx_mean[n].shape ix = int(sx / 15) if ix < 1: ix = 1 iy = int(sy / 15) if iy < 1: iy = 1 Y, X = np.meshgrid(np.arange(0, sy, iy), np.arange(0, sx, ix)) plt.quiver(X, Y, ux[::ix, ::iy] * 20, uy[::ix, ::iy] * 20) if PLOT_ERROR_ARRAY or PLOT_RESULTS: plt.show() return dx_mean, dy_mean
[ "def", "simulateSytematicError", "(", "N_SAMPLES", "=", "5", ",", "N_IMAGES", "=", "10", ",", "SHOW_DETECTED_PATTERN", "=", "True", ",", "# GRAYSCALE=False,\r", "HEIGHT", "=", "500", ",", "PLOT_RESULTS", "=", "True", ",", "PLOT_ERROR_ARRAY", "=", "True", ",", "CAMERA_PARAM", "=", "None", ",", "PERSPECTIVE", "=", "True", ",", "ROTATION", "=", "True", ",", "RELATIVE_PATTERN_SIZE", "=", "0.5", ",", "POSITION", "=", "True", ",", "NOISE", "=", "25", ",", "BLUR", "=", "(", "3", ",", "3", ")", ",", "PATTERNS", "=", "None", ")", ":", "print", "(", "'calculate systematic error of the implemented calibration algorithms'", ")", "# LOCATION OF PATTERN IMAGES\r", "folder", "=", "MEDIA_PATH", "if", "PATTERNS", "is", "None", ":", "PATTERNS", "=", "(", "'Chessboard'", ",", "'Asymmetric circles'", ",", "'Symmetric circles'", ")", "patterns", "=", "OrderedDict", "(", "(", "# n of inner corners\r", "(", "'Chessboard'", ",", "(", "(", "6", ",", "9", ")", ",", "'chessboard_pattern_a3.svg'", ")", ")", ",", "(", "'Asymmetric circles'", ",", "(", "(", "4", ",", "11", ")", ",", "'acircles_pattern_a3.svg'", ")", ")", ",", "(", "'Symmetric circles'", ",", "(", "(", "8", ",", "11", ")", ",", "'circles_pattern_a3.svg'", ")", ")", ",", ")", ")", "# REMOVE PATTERNS THAT ARE NOT TO BE TESTED:\r", "[", "patterns", ".", "pop", "(", "key", ")", "for", "key", "in", "patterns", "if", "key", "not", "in", "PATTERNS", "]", "if", "SHOW_DETECTED_PATTERN", ":", "cv2", ".", "namedWindow", "(", "'Pattern'", ",", "cv2", ".", "WINDOW_NORMAL", ")", "# number of positive detected patterns:\r", "success", "=", "[", "]", "# list[N_SAMPLES] of random camera parameters\r", "fx", ",", "fy", ",", "cx", ",", "cy", ",", "k1", ",", "k2", ",", "k3", ",", "p1", ",", "p2", "=", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", "# list[Method, N_SAMPLES] of given-detected parameters:\r", "errl", ",", "fxl", ",", "fyl", ",", "cxl", ",", "cyl", ",", "k1l", ",", "k2l", ",", "k3l", ",", "p1l", ",", "p2l", "=", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", "# list[Method, N_SAMPLES] of magnitude(difference of displacement vector\r", "# array):\r", "dxl", "=", "[", "]", "dyl", "=", "[", "]", "# maintain aspect ratio of din a4, a3...:\r", "aspect_ratio_DIN", "=", "2.0", "**", "0.5", "width", "=", "int", "(", "round", "(", "HEIGHT", "/", "aspect_ratio_DIN", ")", ")", "if", "CAMERA_PARAM", "is", "None", ":", "CAMERA_PARAM", "=", "[", "HEIGHT", ",", "HEIGHT", ",", "HEIGHT", "/", "2", ",", "width", "/", "2", ",", "0.0", ",", "0.01", ",", "0.1", ",", "0.01", ",", "0.001", "]", "# ???CREATE N DIFFERENT RANDOM LENS ERRORS:\r", "for", "n", "in", "range", "(", "N_SAMPLES", ")", ":", "# TODO: RANDOMIZE CAMERA ERROR??\r", "fx", ".", "append", "(", "CAMERA_PARAM", "[", "0", "]", ")", "# * np.random.uniform(1, 2) )\r", "fy", ".", "append", "(", "CAMERA_PARAM", "[", "1", "]", ")", "# * np.random.uniform(1, 2) )\r", "cx", ".", "append", "(", "CAMERA_PARAM", "[", "2", "]", ")", "# * np.random.uniform(0.9, 1.1) )\r", "cy", ".", "append", "(", "CAMERA_PARAM", "[", "3", "]", ")", "# * np.random.uniform(0.9, 1.1) )\r", "k1", ".", "append", "(", "CAMERA_PARAM", "[", "4", "]", ")", "# + np.random.uniform(-1, 1)*0.1)\r", "k2", ".", "append", "(", "CAMERA_PARAM", "[", "5", "]", ")", "# + np.random.uniform(-1, 1)*0.01)\r", "p1", ".", "append", "(", "CAMERA_PARAM", "[", "6", "]", ")", "# + 
np.random.uniform(0, 1)*0.1)\r", "p2", ".", "append", "(", "CAMERA_PARAM", "[", "7", "]", ")", "# + np.random.uniform(0, 1)*0.01)\r", "k3", ".", "append", "(", "CAMERA_PARAM", "[", "8", "]", ")", "# + np.random.uniform(0, 1)*0.001)\r", "L", "=", "LensDistortion", "(", ")", "# FOR EVERY METHOD:\r", "for", "method", ",", "(", "board_size", ",", "filename", ")", "in", "patterns", ".", "items", "(", ")", ":", "f", "=", "folder", ".", "join", "(", "filename", ")", "# LOAD THE SVG FILE, AND SAVE IT WITH NEW RESOLUTION:\r", "svg", "=", "QtSvg", ".", "QSvgRenderer", "(", "f", ")", "image", "=", "QtGui", ".", "QImage", "(", "width", "*", "4", ",", "HEIGHT", "*", "4", ",", "QtGui", ".", "QImage", ".", "Format_ARGB32", ")", "image", ".", "fill", "(", "QtCore", ".", "Qt", ".", "white", ")", "# Get QPainter that paints to the image\r", "painter", "=", "QtGui", ".", "QPainter", "(", "image", ")", "svg", ".", "render", "(", "painter", ")", "# Save, image format based on file extension\r", "# f = \"rendered.png\"\r", "# image.save(f)\r", "#\r", "# if GRAYSCALE:\r", "# img = cv2.imread(f, cv2.IMREAD_GRAYSCALE)\r", "# else:\r", "# img = cv2.imread(f)\r", "img", "=", "qImageToArray", "(", "image", ")", "success", ".", "append", "(", "[", "]", ")", "fxl", ".", "append", "(", "[", "]", ")", "errl", ".", "append", "(", "[", "]", ")", "fyl", ".", "append", "(", "[", "]", ")", "cxl", ".", "append", "(", "[", "]", ")", "cyl", ".", "append", "(", "[", "]", ")", "k1l", ".", "append", "(", "[", "]", ")", "k2l", ".", "append", "(", "[", "]", ")", "k3l", ".", "append", "(", "[", "]", ")", "p1l", ".", "append", "(", "[", "]", ")", "p2l", ".", "append", "(", "[", "]", ")", "dxl", ".", "append", "(", "[", "]", ")", "dyl", ".", "append", "(", "[", "]", ")", "imgHeight", ",", "imgWidth", "=", "img", ".", "shape", "[", "0", "]", ",", "img", ".", "shape", "[", "1", "]", "for", "n", "in", "range", "(", "N_SAMPLES", ")", ":", "L", ".", "calibrate", "(", "board_size", ",", "method", ")", "print", "(", "'SET PARAMS:'", ",", "fx", "[", "n", "]", ",", "fy", "[", "n", "]", ",", "cx", "[", "n", "]", ",", "cy", "[", "n", "]", ",", "k1", "[", "n", "]", ",", "k2", "[", "n", "]", ",", "k3", "[", "n", "]", ",", "p1", "[", "n", "]", ",", "p2", "[", "n", "]", ")", "L", ".", "setCameraParams", "(", "fx", "[", "n", "]", ",", "fy", "[", "n", "]", ",", "cx", "[", "n", "]", ",", "cy", "[", "n", "]", ",", "k1", "[", "n", "]", ",", "k2", "[", "n", "]", ",", "k3", "[", "n", "]", ",", "p1", "[", "n", "]", ",", "p2", "[", "n", "]", ")", "L", ".", "_coeffs", "[", "'shape'", "]", "=", "(", "imgHeight", ",", "imgWidth", ")", "hw", "=", "imgWidth", "*", "0.5", "hh", "=", "imgHeight", "*", "0.5", "for", "m", "in", "range", "(", "N_IMAGES", ")", ":", "pts1", "=", "np", ".", "float32", "(", "[", "[", "hw", ",", "hh", "+", "100", "]", ",", "[", "hw", "-", "100", ",", "hh", "-", "100", "]", ",", "[", "hw", "+", "100", ",", "hh", "-", "100", "]", "]", ")", "pts2", "=", "pts1", ".", "copy", "(", ")", "if", "ROTATION", ":", "rotatePolygon", "(", "pts2", ",", "np", ".", "random", ".", "randint", "(", "0", ",", "2", "*", "np", ".", "pi", ")", ")", "if", "PERSPECTIVE", ":", "# CREATE A RANDOM PERSPECTIVE:\r", "pts2", "+=", "np", ".", "random", ".", "randint", "(", "-", "hw", "*", "0.05", ",", "hh", "*", "0.05", ",", "size", "=", "(", "3", ",", "2", ")", ")", "# MAKE SURE THAT THE PATTERN IS FULLY WITHIN THE IMAGE:\r", "pts2", "*=", "RELATIVE_PATTERN_SIZE", "# MOVE TO THE CENTER\r", "pts2", "[", ":", ",", "0", "]", "+=", "hw", "*", "(", "1", "-", 
"RELATIVE_PATTERN_SIZE", ")", "pts2", "[", ":", ",", "1", "]", "+=", "hh", "*", "(", "1", "-", "RELATIVE_PATTERN_SIZE", ")", "if", "POSITION", ":", "f", "=", "(", "(", "2", "*", "np", ".", "random", ".", "rand", "(", "2", ")", ")", "-", "1", ")", "pts2", "[", ":", ",", "0", "]", "+=", "hw", "*", "0.7", "*", "f", "[", "0", "]", "*", "(", "1", "-", "RELATIVE_PATTERN_SIZE", ")", "pts2", "[", ":", ",", "1", "]", "+=", "hh", "*", "0.7", "*", "f", "[", "1", "]", "*", "(", "1", "-", "RELATIVE_PATTERN_SIZE", ")", "# EXEC PERSPECTICE, POSITION, ROTATION:\r", "M", "=", "cv2", ".", "getAffineTransform", "(", "pts1", ",", "pts2", ")", "img_warped", "=", "cv2", ".", "warpAffine", "(", "img", ",", "M", ",", "(", "imgWidth", ",", "imgHeight", ")", ",", "borderValue", "=", "(", "230", ",", "230", ",", "230", ")", ")", "# DOWNSCALE IMAGE AGAIN - UPSCALING AND DOWNSCALING SHOULD BRING THE ERRROR\r", "# WARPING DOWN\r", "img_warped", "=", "cv2", ".", "resize", "(", "img_warped", ",", "(", "width", ",", "HEIGHT", ")", ")", "# CREATE THE LENS DISTORTION:\r", "mapx", ",", "mapy", "=", "L", ".", "getDistortRectifyMap", "(", "width", ",", "HEIGHT", ")", "# print 664, mapx.shape\r", "img_distorted", "=", "cv2", ".", "remap", "(", "img_warped", ",", "mapx", ",", "mapy", ",", "cv2", ".", "INTER_LINEAR", ",", "borderValue", "=", "(", "230", ",", "230", ",", "230", ")", ")", "# img_distorted[img_distorted==0]=20\r", "# img_distorted[img_distorted>100]=230\r", "if", "BLUR", ":", "img_distorted", "=", "cv2", ".", "blur", "(", "img_distorted", ",", "BLUR", ")", "if", "NOISE", ":", "# soften, black and white more gray, and add noise\r", "img_distorted", "=", "img_distorted", ".", "astype", "(", "np", ".", "int16", ")", "img_distorted", "+=", "(", "np", ".", "random", ".", "rand", "(", "*", "img_distorted", ".", "shape", ")", "*", "NOISE", ")", ".", "astype", "(", "img_distorted", ".", "dtype", ")", "img_distorted", "=", "np", ".", "clip", "(", "img_distorted", ",", "0", ",", "255", ")", ".", "astype", "(", "np", ".", "uint8", ")", "# plt.imshow(img_distorted)\r", "# plt.show()\r", "found", "=", "L", ".", "addImg", "(", "img_distorted", ")", "if", "SHOW_DETECTED_PATTERN", "and", "found", ":", "img_distorted", "=", "L", ".", "drawChessboard", "(", "img_distorted", ")", "cv2", ".", "imshow", "(", "'Pattern'", ",", "img_distorted", ")", "cv2", ".", "waitKey", "(", "1", ")", "success", "[", "-", "1", "]", ".", "append", "(", "L", ".", "findCount", ")", "try", ":", "L", ".", "_coeffs", "=", "None", "errl", "[", "-", "1", "]", ".", "append", "(", "L", ".", "coeffs", "[", "'reprojectionError'", "]", ")", "L", ".", "correct", "(", "img_distorted", ")", "c", "=", "L", ".", "getCameraParams", "(", ")", "print", "(", "'GET PARAMS:'", ",", "c", ")", "fxl", "[", "-", "1", "]", ".", "append", "(", "fx", "[", "n", "]", "-", "c", "[", "0", "]", ")", "fyl", "[", "-", "1", "]", ".", "append", "(", "fy", "[", "n", "]", "-", "c", "[", "1", "]", ")", "cxl", "[", "-", "1", "]", ".", "append", "(", "cx", "[", "n", "]", "-", "c", "[", "2", "]", ")", "cyl", "[", "-", "1", "]", ".", "append", "(", "cy", "[", "n", "]", "-", "c", "[", "3", "]", ")", "k1l", "[", "-", "1", "]", ".", "append", "(", "k1", "[", "n", "]", "-", "c", "[", "4", "]", ")", "k2l", "[", "-", "1", "]", ".", "append", "(", "k2", "[", "n", "]", "-", "c", "[", "5", "]", ")", "k3l", "[", "-", "1", "]", ".", "append", "(", "k3", "[", "n", "]", "-", "c", "[", "6", "]", ")", "p1l", "[", "-", "1", "]", ".", "append", "(", "p1", "[", "n", "]", "-", "c", "[", "7", "]", ")", "p2l", "[", "-", 
"1", "]", ".", "append", "(", "p2", "[", "n", "]", "-", "c", "[", "8", "]", ")", "if", "PLOT_ERROR_ARRAY", ":", "dx", "=", "(", "mapx", "-", "L", ".", "mapx", ")", "/", "2", "dy", "=", "(", "mapy", "-", "L", ".", "mapy", ")", "/", "2", "dxl", "[", "-", "1", "]", ".", "append", "(", "dx", ")", "dyl", "[", "-", "1", "]", ".", "append", "(", "dy", ")", "except", "NothingFound", ":", "print", "(", "\"Couldn't create a calibration because no patterns were detected\"", ")", "del", "painter", "# AVERAGE SAMPLES AND GET STD\r", "dx_std", ",", "dx_mean", "=", "[", "]", ",", "[", "]", "dy_std", ",", "dy_mean", "=", "[", "]", ",", "[", "]", "mag", "=", "[", "]", "std", "=", "[", "]", "for", "patterndx", ",", "patterndy", "in", "zip", "(", "dxl", ",", "dyl", ")", ":", "x", "=", "np", ".", "mean", "(", "patterndx", ",", "axis", "=", "0", ")", "dx_mean", ".", "append", "(", "x", ")", "y", "=", "np", ".", "mean", "(", "patterndy", ",", "axis", "=", "0", ")", "dy_mean", ".", "append", "(", "y", ")", "x", "=", "np", ".", "std", "(", "patterndx", ",", "axis", "=", "0", ")", "mag", ".", "append", "(", "(", "x", "**", "2", "+", "y", "**", "2", ")", "**", "0.5", ")", "dx_std", ".", "append", "(", "x", ")", "y", "=", "np", ".", "std", "(", "patterndy", ",", "axis", "=", "0", ")", "dy_std", ".", "append", "(", "y", ")", "std", ".", "append", "(", "(", "x", "**", "2", "+", "y", "**", "2", ")", "**", "0.5", ")", "# PLOT\r", "p", "=", "len", "(", "patterns", ")", "if", "PLOT_RESULTS", ":", "fig", ",", "axs", "=", "plt", ".", "subplots", "(", "nrows", "=", "2", ",", "ncols", "=", "5", ")", "axs", "=", "np", ".", "array", "(", "axs", ")", ".", "ravel", "(", ")", "for", "ax", ",", "typ", ",", "tname", "in", "zip", "(", "axs", ",", "(", "success", ",", "fxl", ",", "fyl", ",", "cxl", ",", "cyl", ",", "k1l", ",", "k2l", ",", "k3l", ",", "p1l", ",", "p2l", ")", ",", "(", "'Success rate'", ",", "'fx'", ",", "'fy'", ",", "'cx'", ",", "'cy'", ",", "'k1'", ",", "'k2'", ",", "'k3'", ",", "'p1'", ",", "'p2'", ")", ")", ":", "ax", ".", "set_title", "(", "tname", ")", "# , showmeans=True, meanline=True)#labels=patterns.keys())\r", "ax", ".", "boxplot", "(", "typ", ",", "notch", "=", "0", ",", "sym", "=", "'+'", ",", "vert", "=", "1", ",", "whis", "=", "1.5", ")", "# , ha=ha[n])\r", "ax", ".", "set_xticklabels", "(", "patterns", ".", "keys", "(", ")", ",", "rotation", "=", "40", ",", "fontsize", "=", "8", ")", "if", "PLOT_ERROR_ARRAY", ":", "mmin", "=", "np", ".", "min", "(", "mag", ")", "mmax", "=", "np", ".", "max", "(", "mag", ")", "smin", "=", "np", ".", "min", "(", "std", ")", "smax", "=", "np", ".", "max", "(", "std", ")", "plt", ".", "figure", "(", ")", "for", "n", ",", "pattern", "in", "enumerate", "(", "patterns", ".", "keys", "(", ")", ")", ":", "plt", ".", "subplot", "(", "int", "(", "'2%s%s'", "%", "(", "p", ",", "n", "+", "1", ")", ")", ",", "axisbg", "=", "'g'", ")", "plt", ".", "title", "(", "pattern", ")", "plt", ".", "imshow", "(", "mag", "[", "n", "]", ",", "origin", "=", "'upper'", ",", "vmin", "=", "mmin", ",", "vmax", "=", "mmax", ")", "if", "n", "==", "p", "-", "1", ":", "plt", ".", "colorbar", "(", "label", "=", "'Average'", ")", "plt", ".", "subplot", "(", "int", "(", "'2%s%s'", "%", "(", "p", ",", "n", "+", "p", "+", "1", ")", ")", ",", "axisbg", "=", "'g'", ")", "plt", ".", "title", "(", "pattern", ")", "plt", ".", "imshow", "(", "std", "[", "n", "]", ",", "origin", "=", "'upper'", ",", "vmin", "=", "smin", ",", "vmax", "=", "smax", ")", "if", "n", "==", "p", "-", "1", ":", "plt", ".", "colorbar", "(", 
"label", "=", "'Standard deviation'", ")", "fig", "=", "plt", ".", "figure", "(", ")", "fig", ".", "suptitle", "(", "'Individually scaled'", ")", "for", "n", ",", "pattern", "in", "enumerate", "(", "patterns", ".", "keys", "(", ")", ")", ":", "# downscale - show max 30 arrows each dimension\r", "sy", ",", "sx", "=", "dx_mean", "[", "n", "]", ".", "shape", "ix", "=", "int", "(", "sx", "/", "15", ")", "if", "ix", "<", "1", ":", "ix", "=", "1", "iy", "=", "int", "(", "sy", "/", "15", ")", "if", "iy", "<", "1", ":", "iy", "=", "1", "Y", ",", "X", "=", "np", ".", "meshgrid", "(", "np", ".", "arange", "(", "0", ",", "sy", ",", "iy", ")", ",", "np", ".", "arange", "(", "0", ",", "sx", ",", "ix", ")", ")", "plt", ".", "subplot", "(", "int", "(", "'2%s%s'", "%", "(", "p", ",", "n", "+", "1", ")", ")", ",", "axisbg", "=", "'g'", ")", "plt", ".", "title", "(", "pattern", ")", "plt", ".", "imshow", "(", "mag", "[", "n", "]", ",", "origin", "=", "'upper'", ")", "plt", ".", "colorbar", "(", ")", "plt", ".", "quiver", "(", "X", ",", "Y", ",", "dy_mean", "[", "n", "]", "[", ":", ":", "ix", ",", ":", ":", "iy", "]", "*", "20", ",", "dx_mean", "[", "n", "]", "[", ":", ":", "ix", ",", ":", ":", "iy", "]", "*", "20", ")", "plt", ".", "subplot", "(", "int", "(", "'2%s%s'", "%", "(", "p", ",", "n", "+", "p", "+", "1", ")", ")", ",", "axisbg", "=", "'g'", ")", "plt", ".", "title", "(", "pattern", ")", "plt", ".", "imshow", "(", "std", "[", "n", "]", ",", "origin", "=", "'upper'", ")", "plt", ".", "colorbar", "(", ")", "# plt.quiver(X,Y,dx_std[n][::ix,::iy]*50, dy_std[n][::ix,::iy]*10)\r", "#############################################\r", "fig", "=", "plt", ".", "figure", "(", ")", "fig", ".", "suptitle", "(", "'Spatial uncertainty + deflection'", ")", "for", "n", ",", "pattern", "in", "enumerate", "(", "patterns", ".", "keys", "(", ")", ")", ":", "L", ".", "calibrate", "(", "board_size", ",", "method", ")", "# there is alot of additional calc thats not necassary:\r", "L", ".", "setCameraParams", "(", "fx", "[", "0", "]", ",", "fy", "[", "0", "]", ",", "cx", "[", "0", "]", ",", "cy", "[", "0", "]", ",", "k1", "[", "0", "]", ",", "k2", "[", "0", "]", ",", "k3", "[", "0", "]", ",", "p1", "[", "0", "]", ",", "p2", "[", "0", "]", ")", "L", ".", "_coeffs", "[", "'shape'", "]", "=", "(", "imgHeight", ",", "imgWidth", ")", "L", ".", "_coeffs", "[", "'reprojectionError'", "]", "=", "np", ".", "mean", "(", "errl", "[", "n", "]", ")", "# deflection_x, deflection_y = L.getDeflection(width, HEIGHT)\r", "# deflection_x += dx_mean[n]\r", "# deflection_y += dy_mean[n]\r", "ux", ",", "uy", "=", "L", ".", "standardUncertainties", "(", ")", "plt", ".", "subplot", "(", "int", "(", "'2%s%s'", "%", "(", "p", ",", "n", "+", "1", ")", ")", ",", "axisbg", "=", "'g'", ")", "plt", ".", "title", "(", "pattern", ")", "plt", ".", "imshow", "(", "mag", "[", "n", "]", ",", "origin", "=", "'upper'", ")", "plt", ".", "colorbar", "(", ")", "# DEFLECTION\r", "plt", ".", "subplot", "(", "int", "(", "'2%s%s'", "%", "(", "p", ",", "n", "+", "p", "+", "1", ")", ")", ",", "axisbg", "=", "'g'", ")", "plt", ".", "title", "(", "pattern", ")", "plt", ".", "imshow", "(", "np", ".", "linalg", ".", "norm", "(", "[", "ux", ",", "uy", "]", ",", "axis", "=", "0", ")", ",", "origin", "=", "'upper'", ")", "plt", ".", "colorbar", "(", ")", "# DEFL: VECTORS\r", "# downscale - show max 30 arrows each dimension\r", "sy", ",", "sx", "=", "dx_mean", "[", "n", "]", ".", "shape", "ix", "=", "int", "(", "sx", "/", "15", ")", "if", "ix", "<", "1", ":", "ix", "=", "1", 
"iy", "=", "int", "(", "sy", "/", "15", ")", "if", "iy", "<", "1", ":", "iy", "=", "1", "Y", ",", "X", "=", "np", ".", "meshgrid", "(", "np", ".", "arange", "(", "0", ",", "sy", ",", "iy", ")", ",", "np", ".", "arange", "(", "0", ",", "sx", ",", "ix", ")", ")", "plt", ".", "quiver", "(", "X", ",", "Y", ",", "ux", "[", ":", ":", "ix", ",", ":", ":", "iy", "]", "*", "20", ",", "uy", "[", ":", ":", "ix", ",", ":", ":", "iy", "]", "*", "20", ")", "if", "PLOT_ERROR_ARRAY", "or", "PLOT_RESULTS", ":", "plt", ".", "show", "(", ")", "return", "dx_mean", ",", "dy_mean" ]
Simulates a lens calibration using synthetic images * images are rendered at the given HEIGHT resolution * noise and smoothing are applied * perspective and position errors are applied * images are deformed using the given CAMERA_PARAM * the detected camera parameters are used to calculate the error relative to the given ones simulation ----------- N_IMAGES -> number of images to take for a camera calibration N_SAMPLES -> number of camera calibrations of each pattern type output -------- SHOW_DETECTED_PATTERN: print each image and detected pattern to screen PLOT_RESULTS: plot boxplots of the mean error and std of the camera parameters PLOT_ERROR_ARRAY: plot position error for the lens correction pattern -------- this simulation tests the OpenCV standard patterns: chessboard, asymmetric and symmetric circles GRAYSCALE: whether to load the pattern as gray scale RELATIVE_PATTERN_SIZE: the relative size of the pattern within the image (0.4->40%) PERSPECTIVE: [True] -> enable perspective distortion ROTATION: [True] -> enable rotation of the pattern BLUR: False or (sizex,sizey), like (3,3) CAMERA_PARAM: camera calibration parameters as [fx,fy,cx,cy,k1,k2,k3,p1,p2]
[ "Simulates", "a", "lens", "calibration", "using", "synthetic", "images", "*", "images", "are", "rendered", "under", "the", "given", "HEIGHT", "resolution", "*", "noise", "and", "smoothing", "is", "applied", "*", "perspective", "and", "position", "errors", "are", "applied", "*", "images", "are", "deformed", "using", "the", "given", "CAMERA_PARAM", "*", "the", "detected", "camera", "parameters", "are", "used", "to", "calculate", "the", "error", "to", "the", "given", "ones", "simulation", "-----------", "N_IMAGES", "-", ">", "number", "of", "images", "to", "take", "for", "a", "camera", "calibration", "N_SAMPLES", "-", ">", "number", "of", "camera", "calibrations", "of", "each", "pattern", "type", "output", "--------", "SHOW_DETECTED_PATTERN", ":", "print", "each", "image", "and", "detected", "pattern", "to", "screen", "PLOT_RESULTS", ":", "plot", "boxplots", "of", "the", "mean", "error", "and", "std", "of", "the", "camera", "parameters", "PLOT_ERROR_ARRAY", ":", "plot", "position", "error", "for", "the", "lens", "correction", "pattern", "--------", "this", "simulation", "tests", "the", "openCV", "standard", "patterns", ":", "chess", "board", "asymmetric", "and", "symmetric", "circles", "GRAYSCALE", ":", "whether", "to", "load", "the", "pattern", "as", "gray", "scale", "RELATIVE_PATTERN_SIZE", ":", "the", "relative", "size", "of", "the", "pattern", "within", "the", "image", "(", "0", ".", "4", "-", ">", "40%", ")", "PERSPECTIVE", ":", "[", "True", "]", "-", ">", "enable", "perspective", "distortion", "ROTATION", ":", "[", "True", "]", "-", ">", "enable", "rotation", "of", "the", "pattern", "BLUR", ":", "False", "or", "(", "sizex", "sizey", ")", "like", "(", "3", "3", ")", "CAMERA_PARAM", ":", "camera", "calibration", "parameters", "as", "[", "fx", "fy", "cx", "cy", "k1", "k2", "k3", "p1", "p2", "]" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/lens/estimateSystematicErrorLensCorrection.py#L24-L384
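A minimal invocation sketch. The import path is an assumption inferred from the func_code_url above; the run additionally needs OpenCV, matplotlib and a Qt binding with QtSvg:

    # hypothetical import path, inferred from the repository layout above
    from imgProcessor.camera.lens.estimateSystematicErrorLensCorrection import simulateSytematicError

    # keep the run short: 2 calibrations of 5 synthetic images each, one pattern
    dx_mean, dy_mean = simulateSytematicError(
        N_SAMPLES=2, N_IMAGES=5, HEIGHT=300,
        PATTERNS=('Chessboard',),
        SHOW_DETECTED_PATTERN=False,
        PLOT_RESULTS=False, PLOT_ERROR_ARRAY=False)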
radjkarl/imgProcessor
imgProcessor/scripts/_FitHistogramPeaks.py
plotSet
def plotSet(imgDir, posExTime, outDir, show_legend, show_plots, save_to_file, ftype): ''' creates plots showing the fitted Gaussian peaks, the histogram and a smoothed histogram for all images within [imgDir] posExTime - position range of the exposure time in the image name e.g.: img_30s.jpg -> (4,5) outDir - dirname to save the output images show_legend - True/False show_plots - display the result on screen save_to_file - save the result to file ftype - file type of the output images ''' xvals = [] hist = [] peaks = [] exTimes = [] max_border = 0 if not imgDir.exists(): raise Exception("image dir doesn't exist") for n, f in enumerate(imgDir): print(f) try: # if imgDir.join(f).isfile(): img = imgDir.join(f) s = FitHistogramPeaks(img) xvals.append(s.xvals) hist.append(s.yvals) # smoothedHist.append(s.yvals2) peaks.append(s.fitValues()) if s.border() > max_border: max_border = s.plotBorder() exTimes.append(float(f[posExTime[0]:posExTime[1] + 1])) except Exception: pass # skip files that could not be read or fitted nx = 2 ny = int(len(hist) // nx) + len(hist) % nx fig, ax = plt.subplots(ny, nx) # flatten 2d-ax list: if nx > 1: ax = [list(i) for i in zip(*ax)] # transpose 2d-list axx = [] for xa in ax: for ya in xa: axx.append(ya) ax = axx for x, h, p, e, a in zip(xvals, hist, peaks, exTimes, ax): a.plot(x, h, label='histogram', linewidth=3) # l1 = a.plot(x, s, label='smoothed') for n, pi in enumerate(p): l2 = a.plot(x, pi, label='peak %s' % n, linewidth=6) a.set_xlim(0, max_border) a.set_title('%s s' % e) # plt.setp([l1,l2], linewidth=2)#, linestyle='--', color='r') # set # both to dashed l1 = ax[0].legend() # loc='upper center', bbox_to_anchor=(0.7, 1.05), l1.draw_frame(False) plt.xlabel('pixel value') plt.ylabel('number of pixels') fig = plt.gcf() fig.set_size_inches(7 * nx, 3 * ny) if save_to_file: p = PathStr(outDir).join('result').setFiletype(ftype) plt.savefig(p, bbox_inches='tight') if show_plots: plt.show()
python
def plotSet(imgDir, posExTime, outDir, show_legend, show_plots, save_to_file, ftype): ''' creates plots showing the fitted Gaussian peaks, the histogram and a smoothed histogram for all images within [imgDir] posExTime - position range of the exposure time in the image name e.g.: img_30s.jpg -> (4,5) outDir - dirname to save the output images show_legend - True/False show_plots - display the result on screen save_to_file - save the result to file ftype - file type of the output images ''' xvals = [] hist = [] peaks = [] exTimes = [] max_border = 0 if not imgDir.exists(): raise Exception("image dir doesn't exist") for n, f in enumerate(imgDir): print(f) try: # if imgDir.join(f).isfile(): img = imgDir.join(f) s = FitHistogramPeaks(img) xvals.append(s.xvals) hist.append(s.yvals) # smoothedHist.append(s.yvals2) peaks.append(s.fitValues()) if s.border() > max_border: max_border = s.plotBorder() exTimes.append(float(f[posExTime[0]:posExTime[1] + 1])) except Exception: pass # skip files that could not be read or fitted nx = 2 ny = int(len(hist) // nx) + len(hist) % nx fig, ax = plt.subplots(ny, nx) # flatten 2d-ax list: if nx > 1: ax = [list(i) for i in zip(*ax)] # transpose 2d-list axx = [] for xa in ax: for ya in xa: axx.append(ya) ax = axx for x, h, p, e, a in zip(xvals, hist, peaks, exTimes, ax): a.plot(x, h, label='histogram', linewidth=3) # l1 = a.plot(x, s, label='smoothed') for n, pi in enumerate(p): l2 = a.plot(x, pi, label='peak %s' % n, linewidth=6) a.set_xlim(0, max_border) a.set_title('%s s' % e) # plt.setp([l1,l2], linewidth=2)#, linestyle='--', color='r') # set # both to dashed l1 = ax[0].legend() # loc='upper center', bbox_to_anchor=(0.7, 1.05), l1.draw_frame(False) plt.xlabel('pixel value') plt.ylabel('number of pixels') fig = plt.gcf() fig.set_size_inches(7 * nx, 3 * ny) if save_to_file: p = PathStr(outDir).join('result').setFiletype(ftype) plt.savefig(p, bbox_inches='tight') if show_plots: plt.show()
[ "def", "plotSet", "(", "imgDir", ",", "posExTime", ",", "outDir", ",", "show_legend", ",", "show_plots", ",", "save_to_file", ",", "ftype", ")", ":", "xvals", "=", "[", "]", "hist", "=", "[", "]", "peaks", "=", "[", "]", "exTimes", "=", "[", "]", "max_border", "=", "0", "if", "not", "imgDir", ".", "exists", "(", ")", ":", "raise", "Exception", "(", "\"image dir doesn't exist\"", ")", "for", "n", ",", "f", "in", "enumerate", "(", "imgDir", ")", ":", "print", "(", "f", ")", "try", ":", "# if imgDir.join(f).isfile():\r", "img", "=", "imgDir", ".", "join", "(", "f", ")", "s", "=", "FitHistogramPeaks", "(", "img", ")", "xvals", ".", "append", "(", "s", ".", "xvals", ")", "hist", ".", "append", "(", "s", ".", "yvals", ")", "# smoothedHist.append(s.yvals2)\r", "peaks", ".", "append", "(", "s", ".", "fitValues", "(", ")", ")", "if", "s", ".", "border", "(", ")", ">", "max_border", ":", "max_border", "=", "s", ".", "plotBorder", "(", ")", "exTimes", ".", "append", "(", "float", "(", "f", "[", "posExTime", "[", "0", "]", ":", "posExTime", "[", "1", "]", "+", "1", "]", ")", ")", "except", ":", "pass", "nx", "=", "2", "ny", "=", "int", "(", "len", "(", "hist", ")", "//", "nx", ")", "+", "len", "(", "hist", ")", "%", "nx", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", "ny", ",", "nx", ")", "# flatten 2d-ax list:\r", "if", "nx", ">", "1", ":", "ax", "=", "[", "list", "(", "i", ")", "for", "i", "in", "zip", "(", "*", "ax", ")", "]", "# transpose 2d-list\r", "axx", "=", "[", "]", "for", "xa", "in", "ax", ":", "for", "ya", "in", "xa", ":", "axx", ".", "append", "(", "ya", ")", "ax", "=", "axx", "for", "x", ",", "h", ",", "p", ",", "e", ",", "a", "in", "zip", "(", "xvals", ",", "hist", ",", "peaks", ",", "exTimes", ",", "ax", ")", ":", "a", ".", "plot", "(", "x", ",", "h", ",", "label", "=", "'histogram'", ",", "thickness", "=", "3", ")", "# l1 = a.plot(x, s, label='smoothed')\r", "for", "n", ",", "pi", "in", "enumerate", "(", "p", ")", ":", "l2", "=", "a", ".", "plot", "(", "x", ",", "pi", ",", "label", "=", "'peak %s'", "%", "n", ",", "thickness", "=", "6", ")", "a", ".", "set_xlim", "(", "xmin", "=", "0", ",", "xmax", "=", "max_border", ")", "a", ".", "set_title", "(", "'%s s'", "%", "e", ")", "# plt.setp([l1,l2], linewidth=2)#, linestyle='--', color='r') # set\r", "# both to dashed\r", "l1", "=", "ax", "[", "0", "]", ".", "legend", "(", ")", "# loc='upper center', bbox_to_anchor=(0.7, 1.05),\r", "l1", ".", "draw_frame", "(", "False", ")", "plt", ".", "xlabel", "(", "'pixel value'", ")", "plt", ".", "ylabel", "(", "'number of pixels'", ")", "fig", "=", "plt", ".", "gcf", "(", ")", "fig", ".", "set_size_inches", "(", "7", "*", "nx", ",", "3", "*", "ny", ")", "if", "save_to_file", ":", "p", "=", "PathStr", "(", "outDir", ")", ".", "join", "(", "'result'", ")", ".", "setFiletype", "(", "ftype", ")", "plt", ".", "savefig", "(", "p", ",", "bbox_inches", "=", "'tight'", ")", "if", "show_plots", ":", "plt", ".", "show", "(", ")" ]
creates plots showing the fitted Gaussian peaks, the histogram and a smoothed histogram for all images within [imgDir] posExTime - position range of the exposure time in the image name e.g.: img_30s.jpg -> (4,5) outDir - dirname to save the output images show_legend - True/False show_plots - display the result on screen save_to_file - save the result to file ftype - file type of the output images
[ "creates", "plots", "showing", "both", "found", "GAUSSIAN", "peaks", "the", "histogram", "a", "smoothed", "histogram", "from", "all", "images", "within", "[", "imgDir", "]", "posExTime", "-", "position", "range", "of", "the", "exposure", "time", "in", "the", "image", "name", "e", ".", "g", ".", ":", "img_30s", ".", "jpg", "-", ">", "(", "4", "5", ")", "outDir", "-", "dirname", "to", "save", "the", "output", "images", "show_legend", "-", "True", "/", "False", "show_plots", "-", "display", "the", "result", "on", "screen", "save_to_file", "-", "save", "the", "result", "to", "file", "ftype", "-", "file", "type", "of", "the", "output", "images" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/scripts/_FitHistogramPeaks.py#L47-L126
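A usage sketch for plotSet. It assumes [imgDir] is a PathStr-like directory object (the function calls imgDir.exists() and imgDir.join()); the fancytools import below is an assumption:

    from fancytools.os.PathStr import PathStr  # assumed helper dependency

    plotSet(PathStr('exposure_series'),  # holds images named e.g. img_30s.jpg
            posExTime=(4, 5),            # characters 4..5 carry the exposure time
            outDir='results', show_legend=True,
            show_plots=False, save_to_file=True, ftype='png')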
radjkarl/imgProcessor
imgProcessor/camera/flatField/flatFieldFromCloseDistance.py
flatFieldFromCloseDistance
def flatFieldFromCloseDistance(imgs, bg_imgs=None): ''' Average multiple images of a homogeneous device imaged directly in front of the camera lens. if [bg_imgs] are not given, the background level is extracted from 1% of the cumulative intensity distribution of the averaged [imgs] This measurement method is referred to as 'Method A' in --- K.Bedrich, M.Bokalic et al.: ELECTROLUMINESCENCE IMAGING OF PV DEVICES: ADVANCED FLAT FIELD CALIBRATION, 2017 --- ''' img = imgAverage(imgs) bg = getBackground2(bg_imgs, img) img -= bg img = toGray(img) mx = median_filter(img[::10, ::10], 3).max() img /= mx return img
python
def flatFieldFromCloseDistance(imgs, bg_imgs=None): ''' Average multiple images of a homogeneous device imaged directly in front of the camera lens. if [bg_imgs] are not given, the background level is extracted from 1% of the cumulative intensity distribution of the averaged [imgs] This measurement method is referred to as 'Method A' in --- K.Bedrich, M.Bokalic et al.: ELECTROLUMINESCENCE IMAGING OF PV DEVICES: ADVANCED FLAT FIELD CALIBRATION, 2017 --- ''' img = imgAverage(imgs) bg = getBackground2(bg_imgs, img) img -= bg img = toGray(img) mx = median_filter(img[::10, ::10], 3).max() img /= mx return img
[ "def", "flatFieldFromCloseDistance", "(", "imgs", ",", "bg_imgs", "=", "None", ")", ":", "img", "=", "imgAverage", "(", "imgs", ")", "bg", "=", "getBackground2", "(", "bg_imgs", ",", "img", ")", "img", "-=", "bg", "img", "=", "toGray", "(", "img", ")", "mx", "=", "median_filter", "(", "img", "[", ":", ":", "10", ",", ":", ":", "10", "]", ",", "3", ")", ".", "max", "(", ")", "img", "/=", "mx", "return", "img" ]
Average multiple images of a homogeneous device imaged directly in front of the camera lens. if [bg_imgs] are not given, the background level is extracted from 1% of the cumulative intensity distribution of the averaged [imgs] This measurement method is referred to as 'Method A' in --- K.Bedrich, M.Bokalic et al.: ELECTROLUMINESCENCE IMAGING OF PV DEVICES: ADVANCED FLAT FIELD CALIBRATION, 2017 ---
[ "Average", "multiple", "images", "of", "a", "homogeneous", "device", "imaged", "directly", "in", "front", "the", "camera", "lens", ".", "if", "[", "bg_imgs", "]", "are", "not", "given", "background", "level", "is", "extracted", "from", "1%", "of", "the", "cumulative", "intensity", "distribution", "of", "the", "averaged", "[", "imgs", "]", "This", "measurement", "method", "is", "referred", "as", "Method", "A", "in", "---", "K", ".", "Bedrich", "M", ".", "Bokalic", "et", "al", ".", ":", "ELECTROLUMINESCENCE", "IMAGING", "OF", "PV", "DEVICES", ":", "ADVANCED", "FLAT", "FIELD", "CALIBRATION", "2017", "---" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/flatField/flatFieldFromCloseDistance.py#L16-L38
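A minimal sketch, assuming the images are given as file paths readable by the package's imread (the file names below are hypothetical):

    imgs = ['ff_%02i.tif' % i for i in range(10)]   # bright device images
    bg_imgs = ['bg_00.tif', 'bg_01.tif']            # dark/background images
    flatfield = flatFieldFromCloseDistance(imgs, bg_imgs)  # float map, max ~1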
radjkarl/imgProcessor
imgProcessor/camera/flatField/flatFieldFromCloseDistance.py
flatFieldFromCloseDistance2
def flatFieldFromCloseDistance2(images, bgImages=None, calcStd=False, nlf=None, nstd=6): ''' Same as [flatFieldFromCloseDistance]. Differences are: ... single-time-effect removal included ... returns the standard deviation of the image average [calcStd=True] Optional: ----------- calcStd -> set to True to also return the standard deviation nlf -> noise level function (callable) nstd -> artefact needs to deviate more than [nstd] to be removed ''' if len(images) > 1: # start with brightest images def fn(img): img = imread(img) s0, s1 = img.shape[:2] # rough approx. of image brightness: return -img[::s0 // 10, ::s1 // 10].min() images = sorted(images, key=lambda i: fn(i)) avgBg = getBackground2(bgImages, images[1]) i0 = imread(images[0], dtype=float) - avgBg i1 = imread(images[1], dtype=float) - avgBg if nlf is None: nlf = oneImageNLF(i0, i1)[0] det = SingleTimeEffectDetection( (i0, i1), nlf, nStd=nstd, calcVariance=calcStd) for i in images[1:]: i = imread(i) # exclude erroneously darker areas: thresh = det.noSTE - nlf(det.noSTE) * nstd mask = i > thresh # filter STE: det.addImage(i, mask) ma = det.noSTE else: # single image -> no single-time-effect removal possible: avgBg = getBackground2(bgImages, images[0]) ma = imread(images[0], dtype=float) - avgBg # fast artifact free maximum: mx = median_filter(ma[::10, ::10], 3).max() if calcStd: # needs more than one image, since [det] is built above return ma / mx, det.mma.var**0.5 / mx return ma / mx
python
def flatFieldFromCloseDistance2(images, bgImages=None, calcStd=False, nlf=None, nstd=6): ''' Same as [flatFieldFromCloseDistance]. Differences are: ... single-time-effect removal included ... returns the standard deviation of the image average [calcStd=True] Optional: ----------- calcStd -> set to True to also return the standard deviation nlf -> noise level function (callable) nstd -> artefact needs to deviate more than [nstd] to be removed ''' if len(images) > 1: # start with brightest images def fn(img): img = imread(img) s0, s1 = img.shape[:2] # rough approx. of image brightness: return -img[::s0 // 10, ::s1 // 10].min() images = sorted(images, key=lambda i: fn(i)) avgBg = getBackground2(bgImages, images[1]) i0 = imread(images[0], dtype=float) - avgBg i1 = imread(images[1], dtype=float) - avgBg if nlf is None: nlf = oneImageNLF(i0, i1)[0] det = SingleTimeEffectDetection( (i0, i1), nlf, nStd=nstd, calcVariance=calcStd) for i in images[1:]: i = imread(i) # exclude erroneously darker areas: thresh = det.noSTE - nlf(det.noSTE) * nstd mask = i > thresh # filter STE: det.addImage(i, mask) ma = det.noSTE else: # single image -> no single-time-effect removal possible: avgBg = getBackground2(bgImages, images[0]) ma = imread(images[0], dtype=float) - avgBg # fast artifact free maximum: mx = median_filter(ma[::10, ::10], 3).max() if calcStd: # needs more than one image, since [det] is built above return ma / mx, det.mma.var**0.5 / mx return ma / mx
[ "def", "flatFieldFromCloseDistance2", "(", "images", ",", "bgImages", "=", "None", ",", "calcStd", "=", "False", ",", "nlf", "=", "None", ",", "nstd", "=", "6", ")", ":", "if", "len", "(", "images", ")", ">", "1", ":", "# start with brightest images\r", "def", "fn", "(", "img", ")", ":", "img", "=", "imread", "(", "img", ")", "s0", ",", "s1", "=", "img", ".", "shape", "[", ":", "2", "]", "# rough approx. of image brightness:\r", "return", "-", "img", "[", ":", ":", "s0", "//", "10", ",", ":", ":", "s1", "//", "10", "]", ".", "min", "(", ")", "images", "=", "sorted", "(", "images", ",", "key", "=", "lambda", "i", ":", "fn", "(", "i", ")", ")", "avgBg", "=", "getBackground2", "(", "bgImages", ",", "images", "[", "1", "]", ")", "i0", "=", "imread", "(", "images", "[", "0", "]", ",", "dtype", "=", "float", ")", "-", "avgBg", "i1", "=", "imread", "(", "images", "[", "1", "]", ",", "dtype", "=", "float", ")", "-", "avgBg", "if", "nlf", "is", "None", ":", "nlf", "=", "oneImageNLF", "(", "i0", ",", "i1", ")", "[", "0", "]", "det", "=", "SingleTimeEffectDetection", "(", "(", "i0", ",", "i1", ")", ",", "nlf", ",", "nStd", "=", "nstd", ",", "calcVariance", "=", "calcStd", ")", "for", "i", "in", "images", "[", "1", ":", "]", ":", "i", "=", "imread", "(", "i", ")", "# exclude erroneously darker areas:\r", "thresh", "=", "det", ".", "noSTE", "-", "nlf", "(", "det", ".", "noSTE", ")", "*", "nstd", "mask", "=", "i", ">", "thresh", "# filter STE:\r", "det", ".", "addImage", "(", "i", ",", "mask", ")", "ma", "=", "det", ".", "noSTE", "else", ":", "ma", "=", "imread", "(", "images", "[", "0", "]", ",", "dtype", "=", "float", ")", "-", "avgBg", "# fast artifact free maximum:\r", "mx", "=", "median_filter", "(", "ma", "[", ":", ":", "10", ",", ":", ":", "10", "]", ",", "3", ")", ".", "max", "(", ")", "if", "calcStd", ":", "return", "ma", "/", "mx", ",", "det", ".", "mma", ".", "var", "**", "0.5", "/", "mx", "return", "ma", "/", "mx" ]
Same as [flatFieldFromCloseDistance]. Differences are: ... single-time-effect removal included ... returns the standard deviation of the image average [calcStd=True] Optional: ----------- calcStd -> set to True to also return the standard deviation nlf -> noise level function (callable) nstd -> artefact needs to deviate more than [nstd] to be removed
[ "Same", "as", "[", "flatFieldFromCloseDistance", "]", ".", "Differences", "are", ":", "...", "single", "-", "time", "-", "effect", "removal", "included", "...", "returns", "the", "standard", "deviation", "of", "the", "image", "average", "[", "calcStd", "=", "True", "]", "Optional", ":", "-----------", "calcStd", "-", ">", "set", "to", "True", "to", "also", "return", "the", "standard", "deviation", "nlf", "-", ">", "noise", "level", "function", "(", "callable", ")", "nstd", "-", ">", "artefact", "needs", "to", "deviate", "more", "than", "[", "nstd", "]", "to", "be", "removed" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/flatField/flatFieldFromCloseDistance.py#L41-L96
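The same measurement with single-time-effect filtering and an uncertainty map; calcStd=True only makes sense with more than one image (hypothetical file names):

    imgs = ['ff_%02i.tif' % i for i in range(10)]
    flatfield, flatfield_std = flatFieldFromCloseDistance2(
        imgs, bgImages=['bg_00.tif', 'bg_01.tif'], calcStd=True, nstd=6)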
radjkarl/imgProcessor
imgProcessor/measure/SNR/SNR_hinken.py
SNR_hinken
def SNR_hinken(imgs, bg=0, roi=None): ''' signal-to-noise ratio (SNR), computed as mean(images) / std(images), as defined in Hinken et al. 2011 (DOI: 10.1063/1.3541766) works on unloaded images (paths), so there is no memory overload even if many images are given ''' mean = None M = len(imgs) if bg != 0: bg = imread(bg) if roi is not None: bg = bg[roi] # calc mean: for i in imgs: img = np.asfarray(imread(i)) if roi is not None: img = img[roi] img -= bg if mean is None: # init mean = np.zeros_like(img) std = np.zeros_like(img) mean += img del img mean /= M # calc std of mean: for i in imgs: img = np.asfarray(imread(i)) if roi is not None: img = img[roi] img -= bg std += (mean - img)**2 del img std = (std / M)**0.5 return mean.mean() / std.mean()
python
def SNR_hinken(imgs, bg=0, roi=None): ''' signal-to-noise ratio (SNR), computed as mean(images) / std(images), as defined in Hinken et al. 2011 (DOI: 10.1063/1.3541766) works on unloaded images (paths), so there is no memory overload even if many images are given ''' mean = None M = len(imgs) if bg != 0: bg = imread(bg) if roi is not None: bg = bg[roi] # calc mean: for i in imgs: img = np.asfarray(imread(i)) if roi is not None: img = img[roi] img -= bg if mean is None: # init mean = np.zeros_like(img) std = np.zeros_like(img) mean += img del img mean /= M # calc std of mean: for i in imgs: img = np.asfarray(imread(i)) if roi is not None: img = img[roi] img -= bg std += (mean - img)**2 del img std = (std / M)**0.5 return mean.mean() / std.mean()
[ "def", "SNR_hinken", "(", "imgs", ",", "bg", "=", "0", ",", "roi", "=", "None", ")", ":", "mean", "=", "None", "M", "=", "len", "(", "imgs", ")", "if", "bg", "is", "not", "0", ":", "bg", "=", "imread", "(", "bg", ")", "[", "roi", "]", "if", "roi", "is", "not", "None", ":", "bg", "=", "bg", "[", "roi", "]", "#calc mean:\r", "for", "i", "in", "imgs", ":", "img", "=", "imread", "(", "i", ")", ".", "asfarray", "(", ")", "if", "roi", "is", "not", "None", ":", "img", "=", "img", "[", "roi", "]", "img", "-=", "bg", "if", "mean", "is", "None", ":", "#init\r", "mean", "=", "np", ".", "zeros_like", "(", "img", ")", "std", "=", "np", ".", "zeros_like", "(", "img", ")", "mean", "+=", "img", "del", "img", "mean", "/=", "M", "#calc std of mean:\r", "for", "i", "in", "imgs", ":", "img", "=", "imread", "(", "i", ")", ".", "asfarray", "(", ")", "if", "roi", "is", "not", "None", ":", "img", "=", "img", "[", "roi", "]", "img", "-=", "bg", "std", "+=", "(", "mean", "-", "img", ")", "**", "2", "del", "img", "std", "=", "(", "std", "/", "M", ")", "**", "0.5", "return", "mean", ".", "mean", "(", ")", "/", "std", ".", "mean", "(", ")" ]
signal-to-noise ratio (SNR), computed as mean(images) / std(images), as defined in Hinken et al. 2011 (DOI: 10.1063/1.3541766) works on unloaded images (paths), so there is no memory overload even if many images are given
[ "signal", "-", "to", "-", "noise", "ratio", "(", "SNR", ")", "as", "mean", "(", "images", ")", "/", "std", "(", "images", ")", "as", "defined", "in", "Hinken", "et", ".", "al", ".", "2011", "(", "DOI", ":", "10", ".", "1063", "/", "1", ".", "3541766", ")", "works", "on", "unloaded", "images", "no", "memory", "overload", "if", "too", "many", "images", "are", "given" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/measure/SNR/SNR_hinken.py#L8-L44
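A usage sketch: [roi] is applied as a numpy index, so a pair of slices selects a sub-window (hypothetical file names):

    imgs = ['snr_%02i.tif' % i for i in range(20)]  # repeated exposures
    roi = (slice(100, 400), slice(200, 600))        # evaluate a sub-window only
    snr = SNR_hinken(imgs, bg='dark.tif', roi=roi)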
radjkarl/imgProcessor
imgProcessor/transform/boolImg.py
boolMasksToImage
def boolMasksToImage(masks): ''' Pack up to 8 boolean layers (2d arrays, dtype bool or int) into one 8-bit image ''' assert len(masks) <= 8, 'can only transform up to 8 masks into image' masks = np.asarray(masks, dtype=np.uint8) assert masks.ndim == 3, 'layers need to be a stack of 2d arrays' return np.packbits(masks, axis=0)[0].T
python
def boolMasksToImage(masks): ''' Pack up to 8 boolean layers (2d arrays, dtype bool or int) into one 8-bit image ''' assert len(masks) <= 8, 'can only transform up to 8 masks into image' masks = np.asarray(masks, dtype=np.uint8) assert masks.ndim == 3, 'layers need to be a stack of 2d arrays' return np.packbits(masks, axis=0)[0].T
[ "def", "boolMasksToImage", "(", "masks", ")", ":", "assert", "len", "(", "masks", ")", "<=", "8", ",", "'can only transform up to 8 masks into image'", "masks", "=", "np", ".", "asarray", "(", "masks", ",", "dtype", "=", "np", ".", "uint8", ")", "assert", "masks", ".", "ndim", "==", "3", ",", "'layers need to be stack of 2d arrays'", "return", "np", ".", "packbits", "(", "masks", ",", "axis", "=", "0", ")", "[", "0", "]", ".", "T" ]
Pack up to 8 boolean layers (2d arrays, dtype bool or int) into one 8-bit image
[ "Transform", "at", "maximum", "8", "bool", "layers", "--", ">", "2d", "arrays", "dtype", "=", "(", "bool", "int", ")", "to", "one", "8bit", "image" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/transform/boolImg.py#L4-L12
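The packing idea in isolation, using plain numpy: up to 8 boolean layers are packed along the layer axis into one uint8 value per pixel, with layer 0 ending up in the most significant bit:

    import numpy as np

    masks = np.random.rand(3, 4, 5) > 0.5                      # three 4x5 layers
    packed = np.packbits(masks.astype(np.uint8), axis=0)[0].T  # uint8, shape (5, 4)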
radjkarl/imgProcessor
imgProcessor/transform/boolImg.py
imageToBoolMasks
def imageToBoolMasks(arr): '''inverse of [boolMasksToImage]''' assert arr.dtype == np.uint8, 'image needs to be dtype=uint8' masks = np.unpackbits(arr).reshape(*arr.shape, 8) return np.swapaxes(masks, 2, 0)
python
def imageToBoolMasks(arr): '''inverse of [boolMasksToImage]''' assert arr.dtype == np.uint8, 'image needs to be dtype=uint8' masks = np.unpackbits(arr).reshape(*arr.shape, 8) return np.swapaxes(masks, 2, 0)
[ "def", "imageToBoolMasks", "(", "arr", ")", ":", "assert", "arr", ".", "dtype", "==", "np", ".", "uint8", ",", "'image needs to be dtype=uint8'", "masks", "=", "np", ".", "unpackbits", "(", "arr", ")", ".", "reshape", "(", "*", "arr", ".", "shape", ",", "8", ")", "return", "np", ".", "swapaxes", "(", "masks", ",", "2", ",", "0", ")" ]
inverse of [boolMasksToImage]
[ "inverse", "of", "[", "boolMasksToImage", "]" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/transform/boolImg.py#L15-L19
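A round trip with the two functions above (assuming both are importable from imgProcessor.transform.boolImg): the first len(masks) of the 8 unpacked layers reproduce the input, the remaining layers are zero padding:

    import numpy as np

    masks = np.random.rand(3, 4, 5) > 0.5
    img = boolMasksToImage(masks)          # uint8, shape (5, 4)
    recovered = imageToBoolMasks(img)      # shape (8, 4, 5)
    assert np.array_equal(recovered[:3].astype(bool), masks)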
radjkarl/imgProcessor
imgProcessor/utils/calcAspectRatioFromCorners.py
calcAspectRatioFromCorners
def calcAspectRatioFromCorners(corners, in_plane=False): ''' simple estimation of the aspect ratio of a quadrilateral from its corner points in_plane -> whether the object has no tilt, but only rotation and translation ''' q = corners l0 = [q[0, 0], q[0, 1], q[1, 0], q[1, 1]] l1 = [q[0, 0], q[0, 1], q[-1, 0], q[-1, 1]] l2 = [q[2, 0], q[2, 1], q[3, 0], q[3, 1]] l3 = [q[2, 0], q[2, 1], q[1, 0], q[1, 1]] a1 = line.length(l0) / line.length(l1) a2 = line.length(l2) / line.length(l3) if in_plane: # take the aspect ratio from the more rectangular corner if (abs(0.5 * np.pi - abs(line.angle2(l0, l1))) < abs(0.5 * np.pi - abs(line.angle2(l2, l3)))): return a1 else: return a2 return 0.5 * (a1 + a2)
python
def calcAspectRatioFromCorners(corners, in_plane=False): ''' simple estimation of the aspect ratio of a quadrilateral from its corner points in_plane -> whether the object has no tilt, but only rotation and translation ''' q = corners l0 = [q[0, 0], q[0, 1], q[1, 0], q[1, 1]] l1 = [q[0, 0], q[0, 1], q[-1, 0], q[-1, 1]] l2 = [q[2, 0], q[2, 1], q[3, 0], q[3, 1]] l3 = [q[2, 0], q[2, 1], q[1, 0], q[1, 1]] a1 = line.length(l0) / line.length(l1) a2 = line.length(l2) / line.length(l3) if in_plane: # take the aspect ratio from the more rectangular corner if (abs(0.5 * np.pi - abs(line.angle2(l0, l1))) < abs(0.5 * np.pi - abs(line.angle2(l2, l3)))): return a1 else: return a2 return 0.5 * (a1 + a2)
[ "def", "calcAspectRatioFromCorners", "(", "corners", ",", "in_plane", "=", "False", ")", ":", "q", "=", "corners", "l0", "=", "[", "q", "[", "0", ",", "0", "]", ",", "q", "[", "0", ",", "1", "]", ",", "q", "[", "1", ",", "0", "]", ",", "q", "[", "1", ",", "1", "]", "]", "l1", "=", "[", "q", "[", "0", ",", "0", "]", ",", "q", "[", "0", ",", "1", "]", ",", "q", "[", "-", "1", ",", "0", "]", ",", "q", "[", "-", "1", ",", "1", "]", "]", "l2", "=", "[", "q", "[", "2", ",", "0", "]", ",", "q", "[", "2", ",", "1", "]", ",", "q", "[", "3", ",", "0", "]", ",", "q", "[", "3", ",", "1", "]", "]", "l3", "=", "[", "q", "[", "2", ",", "0", "]", ",", "q", "[", "2", ",", "1", "]", ",", "q", "[", "1", ",", "0", "]", ",", "q", "[", "1", ",", "1", "]", "]", "a1", "=", "line", ".", "length", "(", "l0", ")", "/", "line", ".", "length", "(", "l1", ")", "a2", "=", "line", ".", "length", "(", "l2", ")", "/", "line", ".", "length", "(", "l3", ")", "if", "in_plane", ":", "# take aspect ration from more rectangular corner\r", "if", "(", "abs", "(", "0.5", "*", "np", ".", "pi", "-", "abs", "(", "line", ".", "angle2", "(", "l0", ",", "l1", ")", ")", ")", "<", "abs", "(", "0.5", "*", "np", ".", "pi", "-", "abs", "(", "line", ".", "angle2", "(", "l2", ",", "l3", ")", ")", ")", ")", ":", "return", "a1", "else", ":", "return", "a2", "return", "0.5", "*", "(", "a1", "+", "a2", ")" ]
simple estimation of the aspect ratio of a quadrilateral from its corner points in_plane -> whether the object has no tilt, but only rotation and translation
[ "simple", "and", "better", "alg", ".", "than", "below", "in_plane", "-", ">", "whether", "object", "has", "no", "tilt", "but", "only", "rotation", "and", "translation" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/utils/calcAspectRatioFromCorners.py#L8-L32
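A sketch with an axis-aligned rectangle, where both corner-based ratios agree; it assumes the corners are ordered around the quad and that line.length takes a line as [x0, y0, x1, y1], as the code above suggests:

    import numpy as np

    corners = np.float32([[0, 0], [200, 0], [200, 100], [0, 100]])
    ratio = calcAspectRatioFromCorners(corners)   # -> 2.0 for a 200x100 quad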
radjkarl/imgProcessor
imgProcessor/utils/putTextAlpha.py
putTextAlpha
def putTextAlpha(img, text, alpha, org, fontFace, fontScale, color, thickness): # , lineType=None ''' Extends cv2.putText with [alpha] argument ''' x, y = cv2.getTextSize(text, fontFace, fontScale, thickness)[0] ox, oy = org imgcut = img[oy - y - 3:oy, ox:ox + x] if img.ndim == 3: txtarr = np.zeros(shape=(y + 3, x, 3), dtype=np.uint8) else: txtarr = np.zeros(shape=(y + 3, x), dtype=np.uint8) cv2.putText(txtarr, text, (0, y), fontFace, fontScale, color, thickness=thickness #, lineType=lineType ) cv2.addWeighted(txtarr, alpha, imgcut, 1, 0, imgcut, -1) return img
python
def putTextAlpha(img, text, alpha, org, fontFace, fontScale, color, thickness): # , lineType=None ''' Extends cv2.putText with [alpha] argument ''' x, y = cv2.getTextSize(text, fontFace, fontScale, thickness)[0] ox, oy = org imgcut = img[oy - y - 3:oy, ox:ox + x] if img.ndim == 3: txtarr = np.zeros(shape=(y + 3, x, 3), dtype=np.uint8) else: txtarr = np.zeros(shape=(y + 3, x), dtype=np.uint8) cv2.putText(txtarr, text, (0, y), fontFace, fontScale, color, thickness=thickness #, lineType=lineType ) cv2.addWeighted(txtarr, alpha, imgcut, 1, 0, imgcut, -1) return img
[ "def", "putTextAlpha", "(", "img", ",", "text", ",", "alpha", ",", "org", ",", "fontFace", ",", "fontScale", ",", "color", ",", "thickness", ")", ":", "# , lineType=None\r", "x", ",", "y", "=", "cv2", ".", "getTextSize", "(", "text", ",", "fontFace", ",", "fontScale", ",", "thickness", ")", "[", "0", "]", "ox", ",", "oy", "=", "org", "imgcut", "=", "img", "[", "oy", "-", "y", "-", "3", ":", "oy", ",", "ox", ":", "ox", "+", "x", "]", "if", "img", ".", "ndim", "==", "3", ":", "txtarr", "=", "np", ".", "zeros", "(", "shape", "=", "(", "y", "+", "3", ",", "x", ",", "3", ")", ",", "dtype", "=", "np", ".", "uint8", ")", "else", ":", "txtarr", "=", "np", ".", "zeros", "(", "shape", "=", "(", "y", "+", "3", ",", "x", ")", ",", "dtype", "=", "np", ".", "uint8", ")", "cv2", ".", "putText", "(", "txtarr", ",", "text", ",", "(", "0", ",", "y", ")", ",", "fontFace", ",", "fontScale", ",", "color", ",", "thickness", "=", "thickness", "#, lineType=lineType\r", ")", "cv2", ".", "addWeighted", "(", "txtarr", ",", "alpha", ",", "imgcut", ",", "1", ",", "0", ",", "imgcut", ",", "-", "1", ")", "return", "img" ]
Extends cv2.putText with [alpha] argument
[ "Extends", "cv2", ".", "putText", "with", "[", "alpha", "]", "argument" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/utils/putTextAlpha.py#L10-L35
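A usage sketch mirroring cv2.putText, with [org] as the lower-left text corner:

    import cv2
    import numpy as np

    img = np.zeros((120, 400, 3), np.uint8)
    putTextAlpha(img, 'example', 0.5, (10, 60), cv2.FONT_HERSHEY_SIMPLEX,
                 1.0, (0, 255, 0), 2)   # green text at 50% opacity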
radjkarl/imgProcessor
imgProcessor/filters/fastMean.py
fastMean
def fastMean(img, f=10, inplace=False): ''' for bigger kernel sizes it is often faster to resize an image rather than to blur it... ''' s0,s1 = img.shape[:2] ss0 = int(round(s0/f)) ss1 = int(round(s1/f)) small = cv2.resize(img,(ss1,ss0), interpolation=cv2.INTER_AREA) # scale back up to the original size: k = {'interpolation':cv2.INTER_LINEAR} if inplace: k['dst']=img return cv2.resize(small,(s1,s0), **k)
python
def fastMean(img, f=10, inplace=False): ''' for bigger kernel sizes it is often faster to resize an image rather than to blur it... ''' s0,s1 = img.shape[:2] ss0 = int(round(s0/f)) ss1 = int(round(s1/f)) small = cv2.resize(img,(ss1,ss0), interpolation=cv2.INTER_AREA) # scale back up to the original size: k = {'interpolation':cv2.INTER_LINEAR} if inplace: k['dst']=img return cv2.resize(small,(s1,s0), **k)
[ "def", "fastMean", "(", "img", ",", "f", "=", "10", ",", "inplace", "=", "False", ")", ":", "s0", ",", "s1", "=", "img", ".", "shape", "[", ":", "2", "]", "ss0", "=", "int", "(", "round", "(", "s0", "/", "f", ")", ")", "ss1", "=", "int", "(", "round", "(", "s1", "/", "f", ")", ")", "small", "=", "cv2", ".", "resize", "(", "img", ",", "(", "ss1", ",", "ss0", ")", ",", "interpolation", "=", "cv2", ".", "INTER_AREA", ")", "#bigger\r", "k", "=", "{", "'interpolation'", ":", "cv2", ".", "INTER_LINEAR", "}", "if", "inplace", ":", "k", "[", "'dst'", "]", "=", "img", "return", "cv2", ".", "resize", "(", "small", ",", "(", "s1", ",", "s0", ")", ",", "*", "*", "k", ")" ]
for bigger kernel sizes it is often faster to resize an image rather than to blur it...
[ "for", "bigger", "ksizes", "it", "if", "often", "faster", "to", "resize", "an", "image", "rather", "than", "blur", "it", "..." ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/filters/fastMean.py#L5-L19
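A sketch comparing the trick to a real box blur; f=10 corresponds roughly to a 10x10 kernel:

    import cv2
    import numpy as np

    img = np.random.randint(0, 256, (1080, 1920), dtype=np.uint8)
    approx = fastMean(img, f=10)   # roughly cv2.blur(img, (10, 10)), but faster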
radjkarl/imgProcessor
setup.py
read
def read(*paths): """Build a file path from *paths* and return the contents.""" try: f_name = os.path.join(*paths) with open(f_name, 'r') as f: return f.read() except IOError: print('%s not existing ... skipping' % f_name) return ''
python
def read(*paths): """Build a file path from *paths* and return the contents.""" try: f_name = os.path.join(*paths) with open(f_name, 'r') as f: return f.read() except IOError: print('%s not existing ... skipping' % f_name) return ''
[ "def", "read", "(", "*", "paths", ")", ":", "try", ":", "f_name", "=", "os", ".", "path", ".", "join", "(", "*", "paths", ")", "with", "open", "(", "f_name", ",", "'r'", ")", "as", "f", ":", "return", "f", ".", "read", "(", ")", "except", "IOError", ":", "print", "(", "'%s not existing ... skipping'", "%", "f_name", ")", "return", "''" ]
Build a file path from *paths* and return the contents.
[ "Build", "a", "file", "path", "from", "*", "paths", "*", "and", "return", "the", "contents", "." ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/setup.py#L16-L24
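Typical setup.py usage; missing files degrade to an empty string instead of aborting the build:

    long_description = '\n'.join((read('README.rst'), read('CHANGES.txt')))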
radjkarl/imgProcessor
imgProcessor/camera/DarkCurrentMap.py
averageSameExpTimes
def averageSameExpTimes(imgs_path): ''' average background images with same exposure time ''' firsts = imgs_path[:2] imgs = imgs_path[2:] for n, i in enumerate(firsts): firsts[n] = np.asfarray(imread(i)) d = DarkCurrentMap(firsts) for i in imgs: i = imread(i) d.addImg(i) return d.map()
python
def averageSameExpTimes(imgs_path): ''' average background images with same exposure time ''' firsts = imgs_path[:2] imgs = imgs_path[2:] for n, i in enumerate(firsts): firsts[n] = np.asfarray(imread(i)) d = DarkCurrentMap(firsts) for i in imgs: i = imread(i) d.addImg(i) return d.map()
[ "def", "averageSameExpTimes", "(", "imgs_path", ")", ":", "firsts", "=", "imgs_path", "[", ":", "2", "]", "imgs", "=", "imgs_path", "[", "2", ":", "]", "for", "n", ",", "i", "in", "enumerate", "(", "firsts", ")", ":", "firsts", "[", "n", "]", "=", "np", ".", "asfarray", "(", "imread", "(", "i", ")", ")", "d", "=", "DarkCurrentMap", "(", "firsts", ")", "for", "i", "in", "imgs", ":", "i", "=", "imread", "(", "i", ")", "d", ".", "addImg", "(", "i", ")", "return", "d", ".", "map", "(", ")" ]
average background images with same exposure time
[ "average", "background", "images", "with", "same", "exposure", "time" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/DarkCurrentMap.py#L46-L58
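A sketch averaging all dark images taken at one exposure time; the first two images initialize the running DarkCurrentMap, the remaining ones are added incrementally (hypothetical file names):

    paths = ['dark_10s_%i.tif' % i for i in range(6)]
    avg_10s = averageSameExpTimes(paths)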
radjkarl/imgProcessor
imgProcessor/camera/DarkCurrentMap.py
getLinearityFunction
def getLinearityFunction(expTimes, imgs, mxIntensity=65535, min_ascent=0.001, ): ''' returns offset, ascent of image(expTime) = offset + ascent*expTime ''' # TODO: calculate [min_ascent] from noise function # instead of having it as variable ascent, offset, error = linRegressUsingMasked2dArrays( expTimes, imgs, imgs > mxIntensity) ascent[np.isnan(ascent)] = 0 # remove low frequent noise: if min_ascent > 0: i = ascent < min_ascent offset[i] += (0.5 * (np.min(expTimes) + np.max(expTimes))) * ascent[i] ascent[i] = 0 return offset, ascent, error
python
def getLinearityFunction(expTimes, imgs, mxIntensity=65535, min_ascent=0.001, ): ''' returns offset, ascent of image(expTime) = offset + ascent*expTime ''' # TODO: calculate [min_ascent] from noise function # instead of having it as variable ascent, offset, error = linRegressUsingMasked2dArrays( expTimes, imgs, imgs > mxIntensity) ascent[np.isnan(ascent)] = 0 # remove low frequent noise: if min_ascent > 0: i = ascent < min_ascent offset[i] += (0.5 * (np.min(expTimes) + np.max(expTimes))) * ascent[i] ascent[i] = 0 return offset, ascent, error
[ "def", "getLinearityFunction", "(", "expTimes", ",", "imgs", ",", "mxIntensity", "=", "65535", ",", "min_ascent", "=", "0.001", ",", ")", ":", "# TODO: calculate [min_ascent] from noise function\r", "# instead of having it as variable\r", "ascent", ",", "offset", ",", "error", "=", "linRegressUsingMasked2dArrays", "(", "expTimes", ",", "imgs", ",", "imgs", ">", "mxIntensity", ")", "ascent", "[", "np", ".", "isnan", "(", "ascent", ")", "]", "=", "0", "# remove low frequent noise:\r", "if", "min_ascent", ">", "0", ":", "i", "=", "ascent", "<", "min_ascent", "offset", "[", "i", "]", "+=", "(", "0.5", "*", "(", "np", ".", "min", "(", "expTimes", ")", "+", "np", ".", "max", "(", "expTimes", ")", ")", ")", "*", "ascent", "[", "i", "]", "ascent", "[", "i", "]", "=", "0", "return", "offset", ",", "ascent", ",", "error" ]
returns offset, ascent of image(expTime) = offset + ascent*expTime
[ "returns", "offset", "ascent", "of", "image", "(", "expTime", ")", "=", "offset", "+", "ascent", "*", "expTime" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/DarkCurrentMap.py#L61-L80
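A sketch with synthetic data: pixel values that grow linearly with exposure time should come back with ascent ~1 and offset ~0 (the exact outputs depend on linRegressUsingMasked2dArrays, which is not shown here):

    import numpy as np

    expTimes = np.array([1., 2., 4., 8.])
    imgs = expTimes[:, None, None] * np.ones((4, 64, 64))
    offset, ascent, error = getLinearityFunction(expTimes, imgs)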
radjkarl/imgProcessor
imgProcessor/camera/DarkCurrentMap.py
sortForSameExpTime
def sortForSameExpTime(expTimes, img_paths):  # , excludeSingleImg=True):
    '''
    return image paths sorted for same exposure time
    '''
    d = {}
    for e, i in zip(expTimes, img_paths):
        if e not in d:
            d[e] = []
        d[e].append(i)
#     for key in list(d.keys()):
#         if len(d[key]) == 1:
#             print('have only one image of exposure time [%s]' % key)
#             print('--> exclude that one')
#             d.pop(key)
    d = OrderedDict(sorted(d.items()))
    return list(d.keys()), list(d.values())
python
def sortForSameExpTime(expTimes, img_paths):  # , excludeSingleImg=True):
    '''
    return image paths sorted for same exposure time
    '''
    d = {}
    for e, i in zip(expTimes, img_paths):
        if e not in d:
            d[e] = []
        d[e].append(i)
#     for key in list(d.keys()):
#         if len(d[key]) == 1:
#             print('have only one image of exposure time [%s]' % key)
#             print('--> exclude that one')
#             d.pop(key)
    d = OrderedDict(sorted(d.items()))
    return list(d.keys()), list(d.values())
[ "def", "sortForSameExpTime", "(", "expTimes", ",", "img_paths", ")", ":", "# , excludeSingleImg=True):\r", "d", "=", "{", "}", "for", "e", ",", "i", "in", "zip", "(", "expTimes", ",", "img_paths", ")", ":", "if", "e", "not", "in", "d", ":", "d", "[", "e", "]", "=", "[", "]", "d", "[", "e", "]", ".", "append", "(", "i", ")", "# for key in list(d.keys()):\r", "# if len(d[key]) == 1:\r", "# print('have only one image of exposure time [%s]' % key)\r", "# print('--> exclude that one')\r", "# d.pop(key)\r", "d", "=", "OrderedDict", "(", "sorted", "(", "d", ".", "items", "(", ")", ")", ")", "return", "list", "(", "d", ".", "keys", "(", ")", ")", ",", "list", "(", "d", ".", "values", "(", ")", ")" ]
return image paths sorted for same exposure time
[ "return", "image", "paths", "sorted", "for", "same", "exposure", "time" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/DarkCurrentMap.py#L83-L98
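A tiny worked example of the grouping above, following directly from the record:

# sortForSameExpTime([1, 2, 1], ['a.tif', 'b.tif', 'c.tif'])
# -> ([1, 2], [['a.tif', 'c.tif'], ['b.tif']])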
radjkarl/imgProcessor
imgProcessor/camera/DarkCurrentMap.py
getDarkCurrentAverages
def getDarkCurrentAverages(exposuretimes, imgs):
    '''
    return exposure times, image averages for each exposure time
    '''
    x, imgs_p = sortForSameExpTime(exposuretimes, imgs)
    s0, s1 = imgs[0].shape
    imgs = np.empty(shape=(len(x), s0, s1), dtype=imgs[0].dtype)
    for i, ip in zip(imgs, imgs_p):
        if len(ip) == 1:
            i[:] = ip[0]
        else:
            i[:] = averageSameExpTimes(ip)
    return x, imgs
python
def getDarkCurrentAverages(exposuretimes, imgs):
    '''
    return exposure times, image averages for each exposure time
    '''
    x, imgs_p = sortForSameExpTime(exposuretimes, imgs)
    s0, s1 = imgs[0].shape
    imgs = np.empty(shape=(len(x), s0, s1), dtype=imgs[0].dtype)
    for i, ip in zip(imgs, imgs_p):
        if len(ip) == 1:
            i[:] = ip[0]
        else:
            i[:] = averageSameExpTimes(ip)
    return x, imgs
[ "def", "getDarkCurrentAverages", "(", "exposuretimes", ",", "imgs", ")", ":", "x", ",", "imgs_p", "=", "sortForSameExpTime", "(", "exposuretimes", ",", "imgs", ")", "s0", ",", "s1", "=", "imgs", "[", "0", "]", ".", "shape", "imgs", "=", "np", ".", "empty", "(", "shape", "=", "(", "len", "(", "x", ")", ",", "s0", ",", "s1", ")", ",", "dtype", "=", "imgs", "[", "0", "]", ".", "dtype", ")", "for", "i", ",", "ip", "in", "zip", "(", "imgs", ",", "imgs_p", ")", ":", "if", "len", "(", "ip", ")", "==", "1", ":", "i", "[", ":", "]", "=", "ip", "[", "0", "]", "else", ":", "i", "[", ":", "]", "=", "averageSameExpTimes", "(", "ip", ")", "return", "x", ",", "imgs" ]
return exposure times, image averages for each exposure time
[ "return", "exposure", "times", "image", "averages", "for", "each", "exposure", "time" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/DarkCurrentMap.py#L101-L115
radjkarl/imgProcessor
imgProcessor/camera/DarkCurrentMap.py
getDarkCurrentFunction
def getDarkCurrentFunction(exposuretimes, imgs, **kwargs):
    '''
    get dark current function from given images and exposure times
    '''
    exposuretimes, imgs = getDarkCurrentAverages(exposuretimes, imgs)
    offs, ascent, rmse = getLinearityFunction(exposuretimes, imgs, **kwargs)
    return offs, ascent, rmse
python
def getDarkCurrentFunction(exposuretimes, imgs, **kwargs):
    '''
    get dark current function from given images and exposure times
    '''
    exposuretimes, imgs = getDarkCurrentAverages(exposuretimes, imgs)
    offs, ascent, rmse = getLinearityFunction(exposuretimes, imgs, **kwargs)
    return offs, ascent, rmse
[ "def", "getDarkCurrentFunction", "(", "exposuretimes", ",", "imgs", ",", "*", "*", "kwargs", ")", ":", "exposuretimes", ",", "imgs", "=", "getDarkCurrentAverages", "(", "exposuretimes", ",", "imgs", ")", "offs", ",", "ascent", ",", "rmse", "=", "getLinearityFunction", "(", "exposuretimes", ",", "imgs", ",", "*", "*", "kwargs", ")", "return", "offs", ",", "ascent", ",", "rmse" ]
get dark current function from given images and exposure times
[ "get", "dark", "current", "function", "from", "given", "images", "and", "exposure", "times" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/DarkCurrentMap.py#L118-L124
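A hedged sketch of the intended pipeline built from the last three records (imread stands in for any image loader):

exposuretimes = [1., 1., 2., 2., 4.]            # repeated exposure times [s]
# imgs = [imread(p) for p in dark_frame_paths]  # one 2d array per frame
# offs, ascent, rmse = getDarkCurrentFunction(exposuretimes, imgs)
# the dark current at exposure time t is then: offs + ascent * t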
radjkarl/imgProcessor
imgProcessor/transform/alignImageAlongLine.py
alignImageAlongLine
def alignImageAlongLine(img, line, height=15, length=None, zoom=1, fast=False,
                        borderValue=0):
    '''
    return a sub image aligned along given line

    @param img - numpy.2darray input image to get subimage from
    @param line - list of 2 points [x0,y0,x1,y1]
    @param height - height of output array in y
    @param length - width of output array
    @param zoom - zoom factor
    @param fast - speed up calculation using nearest neighbour interpolation
    @returns transformed image as numpy.2darray with found line in the middle
    '''
    height = int(round(height))
    if height % 2 == 0:  # ->is even number
        height += 1  # only take uneven numbers to have line in middle
    if length is None:
        length = int(round(ln.length(line)))
    hh = (height - 1)
    ll = (length - 1)
    # end points of the line:
    p0 = np.array(line[0:2], dtype=float)
    p1 = np.array(line[2:], dtype=float)
    # p2 is above middle of p0,p1:
    norm = np.array(ln.normal(line))
    if not ln.isHoriz(line):
        norm *= -1
    p2 = (p0 + p1) * 0.5 + norm * hh * 0.5
    middleY = hh / 2
    pp0 = [0, middleY]
    pp1 = [ll, middleY]
    pp2 = [ll * 0.5, hh]
    pts1 = np.array([p0, p1, p2], dtype=np.float32)
    pts2 = np.array([pp0, pp1, pp2], dtype=np.float32)
    if zoom != 1:
        length = int(round(length * zoom))
        height = int(round(height * zoom))
        pts2 *= zoom
    # TRANSFORM:
    M = cv2.getAffineTransform(pts1, pts2)
    dst = cv2.warpAffine(
        img, M, (length, height),
        flags=cv2.INTER_NEAREST if fast else cv2.INTER_LINEAR,
        borderValue=borderValue)
    return dst
python
def alignImageAlongLine(img, line, height=15, length=None, zoom=1, fast=False,
                        borderValue=0):
    '''
    return a sub image aligned along given line

    @param img - numpy.2darray input image to get subimage from
    @param line - list of 2 points [x0,y0,x1,y1]
    @param height - height of output array in y
    @param length - width of output array
    @param zoom - zoom factor
    @param fast - speed up calculation using nearest neighbour interpolation
    @returns transformed image as numpy.2darray with found line in the middle
    '''
    height = int(round(height))
    if height % 2 == 0:  # ->is even number
        height += 1  # only take uneven numbers to have line in middle
    if length is None:
        length = int(round(ln.length(line)))
    hh = (height - 1)
    ll = (length - 1)
    # end points of the line:
    p0 = np.array(line[0:2], dtype=float)
    p1 = np.array(line[2:], dtype=float)
    # p2 is above middle of p0,p1:
    norm = np.array(ln.normal(line))
    if not ln.isHoriz(line):
        norm *= -1
    p2 = (p0 + p1) * 0.5 + norm * hh * 0.5
    middleY = hh / 2
    pp0 = [0, middleY]
    pp1 = [ll, middleY]
    pp2 = [ll * 0.5, hh]
    pts1 = np.array([p0, p1, p2], dtype=np.float32)
    pts2 = np.array([pp0, pp1, pp2], dtype=np.float32)
    if zoom != 1:
        length = int(round(length * zoom))
        height = int(round(height * zoom))
        pts2 *= zoom
    # TRANSFORM:
    M = cv2.getAffineTransform(pts1, pts2)
    dst = cv2.warpAffine(
        img, M, (length, height),
        flags=cv2.INTER_NEAREST if fast else cv2.INTER_LINEAR,
        borderValue=borderValue)
    return dst
[ "def", "alignImageAlongLine", "(", "img", ",", "line", ",", "height", "=", "15", ",", "length", "=", "None", ",", "zoom", "=", "1", ",", "fast", "=", "False", ",", "borderValue", "=", "0", ")", ":", "height", "=", "int", "(", "round", "(", "height", ")", ")", "if", "height", "%", "2", "==", "0", ":", "# ->is even number\r", "height", "+=", "1", "# only take uneven numbers to have line in middle\r", "if", "length", "is", "None", ":", "length", "=", "int", "(", "round", "(", "ln", ".", "length", "(", "line", ")", ")", ")", "hh", "=", "(", "height", "-", "1", ")", "ll", "=", "(", "length", "-", "1", ")", "# end points of the line:\r", "p0", "=", "np", ".", "array", "(", "line", "[", "0", ":", "2", "]", ",", "dtype", "=", "float", ")", "p1", "=", "np", ".", "array", "(", "line", "[", "2", ":", "]", ",", "dtype", "=", "float", ")", "# p2 is above middle of p0,p1:\r", "norm", "=", "np", ".", "array", "(", "ln", ".", "normal", "(", "line", ")", ")", "if", "not", "ln", ".", "isHoriz", "(", "line", ")", ":", "norm", "*=", "-", "1", "p2", "=", "(", "p0", "+", "p1", ")", "*", "0.5", "+", "norm", "*", "hh", "*", "0.5", "middleY", "=", "hh", "/", "2", "pp0", "=", "[", "0", ",", "middleY", "]", "pp1", "=", "[", "ll", ",", "middleY", "]", "pp2", "=", "[", "ll", "*", "0.5", ",", "hh", "]", "pts1", "=", "np", ".", "array", "(", "[", "p0", ",", "p1", ",", "p2", "]", ",", "dtype", "=", "np", ".", "float32", ")", "pts2", "=", "np", ".", "array", "(", "[", "pp0", ",", "pp1", ",", "pp2", "]", ",", "dtype", "=", "np", ".", "float32", ")", "if", "zoom", "!=", "1", ":", "length", "=", "int", "(", "round", "(", "length", "*", "zoom", ")", ")", "height", "=", "int", "(", "round", "(", "height", "*", "zoom", ")", ")", "pts2", "*=", "zoom", "# TRANSFORM:\r", "M", "=", "cv2", ".", "getAffineTransform", "(", "pts1", ",", "pts2", ")", "dst", "=", "cv2", ".", "warpAffine", "(", "img", ",", "M", ",", "(", "length", ",", "height", ")", ",", "flags", "=", "cv2", ".", "INTER_NEAREST", "if", "fast", "else", "cv2", ".", "INTER_LINEAR", ",", "borderValue", "=", "borderValue", ")", "return", "dst" ]
return a sub image aligned along given line

@param img - numpy.2darray input image to get subimage from
@param line - list of 2 points [x0,y0,x1,y1]
@param height - height of output array in y
@param length - width of output array
@param zoom - zoom factor
@param fast - speed up calculation using nearest neighbour interpolation
@returns transformed image as numpy.2darray with found line in the middle
[ "return", "a", "sub", "image", "aligned", "along", "given", "line" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/transform/alignImageAlongLine.py#L10-L60
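A usage sketch; `ln` refers to this repository's line-geometry helper module, which is assumed available wherever alignImageAlongLine is imported from:

import numpy as np
img = np.random.rand(100, 100)
line = [10, 10, 90, 80]  # x0, y0, x1, y1
# strip = alignImageAlongLine(img, line, height=21)
# strip.shape -> (21, round(line length)); the line lies in the middle row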
radjkarl/imgProcessor
imgProcessor/uncertainty/positionToIntensityUncertainty.py
positionToIntensityUncertainty
def positionToIntensityUncertainty(image, sx, sy, kernelSize=None):
    '''
    calculates the estimated standard deviation map from the changes
    of neighbouring pixels from a center pixel within a point spread
    function defined by a std.dev. in x and y taken from the (sx, sy) maps

    sx, sy -> either 2d arrays of the same shape as [image] or single values
    '''
    psf_is_const = not isinstance(sx, np.ndarray)
    if not psf_is_const:
        assert image.shape == sx.shape == sy.shape, \
            "Image and position uncertainty maps need to have same size"
        if kernelSize is None:
            kernelSize = _kSizeFromStd(max(sx.max(), sy.max()))
    else:
        assert type(sx) in (int, float) and type(sy) in (int, float), \
            "Image and position uncertainty values need to be int OR float"
        if kernelSize is None:
            kernelSize = _kSizeFromStd(max(sx, sy))
    if image.dtype.kind == 'u':
        image = image.astype(int)  # otherwise stack overflow through uint
    size = kernelSize // 2
    if size < 1:
        size = 1
    kernelSize = 1 + 2 * size
    # array to be filled by individual psf of every pixel:
    psf = np.zeros((kernelSize, kernelSize))
    # intensity uncertainty as stdev:
    sint = np.zeros(image.shape)
    if psf_is_const:
        _calc_constPSF(image, sint, sx, sy, psf, size)
    else:
        _calc_variPSF(image, sint, sx, sy, psf, size)
    return sint
python
def positionToIntensityUncertainty(image, sx, sy, kernelSize=None):
    '''
    calculates the estimated standard deviation map from the changes
    of neighbouring pixels from a center pixel within a point spread
    function defined by a std.dev. in x and y taken from the (sx, sy) maps

    sx, sy -> either 2d arrays of the same shape as [image] or single values
    '''
    psf_is_const = not isinstance(sx, np.ndarray)
    if not psf_is_const:
        assert image.shape == sx.shape == sy.shape, \
            "Image and position uncertainty maps need to have same size"
        if kernelSize is None:
            kernelSize = _kSizeFromStd(max(sx.max(), sy.max()))
    else:
        assert type(sx) in (int, float) and type(sy) in (int, float), \
            "Image and position uncertainty values need to be int OR float"
        if kernelSize is None:
            kernelSize = _kSizeFromStd(max(sx, sy))
    if image.dtype.kind == 'u':
        image = image.astype(int)  # otherwise stack overflow through uint
    size = kernelSize // 2
    if size < 1:
        size = 1
    kernelSize = 1 + 2 * size
    # array to be filled by individual psf of every pixel:
    psf = np.zeros((kernelSize, kernelSize))
    # intensity uncertainty as stdev:
    sint = np.zeros(image.shape)
    if psf_is_const:
        _calc_constPSF(image, sint, sx, sy, psf, size)
    else:
        _calc_variPSF(image, sint, sx, sy, psf, size)
    return sint
[ "def", "positionToIntensityUncertainty", "(", "image", ",", "sx", ",", "sy", ",", "kernelSize", "=", "None", ")", ":", "psf_is_const", "=", "not", "isinstance", "(", "sx", ",", "np", ".", "ndarray", ")", "if", "not", "psf_is_const", ":", "assert", "image", ".", "shape", "==", "sx", ".", "shape", "==", "sy", ".", "shape", ",", "\"Image and position uncertainty maps need to have same size\"", "if", "kernelSize", "is", "None", ":", "kernelSize", "=", "_kSizeFromStd", "(", "max", "(", "sx", ".", "max", "(", ")", ",", "sy", ".", "max", "(", ")", ")", ")", "else", ":", "assert", "type", "(", "sx", ")", "in", "(", "int", ",", "float", ")", "and", "type", "(", "sx", ")", "in", "(", "int", ",", "float", ")", ",", "\"Image and position uncertainty values need to be int OR float\"", "if", "kernelSize", "is", "None", ":", "kernelSize", "=", "_kSizeFromStd", "(", "max", "(", "sx", ",", "sy", ")", ")", "if", "image", ".", "dtype", ".", "kind", "==", "'u'", ":", "image", "=", "image", ".", "astype", "(", "int", ")", "# otherwise stack overflow through uint\r", "size", "=", "kernelSize", "//", "2", "if", "size", "<", "1", ":", "size", "=", "1", "kernelSize", "=", "1", "+", "2", "*", "size", "# array to be filled by individual psf of every pixel:\r", "psf", "=", "np", ".", "zeros", "(", "(", "kernelSize", ",", "kernelSize", ")", ")", "# intensity uncertainty as stdev:\r", "sint", "=", "np", ".", "zeros", "(", "image", ".", "shape", ")", "if", "psf_is_const", ":", "_calc_constPSF", "(", "image", ",", "sint", ",", "sx", ",", "sy", ",", "psf", ",", "size", ")", "else", ":", "_calc_variPSF", "(", "image", ",", "sint", ",", "sx", ",", "sy", ",", "psf", ",", "size", ")", "return", "sint" ]
calculates the estimated standard deviation map from the changes of neighbouring pixels from a center pixel within a point spread function defined by a std.dev. in x and y taken from the (sx, sy) maps

sx, sy -> either 2d arrays of the same shape as [image] or single values
[ "calculates", "the", "estimated", "standard", "deviation", "map", "from", "the", "changes", "of", "neighbouring", "pixels", "from", "a", "center", "pixel", "within", "a", "point", "spread", "function", "defined", "by", "a", "std", ".", "dev", ".", "in", "x", "and", "y", "taken", "from", "the", "(", "sx", "sy", ")", "maps", "sx", "sy", "-", ">", "either", "2d", "array", "of", "same", "shape", "as", "[", "image", "]", "of", "single", "values" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/uncertainty/positionToIntensityUncertainty.py#L52-L87
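A sketch with a constant point spread (positional std.dev. of 1.5 px in x and y):

import numpy as np
img = np.random.rand(64, 64)
# sint = positionToIntensityUncertainty(img, 1.5, 1.5)
# sint[i, j] is large where img varies strongly around pixel (i, j)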
radjkarl/imgProcessor
imgProcessor/uncertainty/positionToIntensityUncertainty.py
_coarsenImage
def _coarsenImage(image, f):
    '''
    seems to be a more precise (but slower) way to down-scale an image
    '''
    from skimage.morphology import square
    from skimage.filters import rank
    from skimage.transform._warps import rescale
    selem = square(f)
    arri = rank.mean(image, selem=selem)
    return rescale(arri, 1 / f, order=0)
python
def _coarsenImage(image, f):
    '''
    seems to be a more precise (but slower) way to down-scale an image
    '''
    from skimage.morphology import square
    from skimage.filters import rank
    from skimage.transform._warps import rescale
    selem = square(f)
    arri = rank.mean(image, selem=selem)
    return rescale(arri, 1 / f, order=0)
[ "def", "_coarsenImage", "(", "image", ",", "f", ")", ":", "from", "skimage", ".", "morphology", "import", "square", "from", "skimage", ".", "filters", "import", "rank", "from", "skimage", ".", "transform", ".", "_warps", "import", "rescale", "selem", "=", "square", "(", "f", ")", "arri", "=", "rank", ".", "mean", "(", "image", ",", "selem", "=", "selem", ")", "return", "rescale", "(", "arri", ",", "1", "/", "f", ",", "order", "=", "0", ")" ]
seems to be a more precise (but slower) way to down-scale an image
[ "seems", "to", "be", "a", "more", "precise", "(", "but", "slower", ")", "way", "to", "down", "-", "scale", "an", "image" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/uncertainty/positionToIntensityUncertainty.py#L94-L104
radjkarl/imgProcessor
imgProcessor/uncertainty/positionToIntensityUncertainty.py
positionToIntensityUncertaintyForPxGroup
def positionToIntensityUncertaintyForPxGroup(image, std, y0, y1, x0, x1):
    '''
    like positionToIntensityUncertainty
    but calculates the average uncertainty for an area [y0:y1, x0:x1]
    '''
    fy, fx = y1 - y0, x1 - x0
    if fy != fx:
        raise Exception('averaged area needs to be square ATM')
    image = _coarsenImage(image, fx)
    k = _kSizeFromStd(std)
    y0 = int(round(y0 / fy))
    x0 = int(round(x0 / fx))
    arr = image[y0 - k:y0 + k, x0 - k:x0 + k]
    U = positionToIntensityUncertainty(arr, std / fx, std / fx)
    return U[k:-k, k:-k]
python
def positionToIntensityUncertaintyForPxGroup(image, std, y0, y1, x0, x1):
    '''
    like positionToIntensityUncertainty
    but calculates the average uncertainty for an area [y0:y1, x0:x1]
    '''
    fy, fx = y1 - y0, x1 - x0
    if fy != fx:
        raise Exception('averaged area needs to be square ATM')
    image = _coarsenImage(image, fx)
    k = _kSizeFromStd(std)
    y0 = int(round(y0 / fy))
    x0 = int(round(x0 / fx))
    arr = image[y0 - k:y0 + k, x0 - k:x0 + k]
    U = positionToIntensityUncertainty(arr, std / fx, std / fx)
    return U[k:-k, k:-k]
[ "def", "positionToIntensityUncertaintyForPxGroup", "(", "image", ",", "std", ",", "y0", ",", "y1", ",", "x0", ",", "x1", ")", ":", "fy", ",", "fx", "=", "y1", "-", "y0", ",", "x1", "-", "x0", "if", "fy", "!=", "fx", ":", "raise", "Exception", "(", "'averaged area need to be square ATM'", ")", "image", "=", "_coarsenImage", "(", "image", ",", "fx", ")", "k", "=", "_kSizeFromStd", "(", "std", ")", "y0", "=", "int", "(", "round", "(", "y0", "/", "fy", ")", ")", "x0", "=", "int", "(", "round", "(", "x0", "/", "fx", ")", ")", "arr", "=", "image", "[", "y0", "-", "k", ":", "y0", "+", "k", ",", "x0", "-", "k", ":", "x0", "+", "k", "]", "U", "=", "positionToIntensityUncertainty", "(", "arr", ",", "std", "/", "fx", ",", "std", "/", "fx", ")", "return", "U", "[", "k", ":", "-", "k", ",", "k", ":", "-", "k", "]" ]
like positionToIntensityUncertainty but calculates the average uncertainty for an area [y0:y1, x0:x1]
[ "like", "positionToIntensityUncertainty", "but", "calculated", "average", "uncertainty", "for", "an", "area", "[", "y0", ":", "y1", "x0", ":", "x1", "]" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/uncertainty/positionToIntensityUncertainty.py#L107-L121
radjkarl/imgProcessor
imgProcessor/filters/nan_maximum_filter.py
nan_maximum_filter
def nan_maximum_filter(arr, ksize):
    '''
    same as scipy.ndimage.maximum_filter but excluding nans
    '''
    out = np.empty_like(arr)
    _calc(arr, out, ksize // 2)
    return out
python
def nan_maximum_filter(arr, ksize):
    '''
    same as scipy.ndimage.maximum_filter but excluding nans
    '''
    out = np.empty_like(arr)
    _calc(arr, out, ksize // 2)
    return out
[ "def", "nan_maximum_filter", "(", "arr", ",", "ksize", ")", ":", "out", "=", "np", ".", "empty_like", "(", "arr", ")", "_calc", "(", "arr", ",", "out", ",", "ksize", "//", "2", ")", "return", "out" ]
same as scipy.ndimage.maximum_filter but excluding nans
[ "same", "as", "scipy", ".", "filters", ".", "maximum_filter", "but", "working", "excluding", "nans" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/filters/nan_maximum_filter.py#L7-L14
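The compiled _calc helper is internal; as a reference, the same result for one pixel can be sketched with plain numpy:

import numpy as np

def nan_max_at(arr, y, x, ksize):
    # brute-force reference: max over the ksize x ksize window, ignoring nans
    k = ksize // 2
    win = arr[max(0, y - k):y + k + 1, max(0, x - k):x + k + 1]
    return np.nanmax(win)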
radjkarl/imgProcessor
imgProcessor/filters/medianThreshold.py
medianThreshold
def medianThreshold(img, threshold=0.1, size=3, condition='>', copy=True):
    '''
    set each pixel value of the given [img] to the median filtered one
    of a given kernel [size]
    in case the relative [threshold] is exceeded

    condition = '>' OR '<'
    '''
    from scipy.ndimage import median_filter
    indices = None
    if threshold > 0:
        blur = np.asfarray(median_filter(img, size=size))
        with np.errstate(divide='ignore', invalid='ignore', over='ignore'):
            if condition == '>':
                indices = abs((img - blur) / blur) > threshold
            else:
                indices = abs((img - blur) / blur) < threshold
        if copy:
            img = img.copy()
        img[indices] = blur[indices]
    return img, indices
python
def medianThreshold(img, threshold=0.1, size=3, condition='>', copy=True):
    '''
    set each pixel value of the given [img] to the median filtered one
    of a given kernel [size]
    in case the relative [threshold] is exceeded

    condition = '>' OR '<'
    '''
    from scipy.ndimage import median_filter
    indices = None
    if threshold > 0:
        blur = np.asfarray(median_filter(img, size=size))
        with np.errstate(divide='ignore', invalid='ignore', over='ignore'):
            if condition == '>':
                indices = abs((img - blur) / blur) > threshold
            else:
                indices = abs((img - blur) / blur) < threshold
        if copy:
            img = img.copy()
        img[indices] = blur[indices]
    return img, indices
[ "def", "medianThreshold", "(", "img", ",", "threshold", "=", "0.1", ",", "size", "=", "3", ",", "condition", "=", "'>'", ",", "copy", "=", "True", ")", ":", "from", "scipy", ".", "ndimage", "import", "median_filter", "indices", "=", "None", "if", "threshold", ">", "0", ":", "blur", "=", "np", ".", "asfarray", "(", "median_filter", "(", "img", ",", "size", "=", "size", ")", ")", "with", "np", ".", "errstate", "(", "divide", "=", "'ignore'", ",", "invalid", "=", "'ignore'", ",", "over", "=", "'ignore'", ")", ":", "if", "condition", "==", "'>'", ":", "indices", "=", "abs", "(", "(", "img", "-", "blur", ")", "/", "blur", ")", ">", "threshold", "else", ":", "indices", "=", "abs", "(", "(", "img", "-", "blur", ")", "/", "blur", ")", "<", "threshold", "if", "copy", ":", "img", "=", "img", ".", "copy", "(", ")", "img", "[", "indices", "]", "=", "blur", "[", "indices", "]", "return", "img", ",", "indices" ]
set each pixel value of the given [img] to the median filtered one of a given kernel [size] in case the relative [threshold] is exceeded

condition = '>' OR '<'
[ "set", "every", "the", "pixel", "value", "of", "the", "given", "[", "img", "]", "to", "the", "median", "filtered", "one", "of", "a", "given", "kernel", "[", "size", "]", "in", "case", "the", "relative", "[", "threshold", "]", "is", "exeeded", "condition", "=", ">", "OR", "<" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/filters/medianThreshold.py#L7-L30
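A sketch removing a hot pixel that exceeds its median-filtered neighbourhood by more than 10 %:

import numpy as np
img = np.ones((50, 50))
img[25, 25] = 100.0  # hot pixel
# cleaned, hot = medianThreshold(img, threshold=0.1, size=3)
# cleaned[25, 25] is replaced by the local median (~1.0);
# hot is the boolean mask marking the replaced pixel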
radjkarl/imgProcessor
imgProcessor/filters/fastFilter.py
fastFilter
def fastFilter(arr, ksize=30, every=None, resize=True, fn='median',
               interpolation=cv2.INTER_LANCZOS4,
               smoothksize=0,
               borderMode=cv2.BORDER_REFLECT):
    '''
    fn = ['nanmean', 'mean', 'nanmedian', 'median']

    a fast 2d filter for large kernel sizes that also works with nans
    the computation speed is increased because only every n-th position
    (n = [every]) within the median kernel is evaluated
    '''
    if every is None:
        every = max(ksize // 3, 1)
    else:
        assert ksize >= 3 * every
    s0, s1 = arr.shape[:2]

    ss0 = s0 // every
    every = s0 // ss0
    ss1 = s1 // every

    out = np.full((ss0 + 1, ss1 + 1), np.nan)

    c = {'median': _calcMedian,
         'nanmedian': _calcNanMedian,
         'nanmean': _calcNanMean,
         'mean': _calcMean,
         }[fn]
    ss0, ss1 = c(arr, out, ksize, every)
    out = out[:ss0, :ss1]

    if smoothksize:
        out = gaussian_filter(out, smoothksize)

    if not resize:
        return out
    return cv2.resize(out, arr.shape[:2][::-1],
                      interpolation=interpolation)
python
def fastFilter(arr, ksize=30, every=None, resize=True, fn='median',
               interpolation=cv2.INTER_LANCZOS4,
               smoothksize=0,
               borderMode=cv2.BORDER_REFLECT):
    '''
    fn = ['nanmean', 'mean', 'nanmedian', 'median']

    a fast 2d filter for large kernel sizes that also works with nans
    the computation speed is increased because only every n-th position
    (n = [every]) within the median kernel is evaluated
    '''
    if every is None:
        every = max(ksize // 3, 1)
    else:
        assert ksize >= 3 * every
    s0, s1 = arr.shape[:2]

    ss0 = s0 // every
    every = s0 // ss0
    ss1 = s1 // every

    out = np.full((ss0 + 1, ss1 + 1), np.nan)

    c = {'median': _calcMedian,
         'nanmedian': _calcNanMedian,
         'nanmean': _calcNanMean,
         'mean': _calcMean,
         }[fn]
    ss0, ss1 = c(arr, out, ksize, every)
    out = out[:ss0, :ss1]

    if smoothksize:
        out = gaussian_filter(out, smoothksize)

    if not resize:
        return out
    return cv2.resize(out, arr.shape[:2][::-1],
                      interpolation=interpolation)
[ "def", "fastFilter", "(", "arr", ",", "ksize", "=", "30", ",", "every", "=", "None", ",", "resize", "=", "True", ",", "fn", "=", "'median'", ",", "interpolation", "=", "cv2", ".", "INTER_LANCZOS4", ",", "smoothksize", "=", "0", ",", "borderMode", "=", "cv2", ".", "BORDER_REFLECT", ")", ":", "if", "every", "is", "None", ":", "every", "=", "max", "(", "ksize", "//", "3", ",", "1", ")", "else", ":", "assert", "ksize", ">=", "3", "*", "every", "s0", ",", "s1", "=", "arr", ".", "shape", "[", ":", "2", "]", "ss0", "=", "s0", "//", "every", "every", "=", "s0", "//", "ss0", "ss1", "=", "s1", "//", "every", "out", "=", "np", ".", "full", "(", "(", "ss0", "+", "1", ",", "ss1", "+", "1", ")", ",", "np", ".", "nan", ")", "c", "=", "{", "'median'", ":", "_calcMedian", ",", "'nanmedian'", ":", "_calcNanMedian", ",", "'nanmean'", ":", "_calcNanMean", ",", "'mean'", ":", "_calcMean", ",", "}", "[", "fn", "]", "ss0", ",", "ss1", "=", "c", "(", "arr", ",", "out", ",", "ksize", ",", "every", ")", "out", "=", "out", "[", ":", "ss0", ",", ":", "ss1", "]", "if", "smoothksize", ":", "out", "=", "gaussian_filter", "(", "out", ",", "smoothksize", ")", "if", "not", "resize", ":", "return", "out", "return", "cv2", ".", "resize", "(", "out", ",", "arr", ".", "shape", "[", ":", "2", "]", "[", ":", ":", "-", "1", "]", ",", "interpolation", "=", "interpolation", ")" ]
fn = ['nanmean', 'mean', 'nanmedian', 'median']

a fast 2d filter for large kernel sizes that also works with nans
the computation speed is increased because only every n-th position (n = [every]) within the median kernel is evaluated
[ "fn", "[", "nanmean", "mean", "nanmedian", "median", "]", "a", "fast", "2d", "filter", "for", "large", "kernel", "sizes", "that", "also", "works", "with", "nans", "the", "computation", "speed", "is", "increased", "because", "only", "every", "nsth", "position", "within", "the", "median", "kernel", "is", "evaluated" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/filters/fastFilter.py#L9-L48
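A usage sketch of the speed/quality trade-off: a 31 px median evaluated only on a coarse grid, then resized back to full resolution:

import numpy as np
arr = np.random.rand(500, 500)
# background = fastFilter(arr, ksize=31, fn='nanmedian')
# background.shape == arr.shape, but far fewer kernel evaluations than
# scipy.ndimage.median_filter(arr, 31) at comparable smoothness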
radjkarl/imgProcessor
imgProcessor/reader/elbin.py
elbin
def elbin(filename):
    '''
    Read EL images (*.elbin) created by the RELTRON EL Software
    http://www.reltron.com/Products/Solar.html
    '''
#     arrs = []
    labels = []
    # These are all exposure times [s] to be selectable:
    TIMES = (0.3, 0.4, 0.6, 0.8, 1.2, 1.6, 2.4, 3.2, 4.8, 6.4, 9.6, 12.8,
             19.2, 25.6, 38.4, 51.2, 76.8, 102.6, 153.6, 204.6, 307.2,
             409.8, 614.4, 819., 1228.8, 1638.6, 3276.6, 5400., 8100.,
             12168., 18216., 27324., 41004., 61488., 92268.)
    with open(filename, 'rb') as f:
        # image shape and number:
        height, width, frames = np.frombuffer(f.read(4 * 3), dtype=np.uint32)
        arrs = np.empty((frames, width, height), dtype=np.uint16)
        for i in range(frames):
            # read header between all frames:
            current, voltage = np.frombuffer(f.read(8 * 2), dtype=np.float64)
            i_time = np.frombuffer(f.read(4), dtype=np.uint32)[0]
            time = TIMES[i_time]
            # read image:
            arr = np.frombuffer(f.read(width * height * 2), dtype=np.uint16)
            arrs[i] = arr.reshape(width, height)
            # last row is all zeros in all imgs
#             print arr[:,:-1]
#             arrs.append(arr)
            labels.append({'exposure time[s]': time,
                           'current[A]': current,
                           'voltage[V]': voltage})
    return arrs, labels
python
def elbin(filename):
    '''
    Read EL images (*.elbin) created by the RELTRON EL Software
    http://www.reltron.com/Products/Solar.html
    '''
#     arrs = []
    labels = []
    # These are all exposure times [s] to be selectable:
    TIMES = (0.3, 0.4, 0.6, 0.8, 1.2, 1.6, 2.4, 3.2, 4.8, 6.4, 9.6, 12.8,
             19.2, 25.6, 38.4, 51.2, 76.8, 102.6, 153.6, 204.6, 307.2,
             409.8, 614.4, 819., 1228.8, 1638.6, 3276.6, 5400., 8100.,
             12168., 18216., 27324., 41004., 61488., 92268.)
    with open(filename, 'rb') as f:
        # image shape and number:
        height, width, frames = np.frombuffer(f.read(4 * 3), dtype=np.uint32)
        arrs = np.empty((frames, width, height), dtype=np.uint16)
        for i in range(frames):
            # read header between all frames:
            current, voltage = np.frombuffer(f.read(8 * 2), dtype=np.float64)
            i_time = np.frombuffer(f.read(4), dtype=np.uint32)[0]
            time = TIMES[i_time]
            # read image:
            arr = np.frombuffer(f.read(width * height * 2), dtype=np.uint16)
            arrs[i] = arr.reshape(width, height)
            # last row is all zeros in all imgs
#             print arr[:,:-1]
#             arrs.append(arr)
            labels.append({'exposure time[s]': time,
                           'current[A]': current,
                           'voltage[V]': voltage})
    return arrs, labels
[ "def", "elbin", "(", "filename", ")", ":", "# arrs = []\r", "labels", "=", "[", "]", "# These are all exposure times [s] to be selectable:\r", "TIMES", "=", "(", "0.3", ",", "0.4", ",", "0.6", ",", "0.8", ",", "1.2", ",", "1.6", ",", "2.4", ",", "3.2", ",", "4.8", ",", "6.4", ",", "9.6", ",", "12.8", ",", "19.2", ",", "25.6", ",", "38.4", ",", "51.2", ",", "76.8", ",", "102.6", ",", "153.6", ",", "204.6", ",", "307.2", ",", "409.8", ",", "614.4", ",", "819.", ",", "1228.8", ",", "1638.6", ",", "3276.6", ",", "5400.", ",", "8100.", ",", "12168.", ",", "18216.", ",", "27324.", ",", "41004.", ",", "61488.", ",", "92268.", ")", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "f", ":", "# image shape and number:\r", "height", ",", "width", ",", "frames", "=", "np", ".", "frombuffer", "(", "f", ".", "read", "(", "4", "*", "3", ")", ",", "dtype", "=", "np", ".", "uint32", ")", "arrs", "=", "np", ".", "empty", "(", "(", "frames", ",", "width", ",", "height", ")", ",", "dtype", "=", "np", ".", "uint16", ")", "for", "i", "in", "range", "(", "frames", ")", ":", "# read header between all frames:\r", "current", ",", "voltage", "=", "np", ".", "frombuffer", "(", "f", ".", "read", "(", "8", "*", "2", ")", ",", "dtype", "=", "np", ".", "float64", ")", "i_time", "=", "np", ".", "frombuffer", "(", "f", ".", "read", "(", "4", ")", ",", "dtype", "=", "np", ".", "uint32", ")", "[", "0", "]", "time", "=", "TIMES", "[", "i_time", "]", "# read image:\r", "arr", "=", "np", ".", "frombuffer", "(", "f", ".", "read", "(", "width", "*", "height", "*", "2", ")", ",", "dtype", "=", "np", ".", "uint16", ")", "arrs", "[", "i", "]", "=", "arr", ".", "reshape", "(", "width", ",", "height", ")", "# last row is all zeros in all imgs\r", "# print arr[:,:-1]\r", "# arrs.append(arr)\r", "labels", ".", "append", "(", "{", "'exposure time[s]'", ":", "time", ",", "'current[A]'", ":", "current", ",", "'voltage[V]'", ":", "voltage", "}", ")", "return", "arrs", ",", "labels" ]
Read EL images (*.elbin) created by the RELTRON EL Software http://www.reltron.com/Products/Solar.html
[ "Read", "EL", "images", "(", "*", ".", "elbin", ")", "created", "by", "the", "RELTRON", "EL", "Software", "http", ":", "//", "www", ".", "reltron", ".", "com", "/", "Products", "/", "Solar", ".", "html" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/reader/elbin.py#L7-L40
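The binary layout read above, summarised from the f.read() calls in the record:

# file header : 3 x uint32  -> height, width, frames            (12 bytes)
# per frame   : 2 x float64 -> current [A], voltage [V]         (16 bytes)
#               1 x uint32  -> index into the TIMES tuple       (4 bytes)
#               width*height x uint16 -> raw image data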
radjkarl/imgProcessor
imgProcessor/equations/gaussian2d.py
gaussian2d
def gaussian2d(xy, sx, sy, mx=0, my=0, rho=0, amp=1, offs=0):
    '''
    see http://en.wikipedia.org/wiki/Multivariate_normal_distribution
    # probability density function of a vector [x,y]

    sx, sy -> sigma (standard deviation)
    mx, my -> mu (mean position)
    rho -> correlation between x and y
    '''
    x, y = xy
    return offs + amp * (
        1 / (2 * np.pi * sx * sy * (1 - rho**2)**0.5)
        * np.exp((-1 / (2 * (1 - rho**2)))
                 * (((x - mx)**2 / sx**2)
                    + ((y - my)**2 / sy**2)
                    - ((2 * rho * (x - mx) * (y - my)) / (sx * sy)))))
python
def gaussian2d(xy, sx, sy, mx=0, my=0, rho=0, amp=1, offs=0):
    '''
    see http://en.wikipedia.org/wiki/Multivariate_normal_distribution
    # probability density function of a vector [x,y]

    sx, sy -> sigma (standard deviation)
    mx, my -> mu (mean position)
    rho -> correlation between x and y
    '''
    x, y = xy
    return offs + amp * (
        1 / (2 * np.pi * sx * sy * (1 - rho**2)**0.5)
        * np.exp((-1 / (2 * (1 - rho**2)))
                 * (((x - mx)**2 / sx**2)
                    + ((y - my)**2 / sy**2)
                    - ((2 * rho * (x - mx) * (y - my)) / (sx * sy)))))
[ "def", "gaussian2d", "(", "xy", ",", "sx", ",", "sy", ",", "mx", "=", "0", ",", "my", "=", "0", ",", "rho", "=", "0", ",", "amp", "=", "1", ",", "offs", "=", "0", ")", ":", "x", ",", "y", "=", "xy", "return", "offs", "+", "amp", "*", "(", "1", "/", "(", "2", "*", "np", ".", "pi", "*", "sx", "*", "sy", "*", "(", "1", "-", "(", "rho", "**", "2", ")", ")", "**", "0.5", ")", "*", "np", ".", "exp", "(", "(", "-", "1", "/", "(", "2", "*", "(", "1", "-", "rho", "**", "2", ")", ")", ")", "*", "(", "(", "(", "x", "-", "mx", ")", "**", "2", "/", "sx", "**", "2", ")", "+", "(", "(", "y", "-", "my", ")", "**", "2", "/", "sy", "**", "2", ")", "-", "(", "(", "2", "*", "rho", "*", "(", "x", "-", "mx", ")", "*", "(", "y", "-", "my", ")", ")", "/", "(", "sx", "*", "sy", ")", ")", ")", ")", ")" ]
see http://en.wikipedia.org/wiki/Multivariate_normal_distribution
# probability density function of a vector [x,y]

sx, sy -> sigma (standard deviation)
mx, my -> mu (mean position)
rho -> correlation between x and y
[ "see", "http", ":", "//", "en", ".", "wikipedia", ".", "org", "/", "wiki", "/", "Multivariate_normal_distribution", "#", "probability", "density", "function", "of", "a", "vector", "[", "x", "y", "]", "sx", "sy", "-", ">", "sigma", "(", "standard", "deviation", ")", "mx", "my", ":", "mue", "(", "mean", "position", ")", "rho", ":", "correlation", "between", "x", "and", "y" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/equations/gaussian2d.py#L5-L23
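A quick numerical check of the bivariate normal above:

import numpy as np
x, y = np.meshgrid(np.linspace(-3, 3, 101), np.linspace(-3, 3, 101))
# z = gaussian2d((x, y), sx=1, sy=1)
# with rho=0 this reduces to exp(-(x**2 + y**2) / 2) / (2*np.pi),
# so z.max() ~= 1 / (2*np.pi) ~= 0.159 at the origin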
radjkarl/imgProcessor
imgProcessor/transform/PerspectiveImageStitching.py
PerspectiveImageStitching.fitImg
def fitImg(self, img_rgb):
    '''
    fit perspective and size of the input image to the base image
    '''
    H = self.pattern.findHomography(img_rgb)[0]
    H_inv = self.pattern.invertHomography(H)
    s = self.img_orig.shape
    warped = cv2.warpPerspective(img_rgb, H_inv, (s[1], s[0]))
    return warped
python
def fitImg(self, img_rgb):
    '''
    fit perspective and size of the input image to the base image
    '''
    H = self.pattern.findHomography(img_rgb)[0]
    H_inv = self.pattern.invertHomography(H)
    s = self.img_orig.shape
    warped = cv2.warpPerspective(img_rgb, H_inv, (s[1], s[0]))
    return warped
[ "def", "fitImg", "(", "self", ",", "img_rgb", ")", ":", "H", "=", "self", ".", "pattern", ".", "findHomography", "(", "img_rgb", ")", "[", "0", "]", "H_inv", "=", "self", ".", "pattern", ".", "invertHomography", "(", "H", ")", "s", "=", "self", ".", "img_orig", ".", "shape", "warped", "=", "cv2", ".", "warpPerspective", "(", "img_rgb", ",", "H_inv", ",", "(", "s", "[", "1", "]", ",", "s", "[", "0", "]", ")", ")", "return", "warped" ]
fit perspective and size of the input image to the base image
[ "fit", "perspective", "and", "size", "of", "the", "input", "image", "to", "the", "base", "image" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/transform/PerspectiveImageStitching.py#L26-L34
radjkarl/imgProcessor
imgProcessor/imgSignal.py
scaleSignalCut
def scaleSignalCut(img, ratio, nbins=100):
    '''
    scale img, cutting a fraction [ratio] off the top and bottom
    of its histogram
    '''
    start, stop = scaleSignalCutParams(img, ratio, nbins)
    img = img - start
    img /= (stop - start)
    return img
python
def scaleSignalCut(img, ratio, nbins=100):
    '''
    scale img, cutting a fraction [ratio] off the top and bottom
    of its histogram
    '''
    start, stop = scaleSignalCutParams(img, ratio, nbins)
    img = img - start
    img /= (stop - start)
    return img
[ "def", "scaleSignalCut", "(", "img", ",", "ratio", ",", "nbins", "=", "100", ")", ":", "start", ",", "stop", "=", "scaleSignalCutParams", "(", "img", ",", "ratio", ",", "nbins", ")", "img", "=", "img", "-", "start", "img", "/=", "(", "stop", "-", "start", ")", "return", "img" ]
scale img, cutting a fraction [ratio] off the top and bottom of its histogram
[ "scaling", "img", "cutting", "x", "percent", "of", "top", "and", "bottom", "part", "of", "histogram" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/imgSignal.py#L14-L21
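A sketch, assuming [ratio] is the fraction clipped from each histogram tail (scaleSignalCutParams is not shown in this extract):

import numpy as np
img = np.random.rand(100, 100) * 1000
# scaled = scaleSignalCut(img, ratio=0.02)
# values between roughly the 2nd and 98th percentile map onto [0, 1];
# the clipped tails fall outside that range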
radjkarl/imgProcessor
imgProcessor/imgSignal.py
scaleSignal
def scaleSignal(img, fitParams=None, backgroundToZero=False, reference=None):
    '''
    scale the image between...

    backgroundToZero=True -> 0 (average background) and 1 (maximum signal)
    backgroundToZero=False -> signal+-3std

    reference -> reference image -- scale image to fit this one

    returns: scaled image
    '''
    img = imread(img)
    if reference is not None:
#         def fn(ii, m,n):
#             return ii*m+n
#         curve_fit(fn, img[::10,::10], ref[::10,::10])
        low, high = signalRange(img, fitParams)
        low2, high2 = signalRange(reference)
        img = np.asfarray(img)
        ampl = (high2 - low2) / (high - low)
        img -= low
        img *= ampl
        img += low2
        return img
    else:
        offs, div = scaleParams(img, fitParams, backgroundToZero)
        img = np.asfarray(img) - offs
        img /= div
        print('offset: %s, dividend: %s' % (offs, div))
        return img
python
def scaleSignal(img, fitParams=None, backgroundToZero=False, reference=None):
    '''
    scale the image between...

    backgroundToZero=True -> 0 (average background) and 1 (maximum signal)
    backgroundToZero=False -> signal+-3std

    reference -> reference image -- scale image to fit this one

    returns: scaled image
    '''
    img = imread(img)
    if reference is not None:
#         def fn(ii, m,n):
#             return ii*m+n
#         curve_fit(fn, img[::10,::10], ref[::10,::10])
        low, high = signalRange(img, fitParams)
        low2, high2 = signalRange(reference)
        img = np.asfarray(img)
        ampl = (high2 - low2) / (high - low)
        img -= low
        img *= ampl
        img += low2
        return img
    else:
        offs, div = scaleParams(img, fitParams, backgroundToZero)
        img = np.asfarray(img) - offs
        img /= div
        print('offset: %s, dividend: %s' % (offs, div))
        return img
[ "def", "scaleSignal", "(", "img", ",", "fitParams", "=", "None", ",", "backgroundToZero", "=", "False", ",", "reference", "=", "None", ")", ":", "img", "=", "imread", "(", "img", ")", "if", "reference", "is", "not", "None", ":", "# def fn(ii, m,n):\r", "# return ii*m+n\r", "# curve_fit(fn, img[::10,::10], ref[::10,::10])\r", "low", ",", "high", "=", "signalRange", "(", "img", ",", "fitParams", ")", "low2", ",", "high2", "=", "signalRange", "(", "reference", ")", "img", "=", "np", ".", "asfarray", "(", "img", ")", "ampl", "=", "(", "high2", "-", "low2", ")", "/", "(", "high", "-", "low", ")", "img", "-=", "low", "img", "*=", "ampl", "img", "+=", "low2", "return", "img", "else", ":", "offs", ",", "div", "=", "scaleParams", "(", "img", ",", "fitParams", ",", "backgroundToZero", ")", "img", "=", "np", ".", "asfarray", "(", "img", ")", "-", "offs", "img", "/=", "div", "print", "(", "'offset: %s, divident: %s'", "%", "(", "offs", ",", "div", ")", ")", "return", "img" ]
scale the image between...

backgroundToZero=True -> 0 (average background) and 1 (maximum signal)
backgroundToZero=False -> signal+-3std

reference -> reference image -- scale image to fit this one

returns: scaled image
[ "scale", "the", "image", "between", "...", "backgroundToZero", "=", "True", "-", ">", "0", "(", "average", "background", ")", "and", "1", "(", "maximum", "signal", ")", "backgroundToZero", "=", "False", "-", ">", "signal", "+", "-", "3std", "reference", "-", ">", "reference", "image", "--", "scale", "image", "to", "fit", "this", "one", "returns", ":", "scaled", "image" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/imgSignal.py#L72-L104
radjkarl/imgProcessor
imgProcessor/imgSignal.py
getBackgroundRange
def getBackgroundRange(fitParams):
    '''
    return minimum, average, maximum of the background peak
    '''
    smn, _, _ = getSignalParameters(fitParams)
    bg = fitParams[0]
    _, avg, std = bg
    bgmn = max(0, avg - 3 * std)
    # take the widest range that still stays below the signal minimum:
    if avg + 4 * std < smn:
        bgmx = avg + 4 * std
    elif avg + 3 * std < smn:
        bgmx = avg + 3 * std
    elif avg + 2 * std < smn:
        bgmx = avg + 2 * std
    else:
        bgmx = avg + std
    return bgmn, avg, bgmx
python
def getBackgroundRange(fitParams):
    '''
    return minimum, average, maximum of the background peak
    '''
    smn, _, _ = getSignalParameters(fitParams)
    bg = fitParams[0]
    _, avg, std = bg
    bgmn = max(0, avg - 3 * std)
    # take the widest range that still stays below the signal minimum:
    if avg + 4 * std < smn:
        bgmx = avg + 4 * std
    elif avg + 3 * std < smn:
        bgmx = avg + 3 * std
    elif avg + 2 * std < smn:
        bgmx = avg + 2 * std
    else:
        bgmx = avg + std
    return bgmn, avg, bgmx
[ "def", "getBackgroundRange", "(", "fitParams", ")", ":", "smn", ",", "_", ",", "_", "=", "getSignalParameters", "(", "fitParams", ")", "bg", "=", "fitParams", "[", "0", "]", "_", ",", "avg", ",", "std", "=", "bg", "bgmn", "=", "max", "(", "0", ",", "avg", "-", "3", "*", "std", ")", "if", "avg", "+", "4", "*", "std", "<", "smn", ":", "bgmx", "=", "avg", "+", "4", "*", "std", "if", "avg", "+", "3", "*", "std", "<", "smn", ":", "bgmx", "=", "avg", "+", "3", "*", "std", "if", "avg", "+", "2", "*", "std", "<", "smn", ":", "bgmx", "=", "avg", "+", "2", "*", "std", "else", ":", "bgmx", "=", "avg", "+", "std", "return", "bgmn", ",", "avg", ",", "bgmx" ]
return minimum, average, maximum of the background peak
[ "return", "minimum", "average", "maximum", "of", "the", "background", "peak" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/imgSignal.py#L107-L125
radjkarl/imgProcessor
imgProcessor/imgSignal.py
hasBackground
def hasBackground(fitParams):
    '''
    compare the height of the putative background and signal peaks
    if the ratio is too high, assume there is no background
    '''
    signal = getSignalPeak(fitParams)
    bg = getBackgroundPeak(fitParams)
    if signal == bg:
        return False
    r = signal[0] / bg[0]
    if r < 1:
        r = 1 / r
    return r < 100
python
def hasBackground(fitParams):
    '''
    compare the height of the putative background and signal peaks
    if the ratio is too high, assume there is no background
    '''
    signal = getSignalPeak(fitParams)
    bg = getBackgroundPeak(fitParams)
    if signal == bg:
        return False
    r = signal[0] / bg[0]
    if r < 1:
        r = 1 / r
    return r < 100
[ "def", "hasBackground", "(", "fitParams", ")", ":", "signal", "=", "getSignalPeak", "(", "fitParams", ")", "bg", "=", "getBackgroundPeak", "(", "fitParams", ")", "if", "signal", "==", "bg", ":", "return", "False", "r", "=", "signal", "[", "0", "]", "/", "bg", "[", "0", "]", "if", "r", "<", "1", ":", "r", "=", "1", "/", "r", "return", "r", "<", "100" ]
compare the height of the putative background and signal peaks; if the ratio is too high, assume there is no background
[ "compare", "the", "height", "of", "putative", "bg", "and", "signal", "peak", "if", "ratio", "if", "too", "height", "assume", "there", "is", "no", "background" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/imgSignal.py#L128-L140
radjkarl/imgProcessor
imgProcessor/imgSignal.py
signalMinimum2
def signalMinimum2(img, bins=None):
    '''
    minimum position between signal and background peak
    '''
    f = FitHistogramPeaks(img, bins=bins)
    i = signalPeakIndex(f.fitParams)
    spos = f.fitParams[i][1]
#     spos = getSignalPeak(f.fitParams)[1]
#     bpos = getBackgroundPeak(f.fitParams)[1]
    bpos = f.fitParams[i - 1][1]

    ind = np.logical_and(f.xvals > bpos, f.xvals < spos)
    try:
        i = np.argmin(f.yvals[ind])
        return f.xvals[ind][i]
    except ValueError as e:
        if bins is None:
            return signalMinimum2(img, bins=400)
        else:
            raise e
python
def signalMinimum2(img, bins=None):
    '''
    minimum position between signal and background peak
    '''
    f = FitHistogramPeaks(img, bins=bins)
    i = signalPeakIndex(f.fitParams)
    spos = f.fitParams[i][1]
#     spos = getSignalPeak(f.fitParams)[1]
#     bpos = getBackgroundPeak(f.fitParams)[1]
    bpos = f.fitParams[i - 1][1]

    ind = np.logical_and(f.xvals > bpos, f.xvals < spos)
    try:
        i = np.argmin(f.yvals[ind])
        return f.xvals[ind][i]
    except ValueError as e:
        if bins is None:
            return signalMinimum2(img, bins=400)
        else:
            raise e
[ "def", "signalMinimum2", "(", "img", ",", "bins", "=", "None", ")", ":", "f", "=", "FitHistogramPeaks", "(", "img", ",", "bins", "=", "bins", ")", "i", "=", "signalPeakIndex", "(", "f", ".", "fitParams", ")", "spos", "=", "f", ".", "fitParams", "[", "i", "]", "[", "1", "]", "# spos = getSignalPeak(f.fitParams)[1]\r", "# bpos = getBackgroundPeak(f.fitParams)[1]\r", "bpos", "=", "f", ".", "fitParams", "[", "i", "-", "1", "]", "[", "1", "]", "ind", "=", "np", ".", "logical_and", "(", "f", ".", "xvals", ">", "bpos", ",", "f", ".", "xvals", "<", "spos", ")", "try", ":", "i", "=", "np", ".", "argmin", "(", "f", ".", "yvals", "[", "ind", "]", ")", "return", "f", ".", "xvals", "[", "ind", "]", "[", "i", "]", "except", "ValueError", "as", "e", ":", "if", "bins", "is", "None", ":", "return", "signalMinimum2", "(", "img", ",", "bins", "=", "400", ")", "else", ":", "raise", "e" ]
minimum position between signal and background peak
[ "minimum", "position", "between", "signal", "and", "background", "peak" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/imgSignal.py#L161-L179
radjkarl/imgProcessor
imgProcessor/imgSignal.py
signalMinimum
def signalMinimum(img, fitParams=None, n_std=3):
    '''
    intersection between signal and background peak
    '''
    if fitParams is None:
        fitParams = FitHistogramPeaks(img).fitParams
    assert len(fitParams) > 1, 'need 2 peaks to get minimum signal'

    i = signalPeakIndex(fitParams)
    signal = fitParams[i]
    bg = getBackgroundPeak(fitParams)
    smn = signal[1] - n_std * signal[2]
    bmx = bg[1] + n_std * bg[2]
    if smn > bmx:
        return smn
    # peaks are overlapping
    # define signal min. as intersection between both Gaussians

    def solve(p1, p2):
        s1, m1, std1 = p1
        s2, m2, std2 = p2
        a = (1 / (2 * std1**2)) - (1 / (2 * std2**2))
        b = (m2 / (std2**2)) - (m1 / (std1**2))
        c = (m1**2 / (2 * std1**2)) - (m2**2 / (2 * std2**2)) - \
            np.log(((std2 * s1) / (std1 * s2)))
        return np.roots([a, b, c])
    i = solve(bg, signal)
    try:
        return i[np.logical_and(i > bg[1], i < signal[1])][0]
    except IndexError:
        # this error shouldn't occur... well
        return max(smn, bmx)
python
def signalMinimum(img, fitParams=None, n_std=3):
    '''
    intersection between signal and background peak
    '''
    if fitParams is None:
        fitParams = FitHistogramPeaks(img).fitParams
    assert len(fitParams) > 1, 'need 2 peaks to get minimum signal'

    i = signalPeakIndex(fitParams)
    signal = fitParams[i]
    bg = getBackgroundPeak(fitParams)
    smn = signal[1] - n_std * signal[2]
    bmx = bg[1] + n_std * bg[2]
    if smn > bmx:
        return smn
    # peaks are overlapping
    # define signal min. as intersection between both Gaussians

    def solve(p1, p2):
        s1, m1, std1 = p1
        s2, m2, std2 = p2
        a = (1 / (2 * std1**2)) - (1 / (2 * std2**2))
        b = (m2 / (std2**2)) - (m1 / (std1**2))
        c = (m1**2 / (2 * std1**2)) - (m2**2 / (2 * std2**2)) - \
            np.log(((std2 * s1) / (std1 * s2)))
        return np.roots([a, b, c])
    i = solve(bg, signal)
    try:
        return i[np.logical_and(i > bg[1], i < signal[1])][0]
    except IndexError:
        # this error shouldn't occur... well
        return max(smn, bmx)
[ "def", "signalMinimum", "(", "img", ",", "fitParams", "=", "None", ",", "n_std", "=", "3", ")", ":", "if", "fitParams", "is", "None", ":", "fitParams", "=", "FitHistogramPeaks", "(", "img", ")", ".", "fitParams", "assert", "len", "(", "fitParams", ")", ">", "1", ",", "'need 2 peaks so get minimum signal'", "i", "=", "signalPeakIndex", "(", "fitParams", ")", "signal", "=", "fitParams", "[", "i", "]", "bg", "=", "getBackgroundPeak", "(", "fitParams", ")", "smn", "=", "signal", "[", "1", "]", "-", "n_std", "*", "signal", "[", "2", "]", "bmx", "=", "bg", "[", "1", "]", "+", "n_std", "*", "bg", "[", "2", "]", "if", "smn", ">", "bmx", ":", "return", "smn", "# peaks are overlapping\r", "# define signal min. as intersection between both Gaussians\r", "def", "solve", "(", "p1", ",", "p2", ")", ":", "s1", ",", "m1", ",", "std1", "=", "p1", "s2", ",", "m2", ",", "std2", "=", "p2", "a", "=", "(", "1", "/", "(", "2", "*", "std1", "**", "2", ")", ")", "-", "(", "1", "/", "(", "2", "*", "std2", "**", "2", ")", ")", "b", "=", "(", "m2", "/", "(", "std2", "**", "2", ")", ")", "-", "(", "m1", "/", "(", "std1", "**", "2", ")", ")", "c", "=", "(", "m1", "**", "2", "/", "(", "2", "*", "std1", "**", "2", ")", ")", "-", "(", "m2", "**", "2", "/", "(", "2", "*", "std2", "**", "2", ")", ")", "-", "np", ".", "log", "(", "(", "(", "std2", "*", "s1", ")", "/", "(", "std1", "*", "s2", ")", ")", ")", "return", "np", ".", "roots", "(", "[", "a", ",", "b", ",", "c", "]", ")", "i", "=", "solve", "(", "bg", ",", "signal", ")", "try", ":", "return", "i", "[", "np", ".", "logical_and", "(", "i", ">", "bg", "[", "1", "]", ",", "i", "<", "signal", "[", "1", "]", ")", "]", "[", "0", "]", "except", "IndexError", ":", "# this error shouldn't occur... well\r", "return", "max", "(", "smn", ",", "bmx", ")" ]
intersection between signal and background peak
[ "intersection", "between", "signal", "and", "background", "peak" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/imgSignal.py#L182-L213
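The solve() helper finds where two Gaussian curves s*exp(-(x-m)**2/(2*std**2)) are equal: taking the logarithm of both sides yields the quadratic a*x**2 + b*x + c = 0 with exactly the coefficients in the record. A quick numeric check with two equal-width peaks:

import numpy as np
s1, m1, sd1 = 1.0, 0.0, 1.0
s2, m2, sd2 = 1.0, 4.0, 1.0
a = 1 / (2 * sd1**2) - 1 / (2 * sd2**2)  # 0 for equal widths
b = m2 / sd2**2 - m1 / sd1**2
c = m1**2 / (2 * sd1**2) - m2**2 / (2 * sd2**2) - np.log((sd2 * s1) / (sd1 * s2))
print(np.roots([a, b, c]))  # -> [2.], midway between the two peaks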
radjkarl/imgProcessor
imgProcessor/imgSignal.py
getSignalParameters
def getSignalParameters(fitParams, n_std=3):
    '''
    return minimum, average, maximum of the signal peak
    '''
    signal = getSignalPeak(fitParams)
    mx = signal[1] + n_std * signal[2]
    mn = signal[1] - n_std * signal[2]
    if mn < fitParams[0][1]:
        mn = fitParams[0][1]  # set to bg
    return mn, signal[1], mx
python
def getSignalParameters(fitParams, n_std=3):
    '''
    return minimum, average, maximum of the signal peak
    '''
    signal = getSignalPeak(fitParams)
    mx = signal[1] + n_std * signal[2]
    mn = signal[1] - n_std * signal[2]
    if mn < fitParams[0][1]:
        mn = fitParams[0][1]  # set to bg
    return mn, signal[1], mx
[ "def", "getSignalParameters", "(", "fitParams", ",", "n_std", "=", "3", ")", ":", "signal", "=", "getSignalPeak", "(", "fitParams", ")", "mx", "=", "signal", "[", "1", "]", "+", "n_std", "*", "signal", "[", "2", "]", "mn", "=", "signal", "[", "1", "]", "-", "n_std", "*", "signal", "[", "2", "]", "if", "mn", "<", "fitParams", "[", "0", "]", "[", "1", "]", ":", "mn", "=", "fitParams", "[", "0", "]", "[", "1", "]", "# set to bg\r", "return", "mn", ",", "signal", "[", "1", "]", ",", "mx" ]
return minimum, average, maximum of the signal peak
[ "return", "minimum", "average", "maximum", "of", "the", "signal", "peak" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/imgSignal.py#L250-L259
radjkarl/imgProcessor
imgProcessor/transform/equalizeImage.py
equalizeImage
def equalizeImage(img, save_path=None, name_additive='_eqHist'):
    '''
    Equalize the histogram (contrast) of an image
    works with RGB/multi-channel images and flat-arrays

    @param img - image_path or np.array
    @param save_path - if given, output images will be saved there
    @param name_additive - if given, this additive will be appended
                           to output image names
    @return output image if input is a numpy.array and no save_path is given
    @return None otherwise
    '''
    if isinstance(img, string_types):
        img = PathStr(img)
        if not img.exists():
            raise Exception("image path doesn't exist")
        img_name = img.basename().replace('.', '%s.' % name_additive)
        if save_path is None:
            save_path = img.dirname()
        img = cv2.imread(img)

    if img.dtype != np.dtype('uint8'):
        # openCV cannot work with float arrays or uint > 8bit
        eqFn = _equalizeHistogram
    else:
        eqFn = cv2.equalizeHist
    if len(img.shape) == 3:  # multi channel img like rgb
        for i in range(img.shape[2]):
            img[:, :, i] = eqFn(img[:, :, i])
    else:  # grey scale image
        img = eqFn(img)
    if save_path:
        # note: [img_name] is only defined when [img] was given as a path
        img_name = PathStr(save_path).join(img_name)
        cv2.imwrite(img_name, img)
    return img
python
def equalizeImage(img, save_path=None, name_additive='_eqHist'):
    '''
    Equalize the histogram (contrast) of an image
    works with RGB/multi-channel images and flat-arrays

    @param img - image_path or np.array
    @param save_path - if given, output images will be saved there
    @param name_additive - if given, this additive will be appended
                           to output image names
    @return output image if input is a numpy.array and no save_path is given
    @return None otherwise
    '''
    if isinstance(img, string_types):
        img = PathStr(img)
        if not img.exists():
            raise Exception("image path doesn't exist")
        img_name = img.basename().replace('.', '%s.' % name_additive)
        if save_path is None:
            save_path = img.dirname()
        img = cv2.imread(img)

    if img.dtype != np.dtype('uint8'):
        # openCV cannot work with float arrays or uint > 8bit
        eqFn = _equalizeHistogram
    else:
        eqFn = cv2.equalizeHist
    if len(img.shape) == 3:  # multi channel img like rgb
        for i in range(img.shape[2]):
            img[:, :, i] = eqFn(img[:, :, i])
    else:  # grey scale image
        img = eqFn(img)
    if save_path:
        # note: [img_name] is only defined when [img] was given as a path
        img_name = PathStr(save_path).join(img_name)
        cv2.imwrite(img_name, img)
    return img
[ "def", "equalizeImage", "(", "img", ",", "save_path", "=", "None", ",", "name_additive", "=", "'_eqHist'", ")", ":", "if", "isinstance", "(", "img", ",", "string_types", ")", ":", "img", "=", "PathStr", "(", "img", ")", "if", "not", "img", ".", "exists", "(", ")", ":", "raise", "Exception", "(", "\"image path doesn't exist\"", ")", "img_name", "=", "img", ".", "basename", "(", ")", ".", "replace", "(", "'.'", ",", "'%s.'", "%", "name_additive", ")", "if", "save_path", "is", "None", ":", "save_path", "=", "img", ".", "dirname", "(", ")", "img", "=", "cv2", ".", "imread", "(", "img", ")", "if", "img", ".", "dtype", "!=", "np", ".", "dtype", "(", "'uint8'", ")", ":", "# openCV cannot work with float arrays or uint > 8bit\r", "eqFn", "=", "_equalizeHistogram", "else", ":", "eqFn", "=", "cv2", ".", "equalizeHist", "if", "len", "(", "img", ".", "shape", ")", "==", "3", ":", "# multi channel img like rgb\r", "for", "i", "in", "range", "(", "img", ".", "shape", "[", "2", "]", ")", ":", "img", "[", ":", ",", ":", ",", "i", "]", "=", "eqFn", "(", "img", "[", ":", ",", ":", ",", "i", "]", ")", "else", ":", "# grey scale image\r", "img", "=", "eqFn", "(", "img", ")", "if", "save_path", ":", "img_name", "=", "PathStr", "(", "save_path", ")", ".", "join", "(", "img_name", ")", "cv2", ".", "imwrite", "(", "img_name", ",", "img", ")", "return", "img" ]
Equalize the histogram (contrast) of an image
works with RGB/multi-channel images and flat-arrays

@param img - image_path or np.array
@param save_path - if given, output images will be saved there
@param name_additive - if given, this additive will be appended to output image names
@return output image if input is a numpy.array and no save_path is given
@return None otherwise
[ "Equalize", "the", "histogram", "(", "contrast", ")", "of", "an", "image", "works", "with", "RGB", "/", "multi", "-", "channel", "images", "and", "flat", "-", "arrays" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/transform/equalizeImage.py#L11-L47
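A sketch of both call styles (the file path and its side effect are assumptions following the docstring):

import numpy as np
# in-memory use: returns the equalized array
# arr = equalizeImage(np.random.randint(0, 255, (100, 100), dtype=np.uint8))
# file use: would write 'photo_eqHist.jpg' next to the input
# equalizeImage('photo.jpg')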
radjkarl/imgProcessor
imgProcessor/transform/equalizeImage.py
_equalizeHistogram
def _equalizeHistogram(img):
    '''
    histogram equalisation not bounded to int() or an image depth of 8 bit
    works also with negative numbers
    '''
    # to float if int:
    intType = None
    if 'f' not in img.dtype.str:
        TO_FLOAT_TYPES = {np.dtype('uint8'): np.float16,
                          np.dtype('uint16'): np.float32,
                          np.dtype('uint32'): np.float64,
                          np.dtype('uint64'): np.float64}
        intType = img.dtype
        img = img.astype(TO_FLOAT_TYPES[intType], copy=False)

    # get image depth
    DEPTH_TO_NBINS = {np.dtype('float16'): 256,  # uint8
                      np.dtype('float32'): 32768,  # uint16
                      np.dtype('float64'): 2147483648}  # uint32
    nBins = DEPTH_TO_NBINS[img.dtype]

    # scale to -1 to 1 due to scikit-image restrictions
    mn, mx = np.amin(img), np.amax(img)
    if abs(mn) > abs(mx):
        mx = mn
    img /= mx
    img = exposure.equalize_hist(img, nbins=nBins)
    img *= mx
    if intType:
        img = img.astype(intType)
    return img
python
def _equalizeHistogram(img):
    '''
    histogram equalisation not bounded to int() or an image depth of 8 bit
    works also with negative numbers
    '''
    # to float if int:
    intType = None
    if 'f' not in img.dtype.str:
        TO_FLOAT_TYPES = {np.dtype('uint8'): np.float16,
                          np.dtype('uint16'): np.float32,
                          np.dtype('uint32'): np.float64,
                          np.dtype('uint64'): np.float64}
        intType = img.dtype
        img = img.astype(TO_FLOAT_TYPES[intType], copy=False)

    # get image depth
    DEPTH_TO_NBINS = {np.dtype('float16'): 256,  # uint8
                      np.dtype('float32'): 32768,  # uint16
                      np.dtype('float64'): 2147483648}  # uint32
    nBins = DEPTH_TO_NBINS[img.dtype]

    # scale to -1 to 1 due to scikit-image restrictions
    mn, mx = np.amin(img), np.amax(img)
    if abs(mn) > abs(mx):
        mx = mn
    img /= mx
    img = exposure.equalize_hist(img, nbins=nBins)
    img *= mx
    if intType:
        img = img.astype(intType)
    return img
[ "def", "_equalizeHistogram", "(", "img", ")", ":", "# to float if int:\r", "intType", "=", "None", "if", "'f'", "not", "in", "img", ".", "dtype", ".", "str", ":", "TO_FLOAT_TYPES", "=", "{", "np", ".", "dtype", "(", "'uint8'", ")", ":", "np", ".", "float16", ",", "np", ".", "dtype", "(", "'uint16'", ")", ":", "np", ".", "float32", ",", "np", ".", "dtype", "(", "'uint32'", ")", ":", "np", ".", "float64", ",", "np", ".", "dtype", "(", "'uint64'", ")", ":", "np", ".", "float64", "}", "intType", "=", "img", ".", "dtype", "img", "=", "img", ".", "astype", "(", "TO_FLOAT_TYPES", "[", "intType", "]", ",", "copy", "=", "False", ")", "# get image deph\r", "DEPTH_TO_NBINS", "=", "{", "np", ".", "dtype", "(", "'float16'", ")", ":", "256", ",", "# uint8\r", "np", ".", "dtype", "(", "'float32'", ")", ":", "32768", ",", "# uint16\r", "np", ".", "dtype", "(", "'float64'", ")", ":", "2147483648", "}", "# uint32\r", "nBins", "=", "DEPTH_TO_NBINS", "[", "img", ".", "dtype", "]", "# scale to -1 to 1 due to skikit-image restrictions\r", "mn", ",", "mx", "=", "np", ".", "amin", "(", "img", ")", ",", "np", ".", "amax", "(", "img", ")", "if", "abs", "(", "mn", ")", ">", "abs", "(", "mx", ")", ":", "mx", "=", "mn", "img", "/=", "mx", "img", "=", "exposure", ".", "equalize_hist", "(", "img", ",", "nbins", "=", "nBins", ")", "img", "*=", "mx", "if", "intType", ":", "img", "=", "img", ".", "astype", "(", "intType", ")", "return", "img" ]
histogram equalisation not bounded to int() or an image depth of 8 bit works also with negative numbers
[ "histogram", "equalisation", "not", "bounded", "to", "int", "()", "or", "an", "image", "depth", "of", "8", "bit", "works", "also", "with", "negative", "numbers" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/transform/equalizeImage.py#L50-L83
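A short sketch of the dtype bookkeeping this helper relies on: each integer depth is lifted to a float type wide enough to hold it, and the bin count matches the original integer range (mapping copied from the function, example values made up):

import numpy as np

TO_FLOAT_TYPES = {np.dtype('uint8'): np.float16,
                  np.dtype('uint16'): np.float32}
DEPTH_TO_NBINS = {np.dtype('float16'): 256,     # covers uint8
                  np.dtype('float32'): 32768}   # covers uint16

a = np.array([0, 1000, 40000], dtype=np.uint16)
f = a.astype(TO_FLOAT_TYPES[a.dtype])
print(DEPTH_TO_NBINS[f.dtype])  # -> 32768 bins for a 16-bit image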
radjkarl/imgProcessor
imgProcessor/filters/localizedMaximum.py
localizedMaximum
def localizedMaximum(img, thresh=0, min_increase=0, max_length=0, dtype=bool): ''' Returns the local maximum of a given 2d array thresh -> if given, ignore all values below that value max_length -> limit length between value has to vary > min_increase >>> a = np.array([[0,1,2,3,2,1,0], \ [0,1,2,2,3,1,0], \ [0,1,1,2,2,3,0], \ [0,1,1,2,1,1,0], \ [0,0,0,1,1,0,0]]) >>> print(localizedMaximum(a, dtype=int)) [[0 1 1 1 0 1 0] [0 0 0 0 1 0 0] [0 0 0 1 0 1 0] [0 0 1 1 0 1 0] [0 0 0 1 0 0 0]] ''' # because numba cannot create arrays: out = np.zeros(shape=img.shape, dtype=dtype) # first iterate all rows: _calc(img, out, thresh, min_increase, max_length) # then all columns: _calc(img.T, out.T, thresh, min_increase, max_length) return out
python
def localizedMaximum(img, thresh=0, min_increase=0, max_length=0, dtype=bool): ''' Returns the local maximum of a given 2d array thresh -> if given, ignore all values below that value max_length -> limit length between value has to vary > min_increase >>> a = np.array([[0,1,2,3,2,1,0], \ [0,1,2,2,3,1,0], \ [0,1,1,2,2,3,0], \ [0,1,1,2,1,1,0], \ [0,0,0,1,1,0,0]]) >>> print(localizedMaximum(a, dtype=int)) [[0 1 1 1 0 1 0] [0 0 0 0 1 0 0] [0 0 0 1 0 1 0] [0 0 1 1 0 1 0] [0 0 0 1 0 0 0]] ''' # because numba cannot create arrays: out = np.zeros(shape=img.shape, dtype=dtype) # first iterate all rows: _calc(img, out, thresh, min_increase, max_length) # then all columns: _calc(img.T, out.T, thresh, min_increase, max_length) return out
[ "def", "localizedMaximum", "(", "img", ",", "thresh", "=", "0", ",", "min_increase", "=", "0", ",", "max_length", "=", "0", ",", "dtype", "=", "bool", ")", ":", "# because numba cannot create arrays:\r", "out", "=", "np", ".", "zeros", "(", "shape", "=", "img", ".", "shape", ",", "dtype", "=", "dtype", ")", "# first iterate all rows:\r", "_calc", "(", "img", ",", "out", ",", "thresh", ",", "min_increase", ",", "max_length", ")", "# that all columns:\r", "_calc", "(", "img", ".", "T", ",", "out", ".", "T", ",", "thresh", ",", "min_increase", ",", "max_length", ")", "return", "out" ]
Returns the local maximum of a given 2d array thresh -> if given, ignore all values below that value max_length -> limit length between value has to vary > min_increase >>> a = np.array([[0,1,2,3,2,1,0], \ [0,1,2,2,3,1,0], \ [0,1,1,2,2,3,0], \ [0,1,1,2,1,1,0], \ [0,0,0,1,1,0,0]]) >>> print(localizedMaximum(a, dtype=int)) [[0 1 1 1 0 1 0] [0 0 0 0 1 0 0] [0 0 0 1 0 1 0] [0 0 1 1 0 1 0] [0 0 0 1 0 0 0]]
[ "Returns", "the", "local", "maximum", "of", "a", "given", "2d", "array", "thresh", "-", ">", "if", "given", "ignore", "all", "values", "below", "that", "value", "max_length", "-", ">", "limit", "length", "between", "value", "has", "to", "vary", ">", "min_increase", ">>>", "a", "=", "np", ".", "array", "(", "[[", "0", "1", "2", "3", "2", "1", "0", "]", "\\", "[", "0", "1", "2", "2", "3", "1", "0", "]", "\\", "[", "0", "1", "1", "2", "2", "3", "0", "]", "\\", "[", "0", "1", "1", "2", "1", "1", "0", "]", "\\", "[", "0", "0", "0", "1", "1", "0", "0", "]]", ")", ">>>", "print", "localizedMaximum", "(", "a", "dtype", "=", "int", ")", "[[", "0", "1", "1", "1", "0", "1", "0", "]", "[", "0", "0", "0", "0", "1", "0", "0", "]", "[", "0", "0", "0", "1", "0", "1", "0", "]", "[", "0", "0", "1", "1", "0", "1", "0", "]", "[", "0", "0", "0", "1", "0", "0", "0", "]]" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/filters/localizedMaximum.py#L5-L33
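The doctest above as a runnable snippet, assuming the module path from the func_code_url:

import numpy as np
from imgProcessor.filters.localizedMaximum import localizedMaximum

a = np.array([[0, 1, 2, 3, 2, 1, 0],
              [0, 1, 2, 2, 3, 1, 0],
              [0, 1, 1, 2, 2, 3, 0],
              [0, 1, 1, 2, 1, 1, 0],
              [0, 0, 0, 1, 1, 0, 0]])
print(localizedMaximum(a, dtype=int))  # marks row- and column-wise maxima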
radjkarl/imgProcessor
imgProcessor/camera/PerspectiveCorrection.py
PerspectiveCorrection.setReference
def setReference(self, ref): ''' ref ... either quad, grid, homography or reference image quad --> list of four image points(x,y) marking the edges of the quad to correct homography --> h. matrix to correct perspective distortion referenceImage --> image of same object without perspective distortion ''' # self.maps = {} self.quad = None # self.refQuad = None self._camera_position = None self._homography = None self._homography_is_fixed = True # self.tvec, self.rvec = None, None self._pose = None # evaluate input: if isinstance(ref, np.ndarray) and ref.shape == (3, 3): # REF IS HOMOGRAPHY self._homography = ref # REF IS QUAD elif len(ref) == 4: self.quad = sortCorners(ref) # TODO: cleanup # only need to call once - here o = self.obj_points # no property any more # REF IS IMAGE else: self.ref = imread(ref) # self._refshape = ref.shape[:2] self.pattern = PatternRecognition(self.ref) self._homography_is_fixed = False
python
def setReference(self, ref): ''' ref ... either quad, grid, homography or reference image quad --> list of four image points(x,y) marking the edges of the quad to correct homography --> h. matrix to correct perspective distortion referenceImage --> image of same object without perspective distortion ''' # self.maps = {} self.quad = None # self.refQuad = None self._camera_position = None self._homography = None self._homography_is_fixed = True # self.tvec, self.rvec = None, None self._pose = None # evaluate input: if isinstance(ref, np.ndarray) and ref.shape == (3, 3): # REF IS HOMOGRAPHY self._homography = ref # REF IS QUAD elif len(ref) == 4: self.quad = sortCorners(ref) # TODO: cleanup # only need to call once - here o = self.obj_points # no property any more # REF IS IMAGE else: self.ref = imread(ref) # self._refshape = ref.shape[:2] self.pattern = PatternRecognition(self.ref) self._homography_is_fixed = False
[ "def", "setReference", "(", "self", ",", "ref", ")", ":", "# self.maps = {}\r", "self", ".", "quad", "=", "None", "# self.refQuad = None\r", "self", ".", "_camera_position", "=", "None", "self", ".", "_homography", "=", "None", "self", ".", "_homography_is_fixed", "=", "True", "# self.tvec, self.rvec = None, None\r", "self", ".", "_pose", "=", "None", "# evaluate input:\r", "if", "isinstance", "(", "ref", ",", "np", ".", "ndarray", ")", "and", "ref", ".", "shape", "==", "(", "3", ",", "3", ")", ":", "# REF IS HOMOGRAPHY\r", "self", ".", "_homography", "=", "ref", "# REF IS QUAD\r", "elif", "len", "(", "ref", ")", "==", "4", ":", "self", ".", "quad", "=", "sortCorners", "(", "ref", ")", "# TODO: cleanup # only need to call once - here\r", "o", "=", "self", ".", "obj_points", "# no property any more\r", "# REF IS IMAGE\r", "else", ":", "self", ".", "ref", "=", "imread", "(", "ref", ")", "# self._refshape = ref.shape[:2]\r", "self", ".", "pattern", "=", "PatternRecognition", "(", "self", ".", "ref", ")", "self", ".", "_homography_is_fixed", "=", "False" ]
ref ... either quad, grid, homography or reference image quad --> list of four image points(x,y) marking the edges of the quad to correct homography --> h. matrix to correct perspective distortion referenceImage --> image of same object without perspective distortion
[ "ref", "...", "either", "quad", "grid", "homography", "or", "reference", "image", "quad", "--", ">", "list", "of", "four", "image", "points", "(", "x", "y", ")", "marking", "the", "edges", "of", "the", "quad", "to", "correct", "homography", "--", ">", "h", ".", "matrix", "to", "correct", "perspective", "distortion", "referenceImage", "--", ">", "image", "of", "same", "object", "without", "perspective", "distortion" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/PerspectiveCorrection.py#L97-L131
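A sketch of the three reference kinds the method dispatches on; pc stands for a PerspectiveCorrection instance constructed elsewhere (constructor arguments omitted), and the values are placeholders:

import numpy as np

quad = np.array([(10, 10), (400, 20), (390, 300), (15, 310)])  # four corner points
pc.setReference(quad)                        # len(ref) == 4 -> quad
pc.setReference(np.eye(3))                   # 3x3 ndarray -> homography
pc.setReference('object_frontal_view.png')   # anything else -> reference image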
radjkarl/imgProcessor
imgProcessor/camera/PerspectiveCorrection.py
PerspectiveCorrection.distort
def distort(self, img, rotX=0, rotY=0, quad=None): ''' Apply perspective distortion on self.img angles are in DEG and need to be positive to fit into image ''' self.img = imread(img) # fit old image to self.quad: corr = self.correct(self.img) s = self.img.shape if quad is None: wquad = (self.quad - self.quad.mean(axis=0)).astype(float) win_width = s[1] win_height = s[0] # project quad: for n, q in enumerate(wquad): p = Point3D(q[0], q[1], 0).rotateX(-rotX).rotateY(-rotY) p = p.project(win_width, win_height, s[1], s[1]) wquad[n] = (p.x, p.y) wquad = sortCorners(wquad) # scale result so that longest side of quad and wquad are equal w = wquad[:, 0].max() - wquad[:, 0].min() h = wquad[:, 1].max() - wquad[:, 1].min() scale = min(s[1] / w, s[0] / h) # scale: wquad = (wquad * scale).astype(int) else: wquad = sortCorners(quad) wquad -= wquad.min(axis=0) lx = corr.shape[1] ly = corr.shape[0] objP = np.array([ [0, 0], [lx, 0], [lx, ly], [0, ly], ], dtype=np.float32) homography = cv2.getPerspectiveTransform( wquad.astype(np.float32), objP) # distort corr: w = wquad[:, 0].max() - wquad[:, 0].min() h = wquad[:, 1].max() - wquad[:, 1].min() #(int(w),int(h)) dist = cv2.warpPerspective(corr, homography, (int(w), int(h)), flags=cv2.INTER_CUBIC | cv2.WARP_INVERSE_MAP) # move middle of dist to middle of the old quad: bg = np.zeros(shape=s) rmn = (bg.shape[0] / 2, bg.shape[1] / 2) ss = dist.shape mn = (ss[0] / 2, ss[1] / 2) # wquad.mean(axis=0) ref = (int(rmn[0] - mn[0]), int(rmn[1] - mn[1])) bg[ref[0]:ss[0] + ref[0], ref[1]:ss[1] + ref[1]] = dist # finally move quad into right position: self.quad = wquad self.quad += (ref[1], ref[0]) self.img = bg self._homography = None self._poseFromQuad() if self.opts['do_correctIntensity']: tf = self.tiltFactor() if self.img.ndim == 3: for col in range(self.img.shape[2]): self.img[..., col] *= tf else: # tf = np.tile(tf, (1,1,self.img.shape[2])) self.img = self.img * tf return self.img
python
def distort(self, img, rotX=0, rotY=0, quad=None): ''' Apply perspective distortion on self.img angles are in DEG and need to be positive to fit into image ''' self.img = imread(img) # fit old image to self.quad: corr = self.correct(self.img) s = self.img.shape if quad is None: wquad = (self.quad - self.quad.mean(axis=0)).astype(float) win_width = s[1] win_height = s[0] # project quad: for n, q in enumerate(wquad): p = Point3D(q[0], q[1], 0).rotateX(-rotX).rotateY(-rotY) p = p.project(win_width, win_height, s[1], s[1]) wquad[n] = (p.x, p.y) wquad = sortCorners(wquad) # scale result so that longest side of quad and wquad are equal w = wquad[:, 0].max() - wquad[:, 0].min() h = wquad[:, 1].max() - wquad[:, 1].min() scale = min(s[1] / w, s[0] / h) # scale: wquad = (wquad * scale).astype(int) else: wquad = sortCorners(quad) wquad -= wquad.min(axis=0) lx = corr.shape[1] ly = corr.shape[0] objP = np.array([ [0, 0], [lx, 0], [lx, ly], [0, ly], ], dtype=np.float32) homography = cv2.getPerspectiveTransform( wquad.astype(np.float32), objP) # distort corr: w = wquad[:, 0].max() - wquad[:, 0].min() h = wquad[:, 1].max() - wquad[:, 1].min() #(int(w),int(h)) dist = cv2.warpPerspective(corr, homography, (int(w), int(h)), flags=cv2.INTER_CUBIC | cv2.WARP_INVERSE_MAP) # move middle of dist to middle of the old quad: bg = np.zeros(shape=s) rmn = (bg.shape[0] / 2, bg.shape[1] / 2) ss = dist.shape mn = (ss[0] / 2, ss[1] / 2) # wquad.mean(axis=0) ref = (int(rmn[0] - mn[0]), int(rmn[1] - mn[1])) bg[ref[0]:ss[0] + ref[0], ref[1]:ss[1] + ref[1]] = dist # finally move quad into right position: self.quad = wquad self.quad += (ref[1], ref[0]) self.img = bg self._homography = None self._poseFromQuad() if self.opts['do_correctIntensity']: tf = self.tiltFactor() if self.img.ndim == 3: for col in range(self.img.shape[2]): self.img[..., col] *= tf else: # tf = np.tile(tf, (1,1,self.img.shape[2])) self.img = self.img * tf return self.img
[ "def", "distort", "(", "self", ",", "img", ",", "rotX", "=", "0", ",", "rotY", "=", "0", ",", "quad", "=", "None", ")", ":", "self", ".", "img", "=", "imread", "(", "img", ")", "# fit old image to self.quad:\r", "corr", "=", "self", ".", "correct", "(", "self", ".", "img", ")", "s", "=", "self", ".", "img", ".", "shape", "if", "quad", "is", "None", ":", "wquad", "=", "(", "self", ".", "quad", "-", "self", ".", "quad", ".", "mean", "(", "axis", "=", "0", ")", ")", ".", "astype", "(", "float", ")", "win_width", "=", "s", "[", "1", "]", "win_height", "=", "s", "[", "0", "]", "# project quad:\r", "for", "n", ",", "q", "in", "enumerate", "(", "wquad", ")", ":", "p", "=", "Point3D", "(", "q", "[", "0", "]", ",", "q", "[", "1", "]", ",", "0", ")", ".", "rotateX", "(", "-", "rotX", ")", ".", "rotateY", "(", "-", "rotY", ")", "p", "=", "p", ".", "project", "(", "win_width", ",", "win_height", ",", "s", "[", "1", "]", ",", "s", "[", "1", "]", ")", "wquad", "[", "n", "]", "=", "(", "p", ".", "x", ",", "p", ".", "y", ")", "wquad", "=", "sortCorners", "(", "wquad", ")", "# scale result so that longest side of quad and wquad are equal\r", "w", "=", "wquad", "[", ":", ",", "0", "]", ".", "max", "(", ")", "-", "wquad", "[", ":", ",", "0", "]", ".", "min", "(", ")", "h", "=", "wquad", "[", ":", ",", "1", "]", ".", "max", "(", ")", "-", "wquad", "[", ":", ",", "1", "]", ".", "min", "(", ")", "scale", "=", "min", "(", "s", "[", "1", "]", "/", "w", ",", "s", "[", "0", "]", "/", "h", ")", "# scale:\r", "wquad", "=", "(", "wquad", "*", "scale", ")", ".", "astype", "(", "int", ")", "else", ":", "wquad", "=", "sortCorners", "(", "quad", ")", "wquad", "-=", "wquad", ".", "min", "(", "axis", "=", "0", ")", "lx", "=", "corr", ".", "shape", "[", "1", "]", "ly", "=", "corr", ".", "shape", "[", "0", "]", "objP", "=", "np", ".", "array", "(", "[", "[", "0", ",", "0", "]", ",", "[", "lx", ",", "0", "]", ",", "[", "lx", ",", "ly", "]", ",", "[", "0", ",", "ly", "]", ",", "]", ",", "dtype", "=", "np", ".", "float32", ")", "homography", "=", "cv2", ".", "getPerspectiveTransform", "(", "wquad", ".", "astype", "(", "np", ".", "float32", ")", ",", "objP", ")", "# distort corr:\r", "w", "=", "wquad", "[", ":", ",", "0", "]", ".", "max", "(", ")", "-", "wquad", "[", ":", ",", "0", "]", ".", "min", "(", ")", "h", "=", "wquad", "[", ":", ",", "1", "]", ".", "max", "(", ")", "-", "wquad", "[", ":", ",", "1", "]", ".", "min", "(", ")", "#(int(w),int(h))\r", "dist", "=", "cv2", ".", "warpPerspective", "(", "corr", ",", "homography", ",", "(", "int", "(", "w", ")", ",", "int", "(", "h", ")", ")", ",", "flags", "=", "cv2", ".", "INTER_CUBIC", "|", "cv2", ".", "WARP_INVERSE_MAP", ")", "# move middle of dist to middle of the old quad:\r", "bg", "=", "np", ".", "zeros", "(", "shape", "=", "s", ")", "rmn", "=", "(", "bg", ".", "shape", "[", "0", "]", "/", "2", ",", "bg", ".", "shape", "[", "1", "]", "/", "2", ")", "ss", "=", "dist", ".", "shape", "mn", "=", "(", "ss", "[", "0", "]", "/", "2", ",", "ss", "[", "1", "]", "/", "2", ")", "# wquad.mean(axis=0)\r", "ref", "=", "(", "int", "(", "rmn", "[", "0", "]", "-", "mn", "[", "0", "]", ")", ",", "int", "(", "rmn", "[", "1", "]", "-", "mn", "[", "1", "]", ")", ")", "bg", "[", "ref", "[", "0", "]", ":", "ss", "[", "0", "]", "+", "ref", "[", "0", "]", ",", "ref", "[", "1", "]", ":", "ss", "[", "1", "]", "+", "ref", "[", "1", "]", "]", "=", "dist", "# finally move quad into right position:\r", "self", ".", "quad", "=", "wquad", "self", ".", "quad", "+=", "(", "ref", "[", "1", "]", ",", "ref", 
"[", "0", "]", ")", "self", ".", "img", "=", "bg", "self", ".", "_homography", "=", "None", "self", ".", "_poseFromQuad", "(", ")", "if", "self", ".", "opts", "[", "'do_correctIntensity'", "]", ":", "tf", "=", "self", ".", "tiltFactor", "(", ")", "if", "self", ".", "img", ".", "ndim", "==", "3", ":", "for", "col", "in", "range", "(", "self", ".", "img", ".", "shape", "[", "2", "]", ")", ":", "self", ".", "img", "[", "...", ",", "col", "]", "*=", "tf", "else", ":", "# tf = np.tile(tf, (1,1,self.img.shape[2]))\r", "self", ".", "img", "=", "self", ".", "img", "*", "tf", "return", "self", ".", "img" ]
Apply perspective distortion on self.img angles are in DEG and need to be positive to fit into image
[ "Apply", "perspective", "distortion", "ion", "self", ".", "img", "angles", "are", "in", "DEG", "and", "need", "to", "be", "positive", "to", "fit", "into", "image" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/PerspectiveCorrection.py#L193-L270
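A minimal call sketch, assuming pc already holds a valid quad from setReference; angles are degrees, as the docstring requires:

# rotate the corrected view 15 DEG about x and 5 DEG about y,
# then re-embed it in an image of the original shape:
warped = pc.distort('frontal_view.png', rotX=15, rotY=5)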
radjkarl/imgProcessor
imgProcessor/camera/PerspectiveCorrection.py
PerspectiveCorrection.correctGrid
def correctGrid(self, img, grid): ''' grid -> array of polylines=((p0x,p0y),(p1x,p1y),,,) ''' self.img = imread(img) h = self.homography # TODO: cleanup only needed to get newBorder attr. if self.opts['do_correctIntensity']: self.img = self.img / self._getTiltFactor(self.img.shape) s0, s1 = grid.shape[:2] n0, n1 = s0 - 1, s1 - 1 snew = self._newBorders b = self.opts['border'] sx, sy = (snew[0] - 2 * b) // n0, (snew[1] - 2 * b) // n1 out = np.empty(snew[::-1], dtype=self.img.dtype) def warp(ix, iy, objP, outcut): shape = outcut.shape[::-1] quad = grid[ix:ix + 2, iy:iy + 2].reshape(4, 2)[np.array([0, 2, 3, 1])] hcell = cv2.getPerspectiveTransform( quad.astype(np.float32), objP) cv2.warpPerspective(self.img, hcell, shape, outcut, flags=cv2.INTER_LANCZOS4, **self.opts['cv2_opts']) return quad objP = np.array([[0, 0], [sx, 0], [sx, sy], [0, sy]], dtype=np.float32) # INNER CELLS for ix in range(1, n0 - 1): for iy in range(1, n1 - 1): sub = out[iy * sy + b: (iy + 1) * sy + b, ix * sx + b: (ix + 1) * sx + b] # warp(ix, iy, objP, sub) shape = sub.shape[::-1] quad = grid[ix:ix + 2, iy:iy + 2].reshape(4, 2)[np.array([0, 2, 3, 1])] # print(quad, objP) hcell = cv2.getPerspectiveTransform( quad.astype(np.float32), objP) cv2.warpPerspective(self.img, hcell, shape, sub, flags=cv2.INTER_LANCZOS4, **self.opts['cv2_opts']) # return out # TOP CELLS objP[:, 1] += b for ix in range(1, n0 - 1): warp(ix, 0, objP, out[: sy + b, ix * sx + b: (ix + 1) * sx + b]) # BOTTOM CELLS objP[:, 1] -= b for ix in range(1, n0 - 1): iy = (n1 - 1) y = iy * sy + b x = ix * sx + b warp(ix, iy, objP, out[y: y + sy + b, x: x + sx]) # LEFT CELLS objP[:, 0] += b for iy in range(1, n1 - 1): y = iy * sy + b warp(0, iy, objP, out[y: y + sy, : sx + b]) # RIGHT CELLS objP[:, 0] -= b ix = (n0 - 1) x = ix * sx + b for iy in range(1, n1 - 1): y = iy * sy + b warp(ix, iy, objP, out[y: y + sy, x: x + sx + b]) # BOTTOM RIGHT CORNER warp(n0 - 1, n1 - 1, objP, out[-sy - b - 1:, x: x + sx + b]) # #TOP LEFT CORNER objP += (b, b) warp(0, 0, objP, out[0: sy + b, 0: sx + b]) # TOP RIGHT CORNER objP[:, 0] -= b # x = (n0-1)*sx+b warp(n0 - 1, 0, objP, out[: sy + b, x: x + sx + b]) # #BOTTOM LEFT CORNER objP += (b, -b) warp(0, n1 - 1, objP, out[-sy - b - 1:, : sx + b]) return out
python
def correctGrid(self, img, grid): ''' grid -> array of polylines=((p0x,p0y),(p1x,p1y),,,) ''' self.img = imread(img) h = self.homography # TODO: cleanup only needed to get newBorder attr. if self.opts['do_correctIntensity']: self.img = self.img / self._getTiltFactor(self.img.shape) s0, s1 = grid.shape[:2] n0, n1 = s0 - 1, s1 - 1 snew = self._newBorders b = self.opts['border'] sx, sy = (snew[0] - 2 * b) // n0, (snew[1] - 2 * b) // n1 out = np.empty(snew[::-1], dtype=self.img.dtype) def warp(ix, iy, objP, outcut): shape = outcut.shape[::-1] quad = grid[ix:ix + 2, iy:iy + 2].reshape(4, 2)[np.array([0, 2, 3, 1])] hcell = cv2.getPerspectiveTransform( quad.astype(np.float32), objP) cv2.warpPerspective(self.img, hcell, shape, outcut, flags=cv2.INTER_LANCZOS4, **self.opts['cv2_opts']) return quad objP = np.array([[0, 0], [sx, 0], [sx, sy], [0, sy]], dtype=np.float32) # INNER CELLS for ix in range(1, n0 - 1): for iy in range(1, n1 - 1): sub = out[iy * sy + b: (iy + 1) * sy + b, ix * sx + b: (ix + 1) * sx + b] # warp(ix, iy, objP, sub) shape = sub.shape[::-1] quad = grid[ix:ix + 2, iy:iy + 2].reshape(4, 2)[np.array([0, 2, 3, 1])] # print(quad, objP) hcell = cv2.getPerspectiveTransform( quad.astype(np.float32), objP) cv2.warpPerspective(self.img, hcell, shape, sub, flags=cv2.INTER_LANCZOS4, **self.opts['cv2_opts']) # return out # TOP CELLS objP[:, 1] += b for ix in range(1, n0 - 1): warp(ix, 0, objP, out[: sy + b, ix * sx + b: (ix + 1) * sx + b]) # BOTTOM CELLS objP[:, 1] -= b for ix in range(1, n0 - 1): iy = (n1 - 1) y = iy * sy + b x = ix * sx + b warp(ix, iy, objP, out[y: y + sy + b, x: x + sx]) # LEFT CELLS objP[:, 0] += b for iy in range(1, n1 - 1): y = iy * sy + b warp(0, iy, objP, out[y: y + sy, : sx + b]) # RIGHT CELLS objP[:, 0] -= b ix = (n0 - 1) x = ix * sx + b for iy in range(1, n1 - 1): y = iy * sy + b warp(ix, iy, objP, out[y: y + sy, x: x + sx + b]) # BOTTOM RIGHT CORNER warp(n0 - 1, n1 - 1, objP, out[-sy - b - 1:, x: x + sx + b]) # #TOP LEFT CORNER objP += (b, b) warp(0, 0, objP, out[0: sy + b, 0: sx + b]) # TOP RIGHT CORNER objP[:, 0] -= b # x = (n0-1)*sx+b warp(n0 - 1, 0, objP, out[: sy + b, x: x + sx + b]) # #BOTTOM LEFT CORNER objP += (b, -b) warp(0, n1 - 1, objP, out[-sy - b - 1:, : sx + b]) return out
[ "def", "correctGrid", "(", "self", ",", "img", ",", "grid", ")", ":", "self", ".", "img", "=", "imread", "(", "img", ")", "h", "=", "self", ".", "homography", "# TODO: cleanup only needed to get newBorder attr.\r", "if", "self", ".", "opts", "[", "'do_correctIntensity'", "]", ":", "self", ".", "img", "=", "self", ".", "img", "/", "self", ".", "_getTiltFactor", "(", "self", ".", "img", ".", "shape", ")", "s0", ",", "s1", "=", "grid", ".", "shape", "[", ":", "2", "]", "n0", ",", "n1", "=", "s0", "-", "1", ",", "s1", "-", "1", "snew", "=", "self", ".", "_newBorders", "b", "=", "self", ".", "opts", "[", "'border'", "]", "sx", ",", "sy", "=", "(", "snew", "[", "0", "]", "-", "2", "*", "b", ")", "//", "n0", ",", "(", "snew", "[", "1", "]", "-", "2", "*", "b", ")", "//", "n1", "out", "=", "np", ".", "empty", "(", "snew", "[", ":", ":", "-", "1", "]", ",", "dtype", "=", "self", ".", "img", ".", "dtype", ")", "def", "warp", "(", "ix", ",", "iy", ",", "objP", ",", "outcut", ")", ":", "shape", "=", "outcut", ".", "shape", "[", ":", ":", "-", "1", "]", "quad", "=", "grid", "[", "ix", ":", "ix", "+", "2", ",", "iy", ":", "iy", "+", "2", "]", ".", "reshape", "(", "4", ",", "2", ")", "[", "np", ".", "array", "(", "[", "0", ",", "2", ",", "3", ",", "1", "]", ")", "]", "hcell", "=", "cv2", ".", "getPerspectiveTransform", "(", "quad", ".", "astype", "(", "np", ".", "float32", ")", ",", "objP", ")", "cv2", ".", "warpPerspective", "(", "self", ".", "img", ",", "hcell", ",", "shape", ",", "outcut", ",", "flags", "=", "cv2", ".", "INTER_LANCZOS4", ",", "*", "*", "self", ".", "opts", "[", "'cv2_opts'", "]", ")", "return", "quad", "objP", "=", "np", ".", "array", "(", "[", "[", "0", ",", "0", "]", ",", "[", "sx", ",", "0", "]", ",", "[", "sx", ",", "sy", "]", ",", "[", "0", ",", "sy", "]", "]", ",", "dtype", "=", "np", ".", "float32", ")", "# INNER CELLS\r", "for", "ix", "in", "range", "(", "1", ",", "n0", "-", "1", ")", ":", "for", "iy", "in", "range", "(", "1", ",", "n1", "-", "1", ")", ":", "sub", "=", "out", "[", "iy", "*", "sy", "+", "b", ":", "(", "iy", "+", "1", ")", "*", "sy", "+", "b", ",", "ix", "*", "sx", "+", "b", ":", "(", "ix", "+", "1", ")", "*", "sx", "+", "b", "]", "# warp(ix, iy, objP, sub)\r", "shape", "=", "sub", ".", "shape", "[", ":", ":", "-", "1", "]", "quad", "=", "grid", "[", "ix", ":", "ix", "+", "2", ",", "iy", ":", "iy", "+", "2", "]", ".", "reshape", "(", "4", ",", "2", ")", "[", "np", ".", "array", "(", "[", "0", ",", "2", ",", "3", ",", "1", "]", ")", "]", "# print(quad, objP)\r", "hcell", "=", "cv2", ".", "getPerspectiveTransform", "(", "quad", ".", "astype", "(", "np", ".", "float32", ")", ",", "objP", ")", "cv2", ".", "warpPerspective", "(", "self", ".", "img", ",", "hcell", ",", "shape", ",", "sub", ",", "flags", "=", "cv2", ".", "INTER_LANCZOS4", ",", "*", "*", "self", ".", "opts", "[", "'cv2_opts'", "]", ")", "# return out\r", "# TOP CELLS\r", "objP", "[", ":", ",", "1", "]", "+=", "b", "for", "ix", "in", "range", "(", "1", ",", "n0", "-", "1", ")", ":", "warp", "(", "ix", ",", "0", ",", "objP", ",", "out", "[", ":", "sy", "+", "b", ",", "ix", "*", "sx", "+", "b", ":", "(", "ix", "+", "1", ")", "*", "sx", "+", "b", "]", ")", "# BOTTOM CELLS\r", "objP", "[", ":", ",", "1", "]", "-=", "b", "for", "ix", "in", "range", "(", "1", ",", "n0", "-", "1", ")", ":", "iy", "=", "(", "n1", "-", "1", ")", "y", "=", "iy", "*", "sy", "+", "b", "x", "=", "ix", "*", "sx", "+", "b", "warp", "(", "ix", ",", "iy", ",", "objP", ",", "out", "[", "y", ":", "y", "+", "sy", "+", "b", ",", "x", ":", 
"x", "+", "sx", "]", ")", "# LEFT CELLS\r", "objP", "[", ":", ",", "0", "]", "+=", "b", "for", "iy", "in", "range", "(", "1", ",", "n1", "-", "1", ")", ":", "y", "=", "iy", "*", "sy", "+", "b", "warp", "(", "0", ",", "iy", ",", "objP", ",", "out", "[", "y", ":", "y", "+", "sy", ",", ":", "sx", "+", "b", "]", ")", "# RIGHT CELLS\r", "objP", "[", ":", ",", "0", "]", "-=", "b", "ix", "=", "(", "n0", "-", "1", ")", "x", "=", "ix", "*", "sx", "+", "b", "for", "iy", "in", "range", "(", "1", ",", "n1", "-", "1", ")", ":", "y", "=", "iy", "*", "sy", "+", "b", "warp", "(", "ix", ",", "iy", ",", "objP", ",", "out", "[", "y", ":", "y", "+", "sy", ",", "x", ":", "x", "+", "sx", "+", "b", "]", ")", "# BOTTOM RIGHT CORNER\r", "warp", "(", "n0", "-", "1", ",", "n1", "-", "1", ",", "objP", ",", "out", "[", "-", "sy", "-", "b", "-", "1", ":", ",", "x", ":", "x", "+", "sx", "+", "b", "]", ")", "# #TOP LEFT CORNER\r", "objP", "+=", "(", "b", ",", "b", ")", "warp", "(", "0", ",", "0", ",", "objP", ",", "out", "[", "0", ":", "sy", "+", "b", ",", "0", ":", "sx", "+", "b", "]", ")", "# TOP RIGHT CORNER\r", "objP", "[", ":", ",", "0", "]", "-=", "b", "# x = (n0-1)*sx+b\r", "warp", "(", "n0", "-", "1", ",", "0", ",", "objP", ",", "out", "[", ":", "sy", "+", "b", ",", "x", ":", "x", "+", "sx", "+", "b", "]", ")", "# #BOTTOM LEFT CORNER\r", "objP", "+=", "(", "b", ",", "-", "b", ")", "warp", "(", "0", ",", "n1", "-", "1", ",", "objP", ",", "out", "[", "-", "sy", "-", "b", "-", "1", ":", ",", ":", "sx", "+", "b", "]", ")", "return", "out" ]
grid -> array of polylines=((p0x,p0y),(p1x,p1y),,,)
[ "grid", "-", ">", "array", "of", "polylines", "=", "((", "p0x", "p0y", ")", "(", "p1x", "p1y", ")", ")" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/PerspectiveCorrection.py#L281-L372
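A sketch of the grid argument: grid[ix, iy] holds the (x, y) image position of one cell corner, so an s0 x s1 vertex grid describes (s0-1) x (s1-1) cells. Coordinates here are placeholders:

import numpy as np

gx, gy = np.meshgrid(np.linspace(0, 300, 4),
                     np.linspace(0, 200, 3), indexing='ij')
grid = np.dstack([gx, gy])       # shape (4, 3, 2) -> a 3 x 2 cell grid
# out = pc.correctGrid(img, grid)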
radjkarl/imgProcessor
imgProcessor/camera/PerspectiveCorrection.py
PerspectiveCorrection.correct
def correct(self, img): ''' ...from perspective distortion: --> perspective transformation --> apply tilt factor (view factor) correction ''' print("CORRECT PERSPECTIVE ...") self.img = imread(img) if not self._homography_is_fixed: self._homography = None h = self.homography if self.opts['do_correctIntensity']: tf = self.tiltFactor() self.img = np.asfarray(self.img) if self.img.ndim == 3: for col in range(self.img.shape[2]): self.img[..., col] /= tf else: self.img = self.img / tf warped = cv2.warpPerspective(self.img, h, self._newBorders[::-1], flags=cv2.INTER_LANCZOS4, **self.opts['cv2_opts']) return warped
python
def correct(self, img): ''' ...from perspective distortion: --> perspective transformation --> apply tilt factor (view factor) correction ''' print("CORRECT PERSPECTIVE ...") self.img = imread(img) if not self._homography_is_fixed: self._homography = None h = self.homography if self.opts['do_correctIntensity']: tf = self.tiltFactor() self.img = np.asfarray(self.img) if self.img.ndim == 3: for col in range(self.img.shape[2]): self.img[..., col] /= tf else: self.img = self.img / tf warped = cv2.warpPerspective(self.img, h, self._newBorders[::-1], flags=cv2.INTER_LANCZOS4, **self.opts['cv2_opts']) return warped
[ "def", "correct", "(", "self", ",", "img", ")", ":", "print", "(", "\"CORRECT PERSPECTIVE ...\"", ")", "self", ".", "img", "=", "imread", "(", "img", ")", "if", "not", "self", ".", "_homography_is_fixed", ":", "self", ".", "_homography", "=", "None", "h", "=", "self", ".", "homography", "if", "self", ".", "opts", "[", "'do_correctIntensity'", "]", ":", "tf", "=", "self", ".", "tiltFactor", "(", ")", "self", ".", "img", "=", "np", ".", "asfarray", "(", "self", ".", "img", ")", "if", "self", ".", "img", ".", "ndim", "==", "3", ":", "for", "col", "in", "range", "(", "self", ".", "img", ".", "shape", "[", "2", "]", ")", ":", "self", ".", "img", "[", "...", ",", "col", "]", "/=", "tf", "else", ":", "self", ".", "img", "=", "self", ".", "img", "/", "tf", "warped", "=", "cv2", ".", "warpPerspective", "(", "self", ".", "img", ",", "h", ",", "self", ".", "_newBorders", "[", ":", ":", "-", "1", "]", ",", "flags", "=", "cv2", ".", "INTER_LANCZOS4", ",", "*", "*", "self", ".", "opts", "[", "'cv2_opts'", "]", ")", "return", "warped" ]
...from perspective distortion: --> perspective transformation --> apply tilt factor (view factor) correction
[ "...", "from", "perspective", "distortion", ":", "--", ">", "perspective", "transformation", "--", ">", "apply", "tilt", "factor", "(", "view", "factor", ")", "correction" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/PerspectiveCorrection.py#L380-L406
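A minimal call sketch; correct() re-derives the homography each time unless it was fixed by setReference, and divides by the tilt factor when the do_correctIntensity option is set:

corr = pc.correct('tilted_view.png')  # img may be a path or an ndarray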
radjkarl/imgProcessor
imgProcessor/camera/PerspectiveCorrection.py
PerspectiveCorrection.camera_position
def camera_position(self, pose=None): ''' returns camera position in world coordinates using self.rvec and self.tvec from http://stackoverflow.com/questions/14515200/python-opencv-solvepnp-yields-wrong-translation-vector ''' if pose is None: pose = self.pose() t, r = pose return -np.matrix(cv2.Rodrigues(r)[0]).T * np.matrix(t)
python
def camera_position(self, pose=None): ''' returns camera position in world coordinates using self.rvec and self.tvec from http://stackoverflow.com/questions/14515200/python-opencv-solvepnp-yields-wrong-translation-vector ''' if pose is None: pose = self.pose() t, r = pose return -np.matrix(cv2.Rodrigues(r)[0]).T * np.matrix(t)
[ "def", "camera_position", "(", "self", ",", "pose", "=", "None", ")", ":", "if", "pose", "is", "None", ":", "pose", "=", "self", ".", "pose", "(", ")", "t", ",", "r", "=", "pose", "return", "-", "np", ".", "matrix", "(", "cv2", ".", "Rodrigues", "(", "r", ")", "[", "0", "]", ")", ".", "T", "*", "np", ".", "matrix", "(", "t", ")" ]
returns camera position in world coordinates using self.rvec and self.tvec from http://stackoverflow.com/questions/14515200/python-opencv-solvepnp-yields-wrong-translation-vector
[ "returns", "camera", "position", "in", "world", "coordinates", "using", "self", ".", "rvec", "and", "self", ".", "tvec", "from", "http", ":", "//", "stackoverflow", ".", "com", "/", "questions", "/", "14515200", "/", "python", "-", "opencv", "-", "solvepnp", "-", "yields", "-", "wrong", "-", "translation", "-", "vector" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/PerspectiveCorrection.py#L417-L425
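The underlying formula is C = -R^T t with R recovered from the rotation vector via cv2.Rodrigues; a standalone check with a made-up identity pose:

import numpy as np
import cv2

rvec = np.zeros(3)                     # no rotation
tvec = np.array([[0.], [0.], [500.]])  # object 500 units in front of the camera
R = cv2.Rodrigues(rvec)[0]
C = -R.T.dot(tvec)
print(C.ravel())                       # [0. 0. -500.] -> camera position in world coords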
radjkarl/imgProcessor
imgProcessor/camera/PerspectiveCorrection.py
PerspectiveCorrection.viewAngle
def viewAngle(self, **kwargs): ''' calculate view factor between one small and one finite surface vf = 1/pi * integral(cos(beta1)*cos(beta2)/s**2) * dA according to VDI Heat Atlas 2010 p961 ''' v0 = self.cam2PlaneVectorField(**kwargs) # obj cannot be behind camera v0[2][v0[2] < 0] = np.nan _t, r = self.pose() n = self.planeSfN(r) # because of different x,y orientation: n[2] *= -1 # beta2 = vectorAngle(v0, vectorToField(n) ) beta2 = vectorAngle(v0, n) return beta2
python
def viewAngle(self, **kwargs): ''' calculate view factor between one small and one finite surface vf = 1/pi * integral(cos(beta1)*cos(beta2)/s**2) * dA according to VDI Heat Atlas 2010 p961 ''' v0 = self.cam2PlaneVectorField(**kwargs) # obj cannot be behind camera v0[2][v0[2] < 0] = np.nan _t, r = self.pose() n = self.planeSfN(r) # because of different x,y orientation: n[2] *= -1 # beta2 = vectorAngle(v0, vectorToField(n) ) beta2 = vectorAngle(v0, n) return beta2
[ "def", "viewAngle", "(", "self", ",", "*", "*", "kwargs", ")", ":", "v0", "=", "self", ".", "cam2PlaneVectorField", "(", "*", "*", "kwargs", ")", "# obj cannot be behind camera\r", "v0", "[", "2", "]", "[", "v0", "[", "2", "]", "<", "0", "]", "=", "np", ".", "nan", "_t", ",", "r", "=", "self", ".", "pose", "(", ")", "n", "=", "self", ".", "planeSfN", "(", "r", ")", "# because of different x,y orientation:\r", "n", "[", "2", "]", "*=", "-", "1", "# beta2 = vectorAngle(v0, vectorToField(n) )\r", "beta2", "=", "vectorAngle", "(", "v0", ",", "n", ")", "return", "beta2" ]
calculate view factor between one small and one finite surface vf = 1/pi * integral(cos(beta1)*cos(beta2)/s**2) * dA according to VDI Heat Atlas 2010 p961
[ "calculate", "view", "factor", "between", "one", "small", "and", "one", "finite", "surface", "vf", "=", "1", "/", "pi", "*", "integral", "(", "cos", "(", "beta1", ")", "*", "cos", "(", "beta2", ")", "/", "s", "**", "2", ")", "*", "dA", "according", "to", "VDI", "heatatlas", "2010", "p961" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/PerspectiveCorrection.py#L488-L504
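vectorAngle is assumed to reduce to the arccos of the normalized dot product; a standalone sketch of that relation for one view ray and a plane normal (helper written here for illustration only):

import numpy as np

def vector_angle(v, n):
    # angle between view ray v and surface normal n
    c = np.dot(v, n) / (np.linalg.norm(v) * np.linalg.norm(n))
    return np.arccos(np.clip(c, -1, 1))

print(np.degrees(vector_angle([0, 0, 1], [0, 0, 1])))  # 0.0 -> surface viewed head-on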
radjkarl/imgProcessor
imgProcessor/camera/PerspectiveCorrection.py
PerspectiveCorrection.foreground
def foreground(self, quad=None): '''return foreground (quad) mask''' fg = np.zeros(shape=self._newBorders[::-1], dtype=np.uint8) if quad is None: quad = self.quad else: quad = quad.astype(np.int32) cv2.fillConvexPoly(fg, quad, 1) return fg.astype(bool)
python
def foreground(self, quad=None): '''return foreground (quad) mask''' fg = np.zeros(shape=self._newBorders[::-1], dtype=np.uint8) if quad is None: quad = self.quad else: quad = quad.astype(np.int32) cv2.fillConvexPoly(fg, quad, 1) return fg.astype(bool)
[ "def", "foreground", "(", "self", ",", "quad", "=", "None", ")", ":", "fg", "=", "np", ".", "zeros", "(", "shape", "=", "self", ".", "_newBorders", "[", ":", ":", "-", "1", "]", ",", "dtype", "=", "np", ".", "uint8", ")", "if", "quad", "is", "None", ":", "quad", "=", "self", ".", "quad", "else", ":", "quad", "=", "quad", ".", "astype", "(", "np", ".", "int32", ")", "cv2", ".", "fillConvexPoly", "(", "fg", ",", "quad", ",", "1", ")", "return", "fg", ".", "astype", "(", "bool", ")" ]
return foreground (quad) mask
[ "return", "foreground", "(", "quad", ")", "mask" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/PerspectiveCorrection.py#L506-L514
radjkarl/imgProcessor
imgProcessor/camera/PerspectiveCorrection.py
PerspectiveCorrection.tiltFactor
def tiltFactor(self, midpointdepth=None, printAvAngle=False): ''' get tilt factor from inverse distance law https://en.wikipedia.org/wiki/Inverse-square_law ''' # TODO: can also be only def. with FOV, rot, tilt beta2 = self.viewAngle(midpointdepth=midpointdepth) try: angles, vals = getattr( emissivity_vs_angle, self.opts['material'])() except AttributeError: raise AttributeError("material[%s] is not in list of known materials: %s" % ( self.opts['material'], [o[0] for o in getmembers(emissivity_vs_angle) if isfunction(o[1])])) if printAvAngle: avg_angle = beta2[self.foreground()].mean() print('angle: %s DEG' % np.degrees(avg_angle)) # use averaged angle instead of beta2 to not overemphasize correction normEmissivity = np.clip( InterpolatedUnivariateSpline( np.radians(angles), vals)(beta2), 0, 1) return normEmissivity
python
def tiltFactor(self, midpointdepth=None, printAvAngle=False): ''' get tilt factor from inverse distance law https://en.wikipedia.org/wiki/Inverse-square_law ''' # TODO: can also be only def. with FOV, rot, tilt beta2 = self.viewAngle(midpointdepth=midpointdepth) try: angles, vals = getattr( emissivity_vs_angle, self.opts['material'])() except AttributeError: raise AttributeError("material[%s] is not in list of known materials: %s" % ( self.opts['material'], [o[0] for o in getmembers(emissivity_vs_angle) if isfunction(o[1])])) if printAvAngle: avg_angle = beta2[self.foreground()].mean() print('angle: %s DEG' % np.degrees(avg_angle)) # use averaged angle instead of beta2 to not overemphasize correction normEmissivity = np.clip( InterpolatedUnivariateSpline( np.radians(angles), vals)(beta2), 0, 1) return normEmissivity
[ "def", "tiltFactor", "(", "self", ",", "midpointdepth", "=", "None", ",", "printAvAngle", "=", "False", ")", ":", "# TODO: can also be only def. with FOV, rot, tilt\r", "beta2", "=", "self", ".", "viewAngle", "(", "midpointdepth", "=", "midpointdepth", ")", "try", ":", "angles", ",", "vals", "=", "getattr", "(", "emissivity_vs_angle", ",", "self", ".", "opts", "[", "'material'", "]", ")", "(", ")", "except", "AttributeError", ":", "raise", "AttributeError", "(", "\"material[%s] is not in list of know materials: %s\"", "%", "(", "self", ".", "opts", "[", "'material'", "]", ",", "[", "o", "[", "0", "]", "for", "o", "in", "getmembers", "(", "emissivity_vs_angle", ")", "if", "isfunction", "(", "o", "[", "1", "]", ")", "]", ")", ")", "if", "printAvAngle", ":", "avg_angle", "=", "beta2", "[", "self", ".", "foreground", "(", ")", "]", ".", "mean", "(", ")", "print", "(", "'angle: %s DEG'", "%", "np", ".", "degrees", "(", "avg_angle", ")", ")", "# use averaged angle instead of beta2 to not overemphasize correction\r", "normEmissivity", "=", "np", ".", "clip", "(", "InterpolatedUnivariateSpline", "(", "np", ".", "radians", "(", "angles", ")", ",", "vals", ")", "(", "beta2", ")", ",", "0", ",", "1", ")", "return", "normEmissivity" ]
get tilt factor from inverse distance law https://en.wikipedia.org/wiki/Inverse-square_law
[ "get", "tilt", "factor", "from", "inverse", "distance", "law", "https", ":", "//", "en", ".", "wikipedia", ".", "org", "/", "wiki", "/", "Inverse", "-", "square_law" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/PerspectiveCorrection.py#L516-L539
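A sketch of the clipped spline lookup the method performs, with a made-up emissivity-vs-angle table in place of the emissivity_vs_angle material functions:

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

angles = np.array([0, 30, 60, 80, 89])           # DEG, hypothetical material data
vals = np.array([0.95, 0.94, 0.90, 0.75, 0.40])  # emissivity at those angles
beta2 = np.radians(45.0)                         # view angle, as from viewAngle()
normEmissivity = np.clip(
    InterpolatedUnivariateSpline(np.radians(angles), vals)(beta2), 0, 1)
print(normEmissivity)  # interpolated emissivity, clipped to [0, 1]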
radjkarl/imgProcessor
imgProcessor/camera/PerspectiveCorrection.py
PerspectiveCorrection.standardUncertainties
def standardUncertainties(self, focal_Length_mm, f_number, midpointdepth=1000, focusAtYX=None, # sigma_best_focus=0, # quad_pos_err=0, shape=None, uncertainties=(0, 0)): ''' focusAtYX - image position which is in focus if not set it is assumed that the image middle is in focus sigma_best_focus - standard deviation of the PSF within the best focus (default blur) uncertainties - contributors for standard uncertainty these need to be perspective transformed to fit the new image shape ''' # TODO: consider quad_pos_error # (also influences intensity corr map) if shape is None: s = self.img.shape else: s = shape # 1. DEFOCUS DUE TO DEPTH OF FIELD ################################## depthMap = self.depthMap(midpointdepth) if focusAtYX is None: # assume image middle is in-focus: focusAtYX = s[0] // 2, s[1] // 2 infocusDepth = depthMap[focusAtYX] depthOfField_blur = defocusThroughDepth( depthMap, infocusDepth, focal_Length_mm, f_number, k=2.335) # 2. INCREASED PIXEL SIZE DUE TO INTERPOLATION BETWEEN # PIXELS MOVED APART ###################################################### # index maps: py, px = np.mgrid[0:s[0], 0:s[1]] # warped index maps: wx = cv2.warpPerspective(np.asfarray(px), self.homography, self._newBorders, borderValue=np.nan, flags=cv2.INTER_LANCZOS4) wy = cv2.warpPerspective(np.asfarray(py), self.homography, self._newBorders, borderValue=np.nan, flags=cv2.INTER_LANCZOS4) pxSizeFactorX = 1 / np.abs(np.gradient(wx)[1]) pxSizeFactorY = 1 / np.abs(np.gradient(wy)[0]) # WARP ALL FIELDS TO NEW PERSPECTIVE AND MULTIPLY WITH PXSIZE FACTOR: depthOfField_blur = cv2.warpPerspective( depthOfField_blur, self.homography, self._newBorders, borderValue=np.nan, ) # perspective transform given uncertainties: warpedU = [] for u in uncertainties: if isinstance(u, np.ndarray) and u.size > 1: u = cv2.warpPerspective(u, self.homography, self._newBorders, borderValue=np.nan, flags=cv2.INTER_LANCZOS4) else: # multiply with area ratio: after/before perspective warp u *= self.areaRatio warpedU.append(u) # given uncertainties after warp: ux, uy = warpedU ux = pxSizeFactorX * (ux**2 + depthOfField_blur**2)**0.5 uy = pxSizeFactorY * (uy**2 + depthOfField_blur**2)**0.5 # TODO: remove depthOfField_blur,fx,fy from return return ux, uy, depthOfField_blur, pxSizeFactorX, pxSizeFactorY
python
def standardUncertainties(self, focal_Length_mm, f_number, midpointdepth=1000, focusAtYX=None, # sigma_best_focus=0, # quad_pos_err=0, shape=None, uncertainties=(0, 0)): ''' focusAtYX - image position which is in focus if not set it is assumed that the image middle is in focus sigma_best_focus - standard deviation of the PSF within the best focus (default blur) uncertainties - contributors for standard uncertainty these need to be perspective transformed to fit the new image shape ''' # TODO: consider quad_pos_error # (also influences intensity corr map) if shape is None: s = self.img.shape else: s = shape # 1. DEFOCUS DUE TO DEPTH OF FIELD ################################## depthMap = self.depthMap(midpointdepth) if focusAtYX is None: # assume image middle is in-focus: focusAtYX = s[0] // 2, s[1] // 2 infocusDepth = depthMap[focusAtYX] depthOfField_blur = defocusThroughDepth( depthMap, infocusDepth, focal_Length_mm, f_number, k=2.335) # 2. INCREASED PIXEL SIZE DUE TO INTERPOLATION BETWEEN # PIXELS MOVED APART ###################################################### # index maps: py, px = np.mgrid[0:s[0], 0:s[1]] # warped index maps: wx = cv2.warpPerspective(np.asfarray(px), self.homography, self._newBorders, borderValue=np.nan, flags=cv2.INTER_LANCZOS4) wy = cv2.warpPerspective(np.asfarray(py), self.homography, self._newBorders, borderValue=np.nan, flags=cv2.INTER_LANCZOS4) pxSizeFactorX = 1 / np.abs(np.gradient(wx)[1]) pxSizeFactorY = 1 / np.abs(np.gradient(wy)[0]) # WARP ALL FIELDS TO NEW PERSPECTIVE AND MULTIPLY WITH PXSIZE FACTOR: depthOfField_blur = cv2.warpPerspective( depthOfField_blur, self.homography, self._newBorders, borderValue=np.nan, ) # perspective transform given uncertainties: warpedU = [] for u in uncertainties: if isinstance(u, np.ndarray) and u.size > 1: u = cv2.warpPerspective(u, self.homography, self._newBorders, borderValue=np.nan, flags=cv2.INTER_LANCZOS4) else: # multiply with area ratio: after/before perspective warp u *= self.areaRatio warpedU.append(u) # given uncertainties after warp: ux, uy = warpedU ux = pxSizeFactorX * (ux**2 + depthOfField_blur**2)**0.5 uy = pxSizeFactorY * (uy**2 + depthOfField_blur**2)**0.5 # TODO: remove depthOfField_blur,fx,fy from return return ux, uy, depthOfField_blur, pxSizeFactorX, pxSizeFactorY
[ "def", "standardUncertainties", "(", "self", ",", "focal_Length_mm", ",", "f_number", ",", "midpointdepth", "=", "1000", ",", "focusAtYX", "=", "None", ",", "# sigma_best_focus=0,\r", "# quad_pos_err=0,\r", "shape", "=", "None", ",", "uncertainties", "=", "(", "0", ",", "0", ")", ")", ":", "# TODO: consider quad_pos_error\r", "# (also influences intensity corr map)\r", "if", "shape", "is", "None", ":", "s", "=", "self", ".", "img", ".", "shape", "else", ":", "s", "=", "shape", "# 1. DEFOCUS DUE TO DEPTH OF FIELD\r", "##################################\r", "depthMap", "=", "self", ".", "depthMap", "(", "midpointdepth", ")", "if", "focusAtYX", "is", "None", ":", "# assume image middle is in-focus:\r", "focusAtYX", "=", "s", "[", "0", "]", "//", "2", ",", "s", "[", "1", "]", "//", "2", "infocusDepth", "=", "depthMap", "[", "focusAtYX", "]", "depthOfField_blur", "=", "defocusThroughDepth", "(", "depthMap", ",", "infocusDepth", ",", "focal_Length_mm", ",", "f_number", ",", "k", "=", "2.335", ")", "# 2. INCREAASED PIXEL SIZE DUE TO INTERPOLATION BETWEEN\r", "# PIXELS MOVED APARD\r", "######################################################\r", "# index maps:\r", "py", ",", "px", "=", "np", ".", "mgrid", "[", "0", ":", "s", "[", "0", "]", ",", "0", ":", "s", "[", "1", "]", "]", "# warped index maps:\r", "wx", "=", "cv2", ".", "warpPerspective", "(", "np", ".", "asfarray", "(", "px", ")", ",", "self", ".", "homography", ",", "self", ".", "_newBorders", ",", "borderValue", "=", "np", ".", "nan", ",", "flags", "=", "cv2", ".", "INTER_LANCZOS4", ")", "wy", "=", "cv2", ".", "warpPerspective", "(", "np", ".", "asfarray", "(", "py", ")", ",", "self", ".", "homography", ",", "self", ".", "_newBorders", ",", "borderValue", "=", "np", ".", "nan", ",", "flags", "=", "cv2", ".", "INTER_LANCZOS4", ")", "pxSizeFactorX", "=", "1", "/", "np", ".", "abs", "(", "np", ".", "gradient", "(", "wx", ")", "[", "1", "]", ")", "pxSizeFactorY", "=", "1", "/", "np", ".", "abs", "(", "np", ".", "gradient", "(", "wy", ")", "[", "0", "]", ")", "# WARP ALL FIELD TO NEW PERSPECTIVE AND MULTIPLY WITH PXSIZE FACTOR:\r", "depthOfField_blur", "=", "cv2", ".", "warpPerspective", "(", "depthOfField_blur", ",", "self", ".", "homography", ",", "self", ".", "_newBorders", ",", "borderValue", "=", "np", ".", "nan", ",", ")", "# perspective transform given uncertainties:\r", "warpedU", "=", "[", "]", "for", "u", "in", "uncertainties", ":", "# warpedU.append([])\r", "# for i in u:\r", "# print i, type(i), isinstance(i, np.ndarray)\r", "if", "isinstance", "(", "u", ",", "np", ".", "ndarray", ")", "and", "u", ".", "size", ">", "1", ":", "u", "=", "cv2", ".", "warpPerspective", "(", "u", ",", "self", ".", "homography", ",", "self", ".", "_newBorders", ",", "borderValue", "=", "np", ".", "nan", ",", "flags", "=", "cv2", ".", "INTER_LANCZOS4", ")", "# *f\r", "else", ":", "# multiply with area ratio: after/before perspective warp\r", "u", "*=", "self", ".", "areaRatio", "warpedU", ".", "append", "(", "u", ")", "# given uncertainties after warp:\r", "ux", ",", "uy", "=", "warpedU", "ux", "=", "pxSizeFactorX", "*", "(", "ux", "**", "2", "+", "depthOfField_blur", "**", "2", ")", "**", "0.5", "uy", "=", "pxSizeFactorY", "*", "(", "uy", "**", "2", "+", "depthOfField_blur", "**", "2", ")", "**", "0.5", "# TODO: remove depthOfField_blur,fx,fy from return\r", "return", "ux", ",", "uy", ",", "depthOfField_blur", ",", "pxSizeFactorX", ",", "pxSizeFactorY" ]
focusAtYX - image position which is in focus if not set it is assumed that the image middle is in focus sigma_best_focus - standard deviation of the PSF within the best focus (default blur) uncertainties - contributors for standard uncertainty these need to be perspective transformed to fit the new image shape
[ "focusAtXY", "-", "image", "position", "with", "is", "in", "focus", "if", "not", "set", "it", "is", "assumed", "that", "the", "image", "middle", "is", "in", "focus", "sigma_best_focus", "-", "standard", "deviation", "of", "the", "PSF", "within", "the", "best", "focus", "(", "default", "blur", ")", "uncertainties", "-", "contibutors", "for", "standard", "uncertainty", "these", "need", "to", "be", "perspective", "transformed", "to", "fit", "the", "new", "image", "shape" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/PerspectiveCorrection.py#L555-L637
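A sketch of the pixel-size trick in step 2: warp the x index map with the same homography, then the inverse gradient tells how far apart neighbouring output pixels were pulled (identity homography here, so the factor is 1 everywhere):

import numpy as np
import cv2

s = (40, 60)
py, px = np.mgrid[0:s[0], 0:s[1]]
H = np.eye(3)  # stand-in for self.homography
wx = cv2.warpPerspective(px.astype(float), H, (s[1], s[0]))
pxSizeFactorX = 1 / np.abs(np.gradient(wx)[1])
print(pxSizeFactorX[20, 30])  # ~1.0 for the identity warp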
radjkarl/imgProcessor
imgProcessor/camera/PerspectiveCorrection.py
PerspectiveCorrection._poseFromQuad
def _poseFromQuad(self, quad=None): ''' estimate the pose of the object plane using quad returns: tvec -> translation vector rvec -> rotation vector ''' if quad is None: quad = self.quad if quad.ndim == 3: quad = quad[0] # http://answers.opencv.org/question/1073/what-format-does-cv2solvepnp-use-for-points-in/ # Find the rotation and translation vectors. img_pn = np.ascontiguousarray(quad[:, :2], dtype=np.float32).reshape((4, 1, 2)) obj_pn = self.obj_points - self.obj_points.mean(axis=0) retval, rvec, tvec = cv2.solvePnP( obj_pn, img_pn, self.opts['cameraMatrix'], self.opts['distCoeffs'], flags=cv2.SOLVEPNP_P3P # because exactly four points are given ) if not retval: print("Couldn't estimate pose") return tvec, rvec
python
def _poseFromQuad(self, quad=None): ''' estimate the pose of the object plane using quad returns: tvec -> translation vector rvec -> rotation vector ''' if quad is None: quad = self.quad if quad.ndim == 3: quad = quad[0] # http://answers.opencv.org/question/1073/what-format-does-cv2solvepnp-use-for-points-in/ # Find the rotation and translation vectors. img_pn = np.ascontiguousarray(quad[:, :2], dtype=np.float32).reshape((4, 1, 2)) obj_pn = self.obj_points - self.obj_points.mean(axis=0) retval, rvec, tvec = cv2.solvePnP( obj_pn, img_pn, self.opts['cameraMatrix'], self.opts['distCoeffs'], flags=cv2.SOLVEPNP_P3P # because exactly four points are given ) if not retval: print("Couldn't estimate pose") return tvec, rvec
[ "def", "_poseFromQuad", "(", "self", ",", "quad", "=", "None", ")", ":", "if", "quad", "is", "None", ":", "quad", "=", "self", ".", "quad", "if", "quad", ".", "ndim", "==", "3", ":", "quad", "=", "quad", "[", "0", "]", "# http://answers.opencv.org/question/1073/what-format-does-cv2solvepnp-use-for-points-in/\r", "# Find the rotation and translation vectors.\r", "img_pn", "=", "np", ".", "ascontiguousarray", "(", "quad", "[", ":", ",", ":", "2", "]", ",", "dtype", "=", "np", ".", "float32", ")", ".", "reshape", "(", "(", "4", ",", "1", ",", "2", ")", ")", "obj_pn", "=", "self", ".", "obj_points", "-", "self", ".", "obj_points", ".", "mean", "(", "axis", "=", "0", ")", "retval", ",", "rvec", ",", "tvec", "=", "cv2", ".", "solvePnP", "(", "obj_pn", ",", "img_pn", ",", "self", ".", "opts", "[", "'cameraMatrix'", "]", ",", "self", ".", "opts", "[", "'distCoeffs'", "]", ",", "flags", "=", "cv2", ".", "SOLVEPNP_P3P", "# because exactly four points are given\r", ")", "if", "not", "retval", ":", "print", "(", "\"Couln't estimate pose\"", ")", "return", "tvec", ",", "rvec" ]
estimate the pose of the object plane using quad returns: tvec -> translation vector rvec -> rotation vector
[ "estimate", "the", "pose", "of", "the", "object", "plane", "using", "quad", "setting", ":", "self", ".", "rvec", "-", ">", "rotation", "vector", "self", ".", "tvec", "-", ">", "translation", "vector" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/PerspectiveCorrection.py#L692-L718
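A self-contained solvePnP sketch in the same shape the method uses - four centred planar object points against four image points, with a hypothetical pinhole camera matrix:

import numpy as np
import cv2

obj_pn = np.array([[-50, -50, 0], [50, -50, 0],
                   [50, 50, 0], [-50, 50, 0]], dtype=np.float32)
img_pn = np.array([[270, 190], [370, 190],
                   [370, 290], [270, 290]],
                  dtype=np.float32).reshape((4, 1, 2))
K = np.array([[800., 0., 320.],
              [0., 800., 240.],
              [0., 0., 1.]])
retval, rvec, tvec = cv2.solvePnP(obj_pn, img_pn, K, None,
                                  flags=cv2.SOLVEPNP_P3P)
print(retval, tvec.ravel())  # True, z-translation ~800 for this setup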
radjkarl/imgProcessor
imgProcessor/camera/PerspectiveCorrection.py
PerspectiveCorrection.drawQuad
def drawQuad(self, img=None, quad=None, thickness=30): ''' Draw the quad into given img ''' if img is None: img = self.img if quad is None: quad = self.quad q = np.int32(quad) c = int(img.max()) cv2.line(img, tuple(q[0]), tuple(q[1]), c, thickness) cv2.line(img, tuple(q[1]), tuple(q[2]), c, thickness) cv2.line(img, tuple(q[2]), tuple(q[3]), c, thickness) cv2.line(img, tuple(q[3]), tuple(q[0]), c, thickness) return img
python
def drawQuad(self, img=None, quad=None, thickness=30): ''' Draw the quad into given img ''' if img is None: img = self.img if quad is None: quad = self.quad q = np.int32(quad) c = int(img.max()) cv2.line(img, tuple(q[0]), tuple(q[1]), c, thickness) cv2.line(img, tuple(q[1]), tuple(q[2]), c, thickness) cv2.line(img, tuple(q[2]), tuple(q[3]), c, thickness) cv2.line(img, tuple(q[3]), tuple(q[0]), c, thickness) return img
[ "def", "drawQuad", "(", "self", ",", "img", "=", "None", ",", "quad", "=", "None", ",", "thickness", "=", "30", ")", ":", "if", "img", "is", "None", ":", "img", "=", "self", ".", "img", "if", "quad", "is", "None", ":", "quad", "=", "self", ".", "quad", "q", "=", "np", ".", "int32", "(", "quad", ")", "c", "=", "int", "(", "img", ".", "max", "(", ")", ")", "cv2", ".", "line", "(", "img", ",", "tuple", "(", "q", "[", "0", "]", ")", ",", "tuple", "(", "q", "[", "1", "]", ")", ",", "c", ",", "thickness", ")", "cv2", ".", "line", "(", "img", ",", "tuple", "(", "q", "[", "1", "]", ")", ",", "tuple", "(", "q", "[", "2", "]", ")", ",", "c", ",", "thickness", ")", "cv2", ".", "line", "(", "img", ",", "tuple", "(", "q", "[", "2", "]", ")", ",", "tuple", "(", "q", "[", "3", "]", ")", ",", "c", ",", "thickness", ")", "cv2", ".", "line", "(", "img", ",", "tuple", "(", "q", "[", "3", "]", ")", ",", "tuple", "(", "q", "[", "0", "]", ")", ",", "c", ",", "thickness", ")", "return", "img" ]
Draw the quad into given img
[ "Draw", "the", "quad", "into", "given", "img" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/PerspectiveCorrection.py#L761-L775
radjkarl/imgProcessor
imgProcessor/camera/PerspectiveCorrection.py
PerspectiveCorrection.draw3dCoordAxis
def draw3dCoordAxis(self, img=None, thickness=8): ''' draw the 3d coordinate axes into given image if image == False: create an empty image ''' if img is None: img = self.img elif img is False: img = np.zeros(shape=self.img.shape, dtype=self.img.dtype) else: img = imread(img) # project 3D points to image plane: # self.opts['obj_width_mm'], self.opts['obj_height_mm'] w, h = self.opts['new_size'] axis = np.float32([[0.5 * w, 0.5 * h, 0], [w, 0.5 * h, 0], [0.5 * w, h, 0], [0.5 * w, 0.5 * h, -0.5 * w]]) t, r = self.pose() imgpts = cv2.projectPoints(axis, r, t, self.opts['cameraMatrix'], self.opts['distCoeffs'])[0] mx = int(img.max()) origin = tuple(imgpts[0].ravel()) cv2.line(img, origin, tuple(imgpts[1].ravel()), (0, 0, mx), thickness) cv2.line(img, origin, tuple(imgpts[2].ravel()), (0, mx, 0), thickness) cv2.line( img, origin, tuple(imgpts[3].ravel()), (mx, 0, 0), thickness * 2) return img
python
def draw3dCoordAxis(self, img=None, thickness=8): ''' draw the 3d coordinate axes into given image if image == False: create an empty image ''' if img is None: img = self.img elif img is False: img = np.zeros(shape=self.img.shape, dtype=self.img.dtype) else: img = imread(img) # project 3D points to image plane: # self.opts['obj_width_mm'], self.opts['obj_height_mm'] w, h = self.opts['new_size'] axis = np.float32([[0.5 * w, 0.5 * h, 0], [w, 0.5 * h, 0], [0.5 * w, h, 0], [0.5 * w, 0.5 * h, -0.5 * w]]) t, r = self.pose() imgpts = cv2.projectPoints(axis, r, t, self.opts['cameraMatrix'], self.opts['distCoeffs'])[0] mx = int(img.max()) origin = tuple(imgpts[0].ravel()) cv2.line(img, origin, tuple(imgpts[1].ravel()), (0, 0, mx), thickness) cv2.line(img, origin, tuple(imgpts[2].ravel()), (0, mx, 0), thickness) cv2.line( img, origin, tuple(imgpts[3].ravel()), (mx, 0, 0), thickness * 2) return img
[ "def", "draw3dCoordAxis", "(", "self", ",", "img", "=", "None", ",", "thickness", "=", "8", ")", ":", "if", "img", "is", "None", ":", "img", "=", "self", ".", "img", "elif", "img", "is", "False", ":", "img", "=", "np", ".", "zeros", "(", "shape", "=", "self", ".", "img", ".", "shape", ",", "dtype", "=", "self", ".", "img", ".", "dtype", ")", "else", ":", "img", "=", "imread", "(", "img", ")", "# project 3D points to image plane:\r", "# self.opts['obj_width_mm'], self.opts['obj_height_mm']\r", "w", ",", "h", "=", "self", ".", "opts", "[", "'new_size'", "]", "axis", "=", "np", ".", "float32", "(", "[", "[", "0.5", "*", "w", ",", "0.5", "*", "h", ",", "0", "]", ",", "[", "w", ",", "0.5", "*", "h", ",", "0", "]", ",", "[", "0.5", "*", "w", ",", "h", ",", "0", "]", ",", "[", "0.5", "*", "w", ",", "0.5", "*", "h", ",", "-", "0.5", "*", "w", "]", "]", ")", "t", ",", "r", "=", "self", ".", "pose", "(", ")", "imgpts", "=", "cv2", ".", "projectPoints", "(", "axis", ",", "r", ",", "t", ",", "self", ".", "opts", "[", "'cameraMatrix'", "]", ",", "self", ".", "opts", "[", "'distCoeffs'", "]", ")", "[", "0", "]", "mx", "=", "int", "(", "img", ".", "max", "(", ")", ")", "origin", "=", "tuple", "(", "imgpts", "[", "0", "]", ".", "ravel", "(", ")", ")", "cv2", ".", "line", "(", "img", ",", "origin", ",", "tuple", "(", "imgpts", "[", "1", "]", ".", "ravel", "(", ")", ")", ",", "(", "0", ",", "0", ",", "mx", ")", ",", "thickness", ")", "cv2", ".", "line", "(", "img", ",", "origin", ",", "tuple", "(", "imgpts", "[", "2", "]", ".", "ravel", "(", ")", ")", ",", "(", "0", ",", "mx", ",", "0", ")", ",", "thickness", ")", "cv2", ".", "line", "(", "img", ",", "origin", ",", "tuple", "(", "imgpts", "[", "3", "]", ".", "ravel", "(", ")", ")", ",", "(", "mx", ",", "0", ",", "0", ")", ",", "thickness", "*", "2", ")", "return", "img" ]
draw the 3d coordinate axes into the given image if img is False: create an empty image
[ "draw", "the", "3d", "coordinate", "axes", "into", "the", "given", "image", "if", "img", "is", "False", ":", "create", "an", "empty", "image" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/PerspectiveCorrection.py#L777-L807
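The record above depends on instance state (self.pose(), self.opts), but the underlying cv2.projectPoints axis-drawing idiom can be tried standalone. A minimal sketch with an invented pinhole camera (every intrinsic and pose value below is an illustrative assumption, and pixel coordinates are cast to int because recent OpenCV builds reject float point tuples):

import numpy as np
import cv2

img = np.zeros((480, 640, 3), np.uint8)
cameraMatrix = np.array([[600., 0, 320], [0, 600., 240], [0, 0, 1]])
distCoeffs = np.zeros(5)
rvec = np.zeros(3)                     # no rotation, for simplicity
tvec = np.array([0., 0., 800.])        # object 800 units in front of the camera
w = h = 200.0
axis = np.float32([[0.5 * w, 0.5 * h, 0],          # origin at the object centre
                   [w, 0.5 * h, 0],                # +x endpoint
                   [0.5 * w, h, 0],                # +y endpoint
                   [0.5 * w, 0.5 * h, -0.5 * w]])  # endpoint pointing out of the plane
imgpts = cv2.projectPoints(axis, rvec, tvec, cameraMatrix, distCoeffs)[0]
ox, oy = imgpts[0].ravel()
for i, color in zip((1, 2, 3), ((0, 0, 255), (0, 255, 0), (255, 0, 0))):
    px, py = imgpts[i].ravel()
    cv2.line(img, (int(ox), int(oy)), (int(px), int(py)), color, 2)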
radjkarl/imgProcessor
imgProcessor/camera/PerspectiveCorrection.py
PerspectiveCorrection._calcQuadSize
def _calcQuadSize(corners, aspectRatio):
    '''
    return the size of a rectangle in perspective distortion in [px]
    DEBUG: PUT THAT BACK IN??::
        if aspectRatio is not given it will be determined
    '''
    if aspectRatio > 1:
        # x is bigger -> reduce y
        x_length = PerspectiveCorrection._quadXLength(corners)
        y = x_length / aspectRatio
        return x_length, y
    else:
        # y is bigger -> reduce x
        y_length = PerspectiveCorrection._quadYLength(corners)
        x = y_length * aspectRatio
        return x, y_length
python
def _calcQuadSize(corners, aspectRatio):
    '''
    return the size of a rectangle in perspective distortion in [px]
    DEBUG: PUT THAT BACK IN??::
        if aspectRatio is not given it will be determined
    '''
    if aspectRatio > 1:
        # x is bigger -> reduce y
        x_length = PerspectiveCorrection._quadXLength(corners)
        y = x_length / aspectRatio
        return x_length, y
    else:
        # y is bigger -> reduce x
        y_length = PerspectiveCorrection._quadYLength(corners)
        x = y_length * aspectRatio
        return x, y_length
[ "def", "_calcQuadSize", "(", "corners", ",", "aspectRatio", ")", ":", "if", "aspectRatio", ">", "1", ":", "# x is bigger -> reduce y\r", "x_length", "=", "PerspectiveCorrection", ".", "_quadXLength", "(", "corners", ")", "y", "=", "x_length", "/", "aspectRatio", "return", "x_length", ",", "y", "else", ":", "# y is bigger -> reduce x\r", "y_length", "=", "PerspectiveCorrection", ".", "_quadYLength", "(", "corners", ")", "x", "=", "y_length", "*", "aspectRatio", "return", "x", ",", "y_length" ]
return the size of a rectangle in perspective distortion in [px] DEBUG: PUT THAT BACK IN??:: if aspectRatio is not given it will be determined
[ "return", "the", "size", "of", "a", "rectangle", "in", "perspective", "distortion", "in", "[", "px", "]", "DEBUG", ":", "PUT", "THAT", "BACK", "IN??", "::", "if", "aspectRatio", "is", "not", "given", "it", "will", "be", "determined" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/PerspectiveCorrection.py#L810-L823
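_quadXLength/_quadYLength are not part of this record; assuming they average the two opposing edge lengths of the corner quad (a guess from their names), the whole size computation can be reproduced in a few lines:

import numpy as np

corners = np.array([[0, 0], [400, 10], [410, 210], [5, 200]], float)  # TL, TR, BR, BL
x_len = (np.linalg.norm(corners[1] - corners[0]) +
         np.linalg.norm(corners[2] - corners[3])) / 2   # mean of top/bottom edges
y_len = (np.linalg.norm(corners[3] - corners[0]) +
         np.linalg.norm(corners[2] - corners[1])) / 2   # mean of left/right edges
aspectRatio = 2.0                                       # known width/height of the object
if aspectRatio > 1:                                     # keep x, derive y
    size = (x_len, x_len / aspectRatio)
else:                                                   # keep y, derive x
    size = (y_len * aspectRatio, y_len)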
radjkarl/imgProcessor
imgProcessor/transform/polarTransform.py
linearToPolar
def linearToPolar(img, center=None, final_radius=None, initial_radius=None, phase_width=None, interpolation=cv2.INTER_AREA, maps=None, borderValue=0, borderMode=cv2.BORDER_REFLECT, **opts): ''' map a 2d (x,y) Cartesian array to a polar (r, phi) array using opencv.remap ''' if maps is None: mapY, mapX = linearToPolarMaps(img.shape[:2], center, final_radius, initial_radius, phase_width) else: mapY, mapX = maps o = {'interpolation': interpolation, 'borderValue': borderValue, 'borderMode': borderMode} o.update(opts) return cv2.remap(img, mapY, mapX, **o)
python
def linearToPolar(img, center=None, final_radius=None, initial_radius=None, phase_width=None, interpolation=cv2.INTER_AREA, maps=None, borderValue=0, borderMode=cv2.BORDER_REFLECT, **opts): ''' map a 2d (x,y) Cartesian array to a polar (r, phi) array using opencv.remap ''' if maps is None: mapY, mapX = linearToPolarMaps(img.shape[:2], center, final_radius, initial_radius, phase_width) else: mapY, mapX = maps o = {'interpolation': interpolation, 'borderValue': borderValue, 'borderMode': borderMode} o.update(opts) return cv2.remap(img, mapY, mapX, **o)
[ "def", "linearToPolar", "(", "img", ",", "center", "=", "None", ",", "final_radius", "=", "None", ",", "initial_radius", "=", "None", ",", "phase_width", "=", "None", ",", "interpolation", "=", "cv2", ".", "INTER_AREA", ",", "maps", "=", "None", ",", "borderValue", "=", "0", ",", "borderMode", "=", "cv2", ".", "BORDER_REFLECT", ",", "*", "*", "opts", ")", ":", "if", "maps", "is", "None", ":", "mapY", ",", "mapX", "=", "linearToPolarMaps", "(", "img", ".", "shape", "[", ":", "2", "]", ",", "center", ",", "final_radius", ",", "initial_radius", ",", "phase_width", ")", "else", ":", "mapY", ",", "mapX", "=", "maps", "o", "=", "{", "'interpolation'", ":", "interpolation", ",", "'borderValue'", ":", "borderValue", ",", "'borderMode'", ":", "borderMode", "}", "o", ".", "update", "(", "opts", ")", "return", "cv2", ".", "remap", "(", "img", ",", "mapY", ",", "mapX", ",", "*", "*", "o", ")" ]
map a 2d (x,y) Cartesian array to a polar (r, phi) array using opencv.remap
[ "map", "a", "2d", "(", "x", "y", ")", "Cartesian", "array", "to", "a", "polar", "(", "r", "phi", ")", "array", "using", "opencv", ".", "remap" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/transform/polarTransform.py#L45-L66
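A usage sketch (import path taken from the record's URL; the explicit None arguments to linearToPolarMaps mirror the call inside linearToPolar). Precomputing the maps pays off when many frames share the same geometry:

import numpy as np
from imgProcessor.transform.polarTransform import linearToPolar, linearToPolarMaps

img = np.random.rand(256, 256).astype(np.float32)
polar = linearToPolar(img)                                       # maps built on the fly
maps = linearToPolarMaps(img.shape[:2], None, None, None, None)  # build once ...
for frame in (img, img * 0.5):
    polar = linearToPolar(frame, maps=maps)                      # ... reuse per frame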
radjkarl/imgProcessor
imgProcessor/transform/polarTransform.py
polarToLinear
def polarToLinear(img, shape=None, center=None, maps=None, interpolation=cv2.INTER_AREA, borderValue=0, borderMode=cv2.BORDER_REFLECT, **opts): ''' map a 2d polar (r, phi) polar array to a Cartesian (x,y) array using opencv.remap ''' if maps is None: mapY, mapX = polarToLinearMaps(img.shape[:2], shape, center) else: mapY, mapX = maps o = {'interpolation': interpolation, 'borderValue': borderValue, 'borderMode': borderMode} o.update(opts) return cv2.remap(img, mapY, mapX, **o)
python
def polarToLinear(img, shape=None, center=None, maps=None, interpolation=cv2.INTER_AREA, borderValue=0, borderMode=cv2.BORDER_REFLECT, **opts): ''' map a 2d polar (r, phi) polar array to a Cartesian (x,y) array using opencv.remap ''' if maps is None: mapY, mapX = polarToLinearMaps(img.shape[:2], shape, center) else: mapY, mapX = maps o = {'interpolation': interpolation, 'borderValue': borderValue, 'borderMode': borderMode} o.update(opts) return cv2.remap(img, mapY, mapX, **o)
[ "def", "polarToLinear", "(", "img", ",", "shape", "=", "None", ",", "center", "=", "None", ",", "maps", "=", "None", ",", "interpolation", "=", "cv2", ".", "INTER_AREA", ",", "borderValue", "=", "0", ",", "borderMode", "=", "cv2", ".", "BORDER_REFLECT", ",", "*", "*", "opts", ")", ":", "if", "maps", "is", "None", ":", "mapY", ",", "mapX", "=", "polarToLinearMaps", "(", "img", ".", "shape", "[", ":", "2", "]", ",", "shape", ",", "center", ")", "else", ":", "mapY", ",", "mapX", "=", "maps", "o", "=", "{", "'interpolation'", ":", "interpolation", ",", "'borderValue'", ":", "borderValue", ",", "'borderMode'", ":", "borderMode", "}", "o", ".", "update", "(", "opts", ")", "return", "cv2", ".", "remap", "(", "img", ",", "mapY", ",", "mapX", ",", "*", "*", "o", ")" ]
map a 2d polar (r, phi) polar array to a Cartesian (x,y) array using opencv.remap
[ "map", "a", "2d", "polar", "(", "r", "phi", ")", "polar", "array", "to", "a", "Cartesian", "(", "x", "y", ")", "array", "using", "opencv", ".", "remap" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/transform/polarTransform.py#L87-L105
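Since polarToLinear inverts linearToPolar, a forward/backward round trip should approximately reproduce a smooth input away from the borders; a quick self-check sketch:

import numpy as np
from imgProcessor.transform.polarTransform import linearToPolar, polarToLinear

yy, xx = np.mgrid[:200, :200]
img = np.hypot(xx - 100, yy - 100).astype(np.float32)  # smooth radial ramp
back = polarToLinear(linearToPolar(img), shape=img.shape)
err = np.abs(back - img)[50:150, 50:150].mean()        # judge the central region only
print('mean round-trip error:', err)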
radjkarl/imgProcessor
imgProcessor/measure/sharpness/parameters.py
modifiedLaplacian
def modifiedLaplacian(img): ''''LAPM' algorithm (Nayar89)''' M = np.array([-1, 2, -1]) G = cv2.getGaussianKernel(ksize=3, sigma=-1) Lx = cv2.sepFilter2D(src=img, ddepth=cv2.CV_64F, kernelX=M, kernelY=G) Ly = cv2.sepFilter2D(src=img, ddepth=cv2.CV_64F, kernelX=G, kernelY=M) FM = np.abs(Lx) + np.abs(Ly) return cv2.mean(FM)[0]
python
def modifiedLaplacian(img): ''''LAPM' algorithm (Nayar89)''' M = np.array([-1, 2, -1]) G = cv2.getGaussianKernel(ksize=3, sigma=-1) Lx = cv2.sepFilter2D(src=img, ddepth=cv2.CV_64F, kernelX=M, kernelY=G) Ly = cv2.sepFilter2D(src=img, ddepth=cv2.CV_64F, kernelX=G, kernelY=M) FM = np.abs(Lx) + np.abs(Ly) return cv2.mean(FM)[0]
[ "def", "modifiedLaplacian", "(", "img", ")", ":", "M", "=", "np", ".", "array", "(", "[", "-", "1", ",", "2", ",", "-", "1", "]", ")", "G", "=", "cv2", ".", "getGaussianKernel", "(", "ksize", "=", "3", ",", "sigma", "=", "-", "1", ")", "Lx", "=", "cv2", ".", "sepFilter2D", "(", "src", "=", "img", ",", "ddepth", "=", "cv2", ".", "CV_64F", ",", "kernelX", "=", "M", ",", "kernelY", "=", "G", ")", "Ly", "=", "cv2", ".", "sepFilter2D", "(", "src", "=", "img", ",", "ddepth", "=", "cv2", ".", "CV_64F", ",", "kernelX", "=", "G", ",", "kernelY", "=", "M", ")", "FM", "=", "np", ".", "abs", "(", "Lx", ")", "+", "np", ".", "abs", "(", "Ly", ")", "return", "cv2", ".", "mean", "(", "FM", ")", "[", "0", "]" ]
'LAPM' algorithm (Nayar89)
[ "LAPM", "algorithm", "(", "Nayar89", ")" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/measure/sharpness/parameters.py#L16-L23
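The two sepFilter2D calls realise the modified Laplacian separably: a 1-D second derivative [-1, 2, -1] along one axis, Gaussian smoothing along the other. A cross-check with plain 1-D convolutions (scipy is not used by the original module; interior pixels agree, borders differ slightly because the padding modes differ):

import numpy as np
import cv2
from scipy.ndimage import convolve1d

img = np.random.rand(64, 64)
M = np.array([-1., 2., -1.])
G = cv2.getGaussianKernel(ksize=3, sigma=-1).ravel()
Lx = convolve1d(convolve1d(img, M, axis=1), G, axis=0)  # d2/dx2, smoothed in y
Ly = convolve1d(convolve1d(img, G, axis=1), M, axis=0)  # d2/dy2, smoothed in x
lapm = np.mean(np.abs(Lx) + np.abs(Ly))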
radjkarl/imgProcessor
imgProcessor/measure/sharpness/parameters.py
varianceOfLaplacian
def varianceOfLaplacian(img): ''''LAPV' algorithm (Pech2000)''' lap = cv2.Laplacian(img, ddepth=-1)#cv2.cv.CV_64F) stdev = cv2.meanStdDev(lap)[1] s = stdev[0]**2 return s[0]
python
def varianceOfLaplacian(img): ''''LAPV' algorithm (Pech2000)''' lap = cv2.Laplacian(img, ddepth=-1)#cv2.cv.CV_64F) stdev = cv2.meanStdDev(lap)[1] s = stdev[0]**2 return s[0]
[ "def", "varianceOfLaplacian", "(", "img", ")", ":", "lap", "=", "cv2", ".", "Laplacian", "(", "img", ",", "ddepth", "=", "-", "1", ")", "#cv2.cv.CV_64F)\r", "stdev", "=", "cv2", ".", "meanStdDev", "(", "lap", ")", "[", "1", "]", "s", "=", "stdev", "[", "0", "]", "**", "2", "return", "s", "[", "0", "]" ]
'LAPV' algorithm (Pech2000)
[ "LAPV", "algorithm", "(", "Pech2000", ")" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/measure/sharpness/parameters.py#L26-L31
radjkarl/imgProcessor
imgProcessor/measure/sharpness/parameters.py
tenengrad
def tenengrad(img, ksize=3): ''''TENG' algorithm (Krotkov86)''' Gx = cv2.Sobel(img, ddepth=cv2.CV_64F, dx=1, dy=0, ksize=ksize) Gy = cv2.Sobel(img, ddepth=cv2.CV_64F, dx=0, dy=1, ksize=ksize) FM = Gx*Gx + Gy*Gy mn = cv2.mean(FM)[0] if np.isnan(mn): return np.nanmean(FM) return mn
python
def tenengrad(img, ksize=3): ''''TENG' algorithm (Krotkov86)''' Gx = cv2.Sobel(img, ddepth=cv2.CV_64F, dx=1, dy=0, ksize=ksize) Gy = cv2.Sobel(img, ddepth=cv2.CV_64F, dx=0, dy=1, ksize=ksize) FM = Gx*Gx + Gy*Gy mn = cv2.mean(FM)[0] if np.isnan(mn): return np.nanmean(FM) return mn
[ "def", "tenengrad", "(", "img", ",", "ksize", "=", "3", ")", ":", "Gx", "=", "cv2", ".", "Sobel", "(", "img", ",", "ddepth", "=", "cv2", ".", "CV_64F", ",", "dx", "=", "1", ",", "dy", "=", "0", ",", "ksize", "=", "ksize", ")", "Gy", "=", "cv2", ".", "Sobel", "(", "img", ",", "ddepth", "=", "cv2", ".", "CV_64F", ",", "dx", "=", "0", ",", "dy", "=", "1", ",", "ksize", "=", "ksize", ")", "FM", "=", "Gx", "*", "Gx", "+", "Gy", "*", "Gy", "mn", "=", "cv2", ".", "mean", "(", "FM", ")", "[", "0", "]", "if", "np", ".", "isnan", "(", "mn", ")", ":", "return", "np", ".", "nanmean", "(", "FM", ")", "return", "mn" ]
'TENG' algorithm (Krotkov86)
[ "TENG", "algorithm", "(", "Krotkov86", ")" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/measure/sharpness/parameters.py#L34-L42
radjkarl/imgProcessor
imgProcessor/measure/sharpness/parameters.py
normalizedGraylevelVariance
def normalizedGraylevelVariance(img): ''''GLVN' algorithm (Santos97)''' mean, stdev = cv2.meanStdDev(img) s = stdev[0]**2 / mean[0] return s[0]
python
def normalizedGraylevelVariance(img): ''''GLVN' algorithm (Santos97)''' mean, stdev = cv2.meanStdDev(img) s = stdev[0]**2 / mean[0] return s[0]
[ "def", "normalizedGraylevelVariance", "(", "img", ")", ":", "mean", ",", "stdev", "=", "cv2", ".", "meanStdDev", "(", "img", ")", "s", "=", "stdev", "[", "0", "]", "**", "2", "/", "mean", "[", "0", "]", "return", "s", "[", "0", "]" ]
'GLVN' algorithm (Santos97)
[ "GLVN", "algorithm", "(", "Santos97", ")" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/measure/sharpness/parameters.py#L45-L49
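All four focus measures in this file should rank an image above a blurred copy of itself; a sanity-check sketch exercising them together (import path assumed from the record URLs):

import numpy as np
import cv2
from imgProcessor.measure.sharpness.parameters import (
    modifiedLaplacian, varianceOfLaplacian, tenengrad, normalizedGraylevelVariance)

rng = np.random.RandomState(0)
sharp = rng.rand(128, 128) * 255
blurred = cv2.GaussianBlur(sharp, (9, 9), 3)
for fm in (modifiedLaplacian, varianceOfLaplacian, tenengrad, normalizedGraylevelVariance):
    assert fm(sharp) > fm(blurred), fm.__name__   # sharper input scores higher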
radjkarl/imgProcessor
imgProcessor/measure/linePlot.py
linePlot
def linePlot(img, x0, y0, x1, y1, resolution=None, order=3):
    '''
    returns [img] intensity values along line
    defined by [x0, y0, x1, y1]

    resolution ... number of data points to evaluate
    order ... interpolation precision
    '''
    if resolution is None:
        resolution = int( ((x1-x0)**2 + (y1-y0)**2 )**0.5 )
    if order == 0:
        x = np.linspace(x0, x1, resolution, dtype=int)
        y = np.linspace(y0, y1, resolution, dtype=int)
        return img[y, x]
    x = np.linspace(x0, x1, resolution)
    y = np.linspace(y0, y1, resolution)
    return map_coordinates(img, np.vstack((y,x)), order=order)
python
def linePlot(img, x0, y0, x1, y1, resolution=None, order=3):
    '''
    returns [img] intensity values along line
    defined by [x0, y0, x1, y1]

    resolution ... number of data points to evaluate
    order ... interpolation precision
    '''
    if resolution is None:
        resolution = int( ((x1-x0)**2 + (y1-y0)**2 )**0.5 )
    if order == 0:
        x = np.linspace(x0, x1, resolution, dtype=int)
        y = np.linspace(y0, y1, resolution, dtype=int)
        return img[y, x]
    x = np.linspace(x0, x1, resolution)
    y = np.linspace(y0, y1, resolution)
    return map_coordinates(img, np.vstack((y,x)), order=order)
[ "def", "linePlot", "(", "img", ",", "x0", ",", "y0", ",", "x1", ",", "y1", ",", "resolution", "=", "None", ",", "order", "=", "3", ")", ":", "if", "resolution", "is", "None", ":", "resolution", "=", "int", "(", "(", "(", "x1", "-", "x0", ")", "**", "2", "+", "(", "y1", "-", "y0", ")", "**", "2", ")", "**", "0.5", ")", "if", "order", "==", "0", ":", "x", "=", "np", ".", "linspace", "(", "x0", ",", "x1", ",", "resolution", ",", "dtype", "=", "int", ")", "y", "=", "np", ".", "linspace", "(", "y0", ",", "y1", ",", "resolution", ",", "dtype", "=", "int", ")", "return", "img", "[", "y", ",", "x", "]", "x", "=", "np", ".", "linspace", "(", "x0", ",", "x1", ",", "resolution", ")", "y", "=", "np", ".", "linspace", "(", "y0", ",", "y1", ",", "resolution", ")", "return", "map_coordinates", "(", "img", ",", "np", ".", "vstack", "(", "(", "y", ",", "x", ")", ")", ",", "order", "=", "order", ")" ]
returns [img] intensity values along line defined by [x0, y0, x1, y1] resolution ... number of data points to evaluate order ... interpolation precision
[ "returns", "[", "img", "]", "intensity", "values", "along", "line", "defined", "by", "[", "x0", "y0", "x1", "y1", "]", "resolution", "...", "number", "of", "data", "points", "to", "evaluate", "order", "...", "interpolation", "precision" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/measure/linePlot.py#L10-L28
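A usage sketch: with a vertical brightness ramp, a diagonal profile rises linearly; order=0 snaps to the nearest pixels while higher orders spline-interpolate through scipy's map_coordinates (import path assumed from the record URL):

import numpy as np
from imgProcessor.measure.linePlot import linePlot

img = np.outer(np.arange(100.), np.ones(100))      # brightness grows down the rows
profile = linePlot(img, x0=0, y0=0, x1=99, y1=99)  # ~140 cubic-interpolated samples
nearest = linePlot(img, 0, 0, 99, 99, resolution=50, order=0)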
radjkarl/imgProcessor
DUMP/FlatFieldFromImgFit.py
FlatFieldFromImgFit.flatFieldFromFunction
def flatFieldFromFunction(self): ''' calculate flatField from fitting vignetting function to averaged fit-image returns flatField, average background level, fitted image, valid indices mask ''' fitimg, mask = self._prepare() mask = ~mask s0, s1 = fitimg.shape #f-value, alpha, fx, cx, cy guess = (s1 * 0.7, 0, 1, s0 / 2, s1 / 2) # set assume normal plane - no tilt and rotation: fn = lambda xy, f, alpha, fx, cx, cy: vignetting((xy[0] * fx, xy[1]), f, alpha, cx=cx, cy=cy) # mask = fitimg>0.5 flatfield = fit2dArrayToFn(fitimg, fn, mask=mask, guess=guess, output_shape=self._orig_shape)[0] return flatfield, self.bglevel / self._n, fitimg, mask
python
def flatFieldFromFunction(self): ''' calculate flatField from fitting vignetting function to averaged fit-image returns flatField, average background level, fitted image, valid indices mask ''' fitimg, mask = self._prepare() mask = ~mask s0, s1 = fitimg.shape #f-value, alpha, fx, cx, cy guess = (s1 * 0.7, 0, 1, s0 / 2, s1 / 2) # set assume normal plane - no tilt and rotation: fn = lambda xy, f, alpha, fx, cx, cy: vignetting((xy[0] * fx, xy[1]), f, alpha, cx=cx, cy=cy) # mask = fitimg>0.5 flatfield = fit2dArrayToFn(fitimg, fn, mask=mask, guess=guess, output_shape=self._orig_shape)[0] return flatfield, self.bglevel / self._n, fitimg, mask
[ "def", "flatFieldFromFunction", "(", "self", ")", ":", "fitimg", ",", "mask", "=", "self", ".", "_prepare", "(", ")", "mask", "=", "~", "mask", "s0", ",", "s1", "=", "fitimg", ".", "shape", "#f-value, alpha, fx, cx, cy\r", "guess", "=", "(", "s1", "*", "0.7", ",", "0", ",", "1", ",", "s0", "/", "2", ",", "s1", "/", "2", ")", "# set assume normal plane - no tilt and rotation:\r", "fn", "=", "lambda", "xy", ",", "f", ",", "alpha", ",", "fx", ",", "cx", ",", "cy", ":", "vignetting", "(", "(", "xy", "[", "0", "]", "*", "fx", ",", "xy", "[", "1", "]", ")", ",", "f", ",", "alpha", ",", "cx", "=", "cx", ",", "cy", "=", "cy", ")", "# mask = fitimg>0.5\r", "flatfield", "=", "fit2dArrayToFn", "(", "fitimg", ",", "fn", ",", "mask", "=", "mask", ",", "guess", "=", "guess", ",", "output_shape", "=", "self", ".", "_orig_shape", ")", "[", "0", "]", "return", "flatfield", ",", "self", ".", "bglevel", "/", "self", ".", "_n", ",", "fitimg", ",", "mask" ]
calculate flatField from fitting vignetting function to averaged fit-image returns flatField, average background level, fitted image, valid indices mask
[ "calculate", "flatField", "from", "fitting", "vignetting", "function", "to", "averaged", "fit", "-", "image", "returns", "flatField", "average", "background", "level", "fitted", "image", "valid", "indices", "mask" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/DUMP/FlatFieldFromImgFit.py#L104-L125
radjkarl/imgProcessor
DUMP/FlatFieldFromImgFit.py
FlatFieldFromImgFit.flatFieldFromFit
def flatFieldFromFit(self):
    '''
    calculate flatField from 2d-polynomial fit filling
    all high gradient areas within averaged fit-image

    returns flatField, average background level,
            fitted image, valid indices mask
    '''
    fitimg, mask = self._prepare()

    out = fitimg.copy()
    lastm = 0
    for _ in range(10):
        out = polyfit2dGrid(out, mask, 2)
        mask = highGrad(out)
        m = mask.sum()
        if m == lastm:
            break
        lastm = m
    out = np.clip(out, 0.1, 1)

    out = resize(out, self._orig_shape, mode='reflect')
    return out, self.bglevel / self._n, fitimg, mask
python
def flatFieldFromFit(self):
    '''
    calculate flatField from 2d-polynomial fit filling
    all high gradient areas within averaged fit-image

    returns flatField, average background level,
            fitted image, valid indices mask
    '''
    fitimg, mask = self._prepare()

    out = fitimg.copy()
    lastm = 0
    for _ in range(10):
        out = polyfit2dGrid(out, mask, 2)
        mask = highGrad(out)
        m = mask.sum()
        if m == lastm:
            break
        lastm = m
    out = np.clip(out, 0.1, 1)

    out = resize(out, self._orig_shape, mode='reflect')
    return out, self.bglevel / self._n, fitimg, mask
[ "def", "flatFieldFromFit", "(", "self", ")", ":", "fitimg", ",", "mask", "=", "self", ".", "_prepare", "(", ")", "out", "=", "fitimg", ".", "copy", "(", ")", "lastm", "=", "0", "for", "_", "in", "range", "(", "10", ")", ":", "out", "=", "polyfit2dGrid", "(", "out", ",", "mask", ",", "2", ")", "mask", "=", "highGrad", "(", "out", ")", "m", "=", "mask", ".", "sum", "(", ")", "if", "m", "==", "lastm", ":", "break", "lastm", "=", "m", "out", "=", "np", ".", "clip", "(", "out", ",", "0.1", ",", "1", ")", "out", "=", "resize", "(", "out", ",", "self", ".", "_orig_shape", ",", "mode", "=", "'reflect'", ")", "return", "out", ",", "self", ".", "bglevel", "/", "self", ".", "_n", ",", "fitimg", ",", "mask" ]
calculate flatField from 2d-polynomial fit filling all high gradient areas within averaged fit-image returns flatField, average background level, fitted image, valid indices mask
[ "calculate", "flatField", "from", "2d", "-", "polynomial", "fit", "filling", "all", "high", "gradient", "areas", "within", "averaged", "fit", "-", "image", "returns", "flatField", "average", "background", "level", "fitted", "image", "valid", "indices", "mask" ]
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/DUMP/FlatFieldFromImgFit.py#L133-L156
kevinpt/hdlparse
hdlparse/vhdl_parser.py
parse_vhdl_file
def parse_vhdl_file(fname): '''Parse a named VHDL file Args: fname(str): Name of file to parse Returns: Parsed objects. ''' with open(fname, 'rt') as fh: text = fh.read() return parse_vhdl(text)
python
def parse_vhdl_file(fname): '''Parse a named VHDL file Args: fname(str): Name of file to parse Returns: Parsed objects. ''' with open(fname, 'rt') as fh: text = fh.read() return parse_vhdl(text)
[ "def", "parse_vhdl_file", "(", "fname", ")", ":", "with", "open", "(", "fname", ",", "'rt'", ")", "as", "fh", ":", "text", "=", "fh", ".", "read", "(", ")", "return", "parse_vhdl", "(", "text", ")" ]
Parse a named VHDL file Args: fname(str): Name of file to parse Returns: Parsed objects.
[ "Parse", "a", "named", "VHDL", "file", "Args", ":", "fname", "(", "str", ")", ":", "Name", "of", "file", "to", "parse", "Returns", ":", "Parsed", "objects", "." ]
train
https://github.com/kevinpt/hdlparse/blob/be7cdab08a8c18815cc4504003ce9ca7fff41022/hdlparse/vhdl_parser.py#L322-L332
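A usage sketch; the two-line package below is illustrative:

from hdlparse.vhdl_parser import parse_vhdl_file

with open('demo_pkg.vhdl', 'w') as fh:
    fh.write('package demo is\n'
             '  function clog2(n : natural) return natural;\n'
             'end package;\n')
for obj in parse_vhdl_file('demo_pkg.vhdl'):
    print(type(obj).__name__, obj.name)   # expect a VhdlPackage and a VhdlFunction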
kevinpt/hdlparse
hdlparse/vhdl_parser.py
parse_vhdl
def parse_vhdl(text): '''Parse a text buffer of VHDL code Args: text(str): Source code to parse Returns: Parsed objects. ''' lex = VhdlLexer name = None kind = None saved_type = None end_param_group = False cur_package = None metacomments = [] parameters = [] param_items = [] generics = [] ports = [] sections = [] port_param_index = 0 last_item = None array_range_start_pos = 0 objects = [] for pos, action, groups in lex.run(text): if action == 'metacomment': realigned = re.sub(r'^#+', lambda m: ' ' * len(m.group(0)), groups[0]) if last_item is None: metacomments.append(realigned) else: last_item.desc = realigned if action == 'section_meta': sections.append((port_param_index, groups[0])) elif action == 'function': kind = 'function' name = groups[0] param_items = [] parameters = [] elif action == 'procedure': kind = 'procedure' name = groups[0] param_items = [] parameters = [] elif action == 'param': if end_param_group: # Complete previous parameters for i in param_items: parameters.append(i) param_items = [] end_param_group = False param_items.append(VhdlParameter(groups[1])) elif action == 'param_type': mode, ptype = groups if mode is not None: mode = mode.strip() for i in param_items: # Set mode and type for all pending parameters i.mode = mode i.data_type = ptype end_param_group = True elif action == 'param_default': for i in param_items: i.default_value = groups[0] elif action == 'end_subprogram': # Complete last parameters for i in param_items: parameters.append(i) if kind == 'function': vobj = VhdlFunction(name, cur_package, parameters, groups[0], metacomments) else: vobj = VhdlProcedure(name, cur_package, parameters, metacomments) objects.append(vobj) metacomments = [] parameters = [] param_items = [] kind = None name = None elif action == 'component': kind = 'component' name = groups[0] generics = [] ports = [] param_items = [] sections = [] port_param_index = 0 elif action == 'generic_param': param_items.append(groups[0]) elif action == 'generic_param_type': ptype = groups[0] for i in param_items: generics.append(VhdlParameter(i, 'in', ptype)) param_items = [] last_item = generics[-1] elif action == 'port_param': param_items.append(groups[0]) port_param_index += 1 elif action == 'port_param_type': mode, ptype = groups for i in param_items: ports.append(VhdlParameter(i, mode, ptype)) param_items = [] last_item = ports[-1] elif action == 'port_array_param_type': mode, ptype = groups array_range_start_pos = pos[1] elif action == 'array_range_end': arange = text[array_range_start_pos:pos[0]+1] for i in param_items: ports.append(VhdlParameter(i, mode, ptype + arange)) param_items = [] last_item = ports[-1] elif action == 'end_component': vobj = VhdlComponent(name, cur_package, ports, generics, dict(sections), metacomments) objects.append(vobj) last_item = None metacomments = [] elif action == 'package': objects.append(VhdlPackage(groups[0])) cur_package = groups[0] kind = None name = None elif action == 'type': saved_type = groups[0] elif action in ('array_type', 'file_type', 'access_type', 'record_type', 'range_type', 'enum_type', 'incomplete_type'): vobj = VhdlType(saved_type, cur_package, action, metacomments) objects.append(vobj) kind = None name = None metacomments = [] elif action == 'subtype': vobj = VhdlSubtype(groups[0], cur_package, groups[1], metacomments) objects.append(vobj) kind = None name = None metacomments = [] elif action == 'constant': vobj = VhdlConstant(groups[0], cur_package, groups[1], metacomments) objects.append(vobj) kind = None name = None metacomments = [] 
return objects
python
def parse_vhdl(text): '''Parse a text buffer of VHDL code Args: text(str): Source code to parse Returns: Parsed objects. ''' lex = VhdlLexer name = None kind = None saved_type = None end_param_group = False cur_package = None metacomments = [] parameters = [] param_items = [] generics = [] ports = [] sections = [] port_param_index = 0 last_item = None array_range_start_pos = 0 objects = [] for pos, action, groups in lex.run(text): if action == 'metacomment': realigned = re.sub(r'^#+', lambda m: ' ' * len(m.group(0)), groups[0]) if last_item is None: metacomments.append(realigned) else: last_item.desc = realigned if action == 'section_meta': sections.append((port_param_index, groups[0])) elif action == 'function': kind = 'function' name = groups[0] param_items = [] parameters = [] elif action == 'procedure': kind = 'procedure' name = groups[0] param_items = [] parameters = [] elif action == 'param': if end_param_group: # Complete previous parameters for i in param_items: parameters.append(i) param_items = [] end_param_group = False param_items.append(VhdlParameter(groups[1])) elif action == 'param_type': mode, ptype = groups if mode is not None: mode = mode.strip() for i in param_items: # Set mode and type for all pending parameters i.mode = mode i.data_type = ptype end_param_group = True elif action == 'param_default': for i in param_items: i.default_value = groups[0] elif action == 'end_subprogram': # Complete last parameters for i in param_items: parameters.append(i) if kind == 'function': vobj = VhdlFunction(name, cur_package, parameters, groups[0], metacomments) else: vobj = VhdlProcedure(name, cur_package, parameters, metacomments) objects.append(vobj) metacomments = [] parameters = [] param_items = [] kind = None name = None elif action == 'component': kind = 'component' name = groups[0] generics = [] ports = [] param_items = [] sections = [] port_param_index = 0 elif action == 'generic_param': param_items.append(groups[0]) elif action == 'generic_param_type': ptype = groups[0] for i in param_items: generics.append(VhdlParameter(i, 'in', ptype)) param_items = [] last_item = generics[-1] elif action == 'port_param': param_items.append(groups[0]) port_param_index += 1 elif action == 'port_param_type': mode, ptype = groups for i in param_items: ports.append(VhdlParameter(i, mode, ptype)) param_items = [] last_item = ports[-1] elif action == 'port_array_param_type': mode, ptype = groups array_range_start_pos = pos[1] elif action == 'array_range_end': arange = text[array_range_start_pos:pos[0]+1] for i in param_items: ports.append(VhdlParameter(i, mode, ptype + arange)) param_items = [] last_item = ports[-1] elif action == 'end_component': vobj = VhdlComponent(name, cur_package, ports, generics, dict(sections), metacomments) objects.append(vobj) last_item = None metacomments = [] elif action == 'package': objects.append(VhdlPackage(groups[0])) cur_package = groups[0] kind = None name = None elif action == 'type': saved_type = groups[0] elif action in ('array_type', 'file_type', 'access_type', 'record_type', 'range_type', 'enum_type', 'incomplete_type'): vobj = VhdlType(saved_type, cur_package, action, metacomments) objects.append(vobj) kind = None name = None metacomments = [] elif action == 'subtype': vobj = VhdlSubtype(groups[0], cur_package, groups[1], metacomments) objects.append(vobj) kind = None name = None metacomments = [] elif action == 'constant': vobj = VhdlConstant(groups[0], cur_package, groups[1], metacomments) objects.append(vobj) kind = None name = None metacomments = [] 
return objects
[ "def", "parse_vhdl", "(", "text", ")", ":", "lex", "=", "VhdlLexer", "name", "=", "None", "kind", "=", "None", "saved_type", "=", "None", "end_param_group", "=", "False", "cur_package", "=", "None", "metacomments", "=", "[", "]", "parameters", "=", "[", "]", "param_items", "=", "[", "]", "generics", "=", "[", "]", "ports", "=", "[", "]", "sections", "=", "[", "]", "port_param_index", "=", "0", "last_item", "=", "None", "array_range_start_pos", "=", "0", "objects", "=", "[", "]", "for", "pos", ",", "action", ",", "groups", "in", "lex", ".", "run", "(", "text", ")", ":", "if", "action", "==", "'metacomment'", ":", "realigned", "=", "re", ".", "sub", "(", "r'^#+'", ",", "lambda", "m", ":", "' '", "*", "len", "(", "m", ".", "group", "(", "0", ")", ")", ",", "groups", "[", "0", "]", ")", "if", "last_item", "is", "None", ":", "metacomments", ".", "append", "(", "realigned", ")", "else", ":", "last_item", ".", "desc", "=", "realigned", "if", "action", "==", "'section_meta'", ":", "sections", ".", "append", "(", "(", "port_param_index", ",", "groups", "[", "0", "]", ")", ")", "elif", "action", "==", "'function'", ":", "kind", "=", "'function'", "name", "=", "groups", "[", "0", "]", "param_items", "=", "[", "]", "parameters", "=", "[", "]", "elif", "action", "==", "'procedure'", ":", "kind", "=", "'procedure'", "name", "=", "groups", "[", "0", "]", "param_items", "=", "[", "]", "parameters", "=", "[", "]", "elif", "action", "==", "'param'", ":", "if", "end_param_group", ":", "# Complete previous parameters", "for", "i", "in", "param_items", ":", "parameters", ".", "append", "(", "i", ")", "param_items", "=", "[", "]", "end_param_group", "=", "False", "param_items", ".", "append", "(", "VhdlParameter", "(", "groups", "[", "1", "]", ")", ")", "elif", "action", "==", "'param_type'", ":", "mode", ",", "ptype", "=", "groups", "if", "mode", "is", "not", "None", ":", "mode", "=", "mode", ".", "strip", "(", ")", "for", "i", "in", "param_items", ":", "# Set mode and type for all pending parameters", "i", ".", "mode", "=", "mode", "i", ".", "data_type", "=", "ptype", "end_param_group", "=", "True", "elif", "action", "==", "'param_default'", ":", "for", "i", "in", "param_items", ":", "i", ".", "default_value", "=", "groups", "[", "0", "]", "elif", "action", "==", "'end_subprogram'", ":", "# Complete last parameters", "for", "i", "in", "param_items", ":", "parameters", ".", "append", "(", "i", ")", "if", "kind", "==", "'function'", ":", "vobj", "=", "VhdlFunction", "(", "name", ",", "cur_package", ",", "parameters", ",", "groups", "[", "0", "]", ",", "metacomments", ")", "else", ":", "vobj", "=", "VhdlProcedure", "(", "name", ",", "cur_package", ",", "parameters", ",", "metacomments", ")", "objects", ".", "append", "(", "vobj", ")", "metacomments", "=", "[", "]", "parameters", "=", "[", "]", "param_items", "=", "[", "]", "kind", "=", "None", "name", "=", "None", "elif", "action", "==", "'component'", ":", "kind", "=", "'component'", "name", "=", "groups", "[", "0", "]", "generics", "=", "[", "]", "ports", "=", "[", "]", "param_items", "=", "[", "]", "sections", "=", "[", "]", "port_param_index", "=", "0", "elif", "action", "==", "'generic_param'", ":", "param_items", ".", "append", "(", "groups", "[", "0", "]", ")", "elif", "action", "==", "'generic_param_type'", ":", "ptype", "=", "groups", "[", "0", "]", "for", "i", "in", "param_items", ":", "generics", ".", "append", "(", "VhdlParameter", "(", "i", ",", "'in'", ",", "ptype", ")", ")", "param_items", "=", "[", "]", "last_item", "=", "generics", "[", "-", 
"1", "]", "elif", "action", "==", "'port_param'", ":", "param_items", ".", "append", "(", "groups", "[", "0", "]", ")", "port_param_index", "+=", "1", "elif", "action", "==", "'port_param_type'", ":", "mode", ",", "ptype", "=", "groups", "for", "i", "in", "param_items", ":", "ports", ".", "append", "(", "VhdlParameter", "(", "i", ",", "mode", ",", "ptype", ")", ")", "param_items", "=", "[", "]", "last_item", "=", "ports", "[", "-", "1", "]", "elif", "action", "==", "'port_array_param_type'", ":", "mode", ",", "ptype", "=", "groups", "array_range_start_pos", "=", "pos", "[", "1", "]", "elif", "action", "==", "'array_range_end'", ":", "arange", "=", "text", "[", "array_range_start_pos", ":", "pos", "[", "0", "]", "+", "1", "]", "for", "i", "in", "param_items", ":", "ports", ".", "append", "(", "VhdlParameter", "(", "i", ",", "mode", ",", "ptype", "+", "arange", ")", ")", "param_items", "=", "[", "]", "last_item", "=", "ports", "[", "-", "1", "]", "elif", "action", "==", "'end_component'", ":", "vobj", "=", "VhdlComponent", "(", "name", ",", "cur_package", ",", "ports", ",", "generics", ",", "dict", "(", "sections", ")", ",", "metacomments", ")", "objects", ".", "append", "(", "vobj", ")", "last_item", "=", "None", "metacomments", "=", "[", "]", "elif", "action", "==", "'package'", ":", "objects", ".", "append", "(", "VhdlPackage", "(", "groups", "[", "0", "]", ")", ")", "cur_package", "=", "groups", "[", "0", "]", "kind", "=", "None", "name", "=", "None", "elif", "action", "==", "'type'", ":", "saved_type", "=", "groups", "[", "0", "]", "elif", "action", "in", "(", "'array_type'", ",", "'file_type'", ",", "'access_type'", ",", "'record_type'", ",", "'range_type'", ",", "'enum_type'", ",", "'incomplete_type'", ")", ":", "vobj", "=", "VhdlType", "(", "saved_type", ",", "cur_package", ",", "action", ",", "metacomments", ")", "objects", ".", "append", "(", "vobj", ")", "kind", "=", "None", "name", "=", "None", "metacomments", "=", "[", "]", "elif", "action", "==", "'subtype'", ":", "vobj", "=", "VhdlSubtype", "(", "groups", "[", "0", "]", ",", "cur_package", ",", "groups", "[", "1", "]", ",", "metacomments", ")", "objects", ".", "append", "(", "vobj", ")", "kind", "=", "None", "name", "=", "None", "metacomments", "=", "[", "]", "elif", "action", "==", "'constant'", ":", "vobj", "=", "VhdlConstant", "(", "groups", "[", "0", "]", ",", "cur_package", ",", "groups", "[", "1", "]", ",", "metacomments", ")", "objects", ".", "append", "(", "vobj", ")", "kind", "=", "None", "name", "=", "None", "metacomments", "=", "[", "]", "return", "objects" ]
Parse a text buffer of VHDL code Args: text(str): Source code to parse Returns: Parsed objects.
[ "Parse", "a", "text", "buffer", "of", "VHDL", "code" ]
train
https://github.com/kevinpt/hdlparse/blob/be7cdab08a8c18815cc4504003ce9ca7fff41022/hdlparse/vhdl_parser.py#L334-L508
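The parser is a small event loop over the lexer stream: each action updates pending name/mode/type state, and the end_* actions flush that state into an object. A sketch feeding it a component whose port carries a --# metacomment (the VhdlParameter attribute names are assumed from the constructor calls above):

from hdlparse.vhdl_parser import parse_vhdl, VhdlComponent

text = '''
package demo is
  component counter is
    port (
      clk : in std_ulogic; --# system clock
      q   : out unsigned(7 downto 0)
    );
  end component;
end package;
'''
comp = [o for o in parse_vhdl(text) if isinstance(o, VhdlComponent)][0]
for p in comp.ports:
    print(p.name, p.mode, p.data_type, p.desc)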
kevinpt/hdlparse
hdlparse/vhdl_parser.py
subprogram_prototype
def subprogram_prototype(vo): '''Generate a canonical prototype string Args: vo (VhdlFunction, VhdlProcedure): Subprogram object Returns: Prototype string. ''' plist = '; '.join(str(p) for p in vo.parameters) if isinstance(vo, VhdlFunction): if len(vo.parameters) > 0: proto = 'function {}({}) return {};'.format(vo.name, plist, vo.return_type) else: proto = 'function {} return {};'.format(vo.name, vo.return_type) else: # procedure proto = 'procedure {}({});'.format(vo.name, plist) return proto
python
def subprogram_prototype(vo): '''Generate a canonical prototype string Args: vo (VhdlFunction, VhdlProcedure): Subprogram object Returns: Prototype string. ''' plist = '; '.join(str(p) for p in vo.parameters) if isinstance(vo, VhdlFunction): if len(vo.parameters) > 0: proto = 'function {}({}) return {};'.format(vo.name, plist, vo.return_type) else: proto = 'function {} return {};'.format(vo.name, vo.return_type) else: # procedure proto = 'procedure {}({});'.format(vo.name, plist) return proto
[ "def", "subprogram_prototype", "(", "vo", ")", ":", "plist", "=", "'; '", ".", "join", "(", "str", "(", "p", ")", "for", "p", "in", "vo", ".", "parameters", ")", "if", "isinstance", "(", "vo", ",", "VhdlFunction", ")", ":", "if", "len", "(", "vo", ".", "parameters", ")", ">", "0", ":", "proto", "=", "'function {}({}) return {};'", ".", "format", "(", "vo", ".", "name", ",", "plist", ",", "vo", ".", "return_type", ")", "else", ":", "proto", "=", "'function {} return {};'", ".", "format", "(", "vo", ".", "name", ",", "vo", ".", "return_type", ")", "else", ":", "# procedure", "proto", "=", "'procedure {}({});'", ".", "format", "(", "vo", ".", "name", ",", "plist", ")", "return", "proto" ]
Generate a canonical prototype string Args: vo (VhdlFunction, VhdlProcedure): Subprogram object Returns: Prototype string.
[ "Generate", "a", "canonical", "prototype", "string", "Args", ":", "vo", "(", "VhdlFunction", "VhdlProcedure", ")", ":", "Subprogram", "object", "Returns", ":", "Prototype", "string", "." ]
train
https://github.com/kevinpt/hdlparse/blob/be7cdab08a8c18815cc4504003ce9ca7fff41022/hdlparse/vhdl_parser.py#L511-L531
kevinpt/hdlparse
hdlparse/vhdl_parser.py
subprogram_signature
def subprogram_signature(vo, fullname=None): '''Generate a signature string Args: vo (VhdlFunction, VhdlProcedure): Subprogram object Returns: Signature string. ''' if fullname is None: fullname = vo.name if isinstance(vo, VhdlFunction): plist = ','.join(p.data_type for p in vo.parameters) sig = '{}[{} return {}]'.format(fullname, plist, vo.return_type) else: # procedure plist = ','.join(p.data_type for p in vo.parameters) sig = '{}[{}]'.format(fullname, plist) return sig
python
def subprogram_signature(vo, fullname=None): '''Generate a signature string Args: vo (VhdlFunction, VhdlProcedure): Subprogram object Returns: Signature string. ''' if fullname is None: fullname = vo.name if isinstance(vo, VhdlFunction): plist = ','.join(p.data_type for p in vo.parameters) sig = '{}[{} return {}]'.format(fullname, plist, vo.return_type) else: # procedure plist = ','.join(p.data_type for p in vo.parameters) sig = '{}[{}]'.format(fullname, plist) return sig
[ "def", "subprogram_signature", "(", "vo", ",", "fullname", "=", "None", ")", ":", "if", "fullname", "is", "None", ":", "fullname", "=", "vo", ".", "name", "if", "isinstance", "(", "vo", ",", "VhdlFunction", ")", ":", "plist", "=", "','", ".", "join", "(", "p", ".", "data_type", "for", "p", "in", "vo", ".", "parameters", ")", "sig", "=", "'{}[{} return {}]'", ".", "format", "(", "fullname", ",", "plist", ",", "vo", ".", "return_type", ")", "else", ":", "# procedure", "plist", "=", "','", ".", "join", "(", "p", ".", "data_type", "for", "p", "in", "vo", ".", "parameters", ")", "sig", "=", "'{}[{}]'", ".", "format", "(", "fullname", ",", "plist", ")", "return", "sig" ]
Generate a signature string Args: vo (VhdlFunction, VhdlProcedure): Subprogram object Returns: Signature string.
[ "Generate", "a", "signature", "string", "Args", ":", "vo", "(", "VhdlFunction", "VhdlProcedure", ")", ":", "Subprogram", "object", "Returns", ":", "Signature", "string", "." ]
train
https://github.com/kevinpt/hdlparse/blob/be7cdab08a8c18815cc4504003ce9ca7fff41022/hdlparse/vhdl_parser.py#L533-L552
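subprogram_prototype and subprogram_signature render the same object two ways: a declaration-like string versus a compact key suitable for distinguishing overloads. A sketch (the exact parameter formatting comes from VhdlParameter.__str__, which is not part of these records, so the comments below are indicative):

from hdlparse.vhdl_parser import (parse_vhdl, subprogram_prototype,
                                  subprogram_signature, VhdlFunction)

text = 'package p is function add(a : integer; b : integer) return integer; end package;'
fn = [o for o in parse_vhdl(text) if isinstance(o, VhdlFunction)][0]
print(subprogram_prototype(fn))  # roughly: function add(a : integer; b : integer) return integer;
print(subprogram_signature(fn))  # add[integer,integer return integer]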
kevinpt/hdlparse
hdlparse/vhdl_parser.py
VhdlExtractor.extract_objects_from_source
def extract_objects_from_source(self, text, type_filter=None): '''Extract object declarations from a text buffer Args: text (str): Source code to parse type_filter (class, optional): Object class to filter results Returns: List of parsed objects. ''' objects = parse_vhdl(text) self._register_array_types(objects) if type_filter: objects = [o for o in objects if isinstance(o, type_filter)] return objects
python
def extract_objects_from_source(self, text, type_filter=None): '''Extract object declarations from a text buffer Args: text (str): Source code to parse type_filter (class, optional): Object class to filter results Returns: List of parsed objects. ''' objects = parse_vhdl(text) self._register_array_types(objects) if type_filter: objects = [o for o in objects if isinstance(o, type_filter)] return objects
[ "def", "extract_objects_from_source", "(", "self", ",", "text", ",", "type_filter", "=", "None", ")", ":", "objects", "=", "parse_vhdl", "(", "text", ")", "self", ".", "_register_array_types", "(", "objects", ")", "if", "type_filter", ":", "objects", "=", "[", "o", "for", "o", "in", "objects", "if", "isinstance", "(", "o", ",", "type_filter", ")", "]", "return", "objects" ]
Extract object declarations from a text buffer Args: text (str): Source code to parse type_filter (class, optional): Object class to filter results Returns: List of parsed objects.
[ "Extract", "object", "declarations", "from", "a", "text", "buffer" ]
train
https://github.com/kevinpt/hdlparse/blob/be7cdab08a8c18815cc4504003ce9ca7fff41022/hdlparse/vhdl_parser.py#L603-L618
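type_filter narrows the result list to one object class; a sketch keeping only components (VhdlExtractor is assumed to be constructible without arguments):

from hdlparse.vhdl_parser import VhdlExtractor, VhdlComponent

text = '''
package demo is
  component blinker is
    port (clk : in std_ulogic; led : out std_ulogic);
  end component;
end package;
'''
comps = VhdlExtractor().extract_objects_from_source(text, VhdlComponent)
print([c.name for c in comps])   # ['blinker']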
kevinpt/hdlparse
hdlparse/vhdl_parser.py
VhdlExtractor.is_array
def is_array(self, data_type): '''Check if a type is a known array type Args: data_type (str): Name of type to check Returns: True if ``data_type`` is a known array type. ''' # Split off any brackets data_type = data_type.split('[')[0].strip() return data_type.lower() in self.array_types
python
def is_array(self, data_type): '''Check if a type is a known array type Args: data_type (str): Name of type to check Returns: True if ``data_type`` is a known array type. ''' # Split off any brackets data_type = data_type.split('[')[0].strip() return data_type.lower() in self.array_types
[ "def", "is_array", "(", "self", ",", "data_type", ")", ":", "# Split off any brackets", "data_type", "=", "data_type", ".", "split", "(", "'['", ")", "[", "0", "]", ".", "strip", "(", ")", "return", "data_type", ".", "lower", "(", ")", "in", "self", ".", "array_types" ]
Check if a type is a known array type Args: data_type (str): Name of type to check Returns: True if ``data_type`` is a known array type.
[ "Check", "if", "a", "type", "is", "a", "known", "array", "type", "Args", ":", "data_type", "(", "str", ")", ":", "Name", "of", "type", "to", "check", "Returns", ":", "True", "if", "data_type", "is", "a", "known", "array", "type", "." ]
train
https://github.com/kevinpt/hdlparse/blob/be7cdab08a8c18815cc4504003ce9ca7fff41022/hdlparse/vhdl_parser.py#L621-L633
kevinpt/hdlparse
hdlparse/vhdl_parser.py
VhdlExtractor.load_array_types
def load_array_types(self, fname): '''Load file of previously extracted data types Args: fname (str): Name of file to load array database from ''' type_defs = '' with open(fname, 'rt') as fh: type_defs = fh.read() try: type_defs = ast.literal_eval(type_defs) except SyntaxError: type_defs = {} self._add_array_types(type_defs)
python
def load_array_types(self, fname): '''Load file of previously extracted data types Args: fname (str): Name of file to load array database from ''' type_defs = '' with open(fname, 'rt') as fh: type_defs = fh.read() try: type_defs = ast.literal_eval(type_defs) except SyntaxError: type_defs = {} self._add_array_types(type_defs)
[ "def", "load_array_types", "(", "self", ",", "fname", ")", ":", "type_defs", "=", "''", "with", "open", "(", "fname", ",", "'rt'", ")", "as", "fh", ":", "type_defs", "=", "fh", ".", "read", "(", ")", "try", ":", "type_defs", "=", "ast", ".", "literal_eval", "(", "type_defs", ")", "except", "SyntaxError", ":", "type_defs", "=", "{", "}", "self", ".", "_add_array_types", "(", "type_defs", ")" ]
Load file of previously extracted data types Args: fname (str): Name of file to load array database from
[ "Load", "file", "of", "previously", "extracted", "data", "types", "Args", ":", "fname", "(", "str", ")", ":", "Name", "of", "file", "to", "load", "array", "database", "from" ]
train
https://github.com/kevinpt/hdlparse/blob/be7cdab08a8c18815cc4504003ce9ca7fff41022/hdlparse/vhdl_parser.py#L645-L660
kevinpt/hdlparse
hdlparse/vhdl_parser.py
VhdlExtractor.save_array_types
def save_array_types(self, fname): '''Save array type registry to a file Args: fname (str): Name of file to save array database to ''' type_defs = {'arrays': sorted(list(self.array_types))} with open(fname, 'wt') as fh: pprint(type_defs, stream=fh)
python
def save_array_types(self, fname): '''Save array type registry to a file Args: fname (str): Name of file to save array database to ''' type_defs = {'arrays': sorted(list(self.array_types))} with open(fname, 'wt') as fh: pprint(type_defs, stream=fh)
[ "def", "save_array_types", "(", "self", ",", "fname", ")", ":", "type_defs", "=", "{", "'arrays'", ":", "sorted", "(", "list", "(", "self", ".", "array_types", ")", ")", "}", "with", "open", "(", "fname", ",", "'wt'", ")", "as", "fh", ":", "pprint", "(", "type_defs", ",", "stream", "=", "fh", ")" ]
Save array type registry to a file Args: fname (str): Name of file to save array database to
[ "Save", "array", "type", "registry", "to", "a", "file", "Args", ":", "fname", "(", "str", ")", ":", "Name", "of", "file", "to", "save", "array", "database", "to" ]
train
https://github.com/kevinpt/hdlparse/blob/be7cdab08a8c18815cc4504003ce9ca7fff41022/hdlparse/vhdl_parser.py#L662-L670
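The registry round-trips through a plain text file: save_array_types pprints a dict literal and load_array_types parses it back with ast.literal_eval. A sketch of save, reload, and an is_array query (the array type declaration is illustrative):

from hdlparse.vhdl_parser import VhdlExtractor

ve = VhdlExtractor()
ve.extract_objects_from_source(
    'package p is type word_vec is array(natural range <>) of integer; end package;')
ve.save_array_types('arrays.db')     # written as {'arrays': ['word_vec']}

ve2 = VhdlExtractor()
ve2.load_array_types('arrays.db')
print(ve2.is_array('word_vec'))      # True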
kevinpt/hdlparse
hdlparse/vhdl_parser.py
VhdlExtractor._register_array_types
def _register_array_types(self, objects):
  '''Add array type definitions to internal registry

  Args:
    objects (list of VhdlType or VhdlSubtype): Array types to track
  '''
  # Add all array types directly
  types = [o for o in objects if isinstance(o, VhdlType) and o.type_of == 'array_type']
  for t in types:
    self.array_types.add(t.name)

  subtypes = {o.name:o.base_type for o in objects if isinstance(o, VhdlSubtype)}

  # Find all subtypes of an array type
  for k,v in subtypes.items():
    while v in subtypes: # Follow subtypes of subtypes
      v = subtypes[v]
    if v in self.array_types:
      self.array_types.add(k)
python
def _register_array_types(self, objects):
  '''Add array type definitions to internal registry

  Args:
    objects (list of VhdlType or VhdlSubtype): Array types to track
  '''
  # Add all array types directly
  types = [o for o in objects if isinstance(o, VhdlType) and o.type_of == 'array_type']
  for t in types:
    self.array_types.add(t.name)

  subtypes = {o.name:o.base_type for o in objects if isinstance(o, VhdlSubtype)}

  # Find all subtypes of an array type
  for k,v in subtypes.items():
    while v in subtypes: # Follow subtypes of subtypes
      v = subtypes[v]
    if v in self.array_types:
      self.array_types.add(k)
[ "def", "_register_array_types", "(", "self", ",", "objects", ")", ":", "# Add all array types directly", "types", "=", "[", "o", "for", "o", "in", "objects", "if", "isinstance", "(", "o", ",", "VhdlType", ")", "and", "o", ".", "type_of", "==", "'array_type'", "]", "for", "t", "in", "types", ":", "self", ".", "array_types", ".", "add", "(", "t", ".", "name", ")", "subtypes", "=", "{", "o", ".", "name", ":", "o", ".", "base_type", "for", "o", "in", "objects", "if", "isinstance", "(", "o", ",", "VhdlSubtype", ")", "}", "# Find all subtypes of an array type", "for", "k", ",", "v", "in", "subtypes", ".", "items", "(", ")", ":", "while", "v", "in", "subtypes", ":", "# Follow subtypes of subtypes", "v", "=", "subtypes", "[", "v", "]", "if", "v", "in", "self", ".", "array_types", ":", "self", ".", "array_types", ".", "add", "(", "k", ")" ]
Add array type definitions to internal registry Args: objects (list of VhdlType or VhdlSubtype): Array types to track
[ "Add", "array", "type", "definitions", "to", "internal", "registry", "Args", ":", "objects", "(", "list", "of", "VhdlType", "or", "VhdlSubtype", ")", ":", "Array", "types", "to", "track" ]
train
https://github.com/kevinpt/hdlparse/blob/be7cdab08a8c18815cc4504003ce9ca7fff41022/hdlparse/vhdl_parser.py#L672-L690
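The subtype loop chases chains (a subtype of a subtype of an array) until it bottoms out, then marks every chain member whose root is a known array. The resolution logic in isolation:

array_types = {'word_vec'}
subtypes = {'byte_vec': 'word_vec', 'short_vec': 'byte_vec'}
for name, base in subtypes.items():
    while base in subtypes:        # follow subtype-of-subtype links
        base = subtypes[base]
    if base in array_types:
        array_types.add(name)
print(sorted(array_types))         # ['byte_vec', 'short_vec', 'word_vec']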
kevinpt/hdlparse
hdlparse/vhdl_parser.py
VhdlExtractor.register_array_types_from_sources
def register_array_types_from_sources(self, source_files): '''Add array type definitions from a file list to internal registry Args: source_files (list of str): Files to parse for array definitions ''' for fname in source_files: if is_vhdl(fname): self._register_array_types(self.extract_objects(fname))
python
def register_array_types_from_sources(self, source_files): '''Add array type definitions from a file list to internal registry Args: source_files (list of str): Files to parse for array definitions ''' for fname in source_files: if is_vhdl(fname): self._register_array_types(self.extract_objects(fname))
[ "def", "register_array_types_from_sources", "(", "self", ",", "source_files", ")", ":", "for", "fname", "in", "source_files", ":", "if", "is_vhdl", "(", "fname", ")", ":", "self", ".", "_register_array_types", "(", "self", ".", "extract_objects", "(", "fname", ")", ")" ]
Add array type definitions from a file list to internal registry Args: source_files (list of str): Files to parse for array definitions
[ "Add", "array", "type", "definitions", "from", "a", "file", "list", "to", "internal", "registry" ]
train
https://github.com/kevinpt/hdlparse/blob/be7cdab08a8c18815cc4504003ce9ca7fff41022/hdlparse/vhdl_parser.py#L692-L700
kevinpt/hdlparse
hdlparse/minilexer.py
MiniLexer.run
def run(self, text): '''Run lexer rules against a source text Args: text (str): Text to apply lexer to Yields: A sequence of lexer matches. ''' stack = ['root'] pos = 0 patterns = self.tokens[stack[-1]] while True: for pat, action, new_state in patterns: m = pat.match(text, pos) if m: if action: #print('## MATCH: {} -> {}'.format(m.group(), action)) yield (pos, m.end()-1), action, m.groups() pos = m.end() if new_state: if isinstance(new_state, int): # Pop states del stack[new_state:] else: stack.append(new_state) #print('## CHANGE STATE:', pos, new_state, stack) patterns = self.tokens[stack[-1]] break else: try: if text[pos] == '\n': pos += 1 continue pos += 1 except IndexError: break
python
def run(self, text): '''Run lexer rules against a source text Args: text (str): Text to apply lexer to Yields: A sequence of lexer matches. ''' stack = ['root'] pos = 0 patterns = self.tokens[stack[-1]] while True: for pat, action, new_state in patterns: m = pat.match(text, pos) if m: if action: #print('## MATCH: {} -> {}'.format(m.group(), action)) yield (pos, m.end()-1), action, m.groups() pos = m.end() if new_state: if isinstance(new_state, int): # Pop states del stack[new_state:] else: stack.append(new_state) #print('## CHANGE STATE:', pos, new_state, stack) patterns = self.tokens[stack[-1]] break else: try: if text[pos] == '\n': pos += 1 continue pos += 1 except IndexError: break
[ "def", "run", "(", "self", ",", "text", ")", ":", "stack", "=", "[", "'root'", "]", "pos", "=", "0", "patterns", "=", "self", ".", "tokens", "[", "stack", "[", "-", "1", "]", "]", "while", "True", ":", "for", "pat", ",", "action", ",", "new_state", "in", "patterns", ":", "m", "=", "pat", ".", "match", "(", "text", ",", "pos", ")", "if", "m", ":", "if", "action", ":", "#print('## MATCH: {} -> {}'.format(m.group(), action))", "yield", "(", "pos", ",", "m", ".", "end", "(", ")", "-", "1", ")", ",", "action", ",", "m", ".", "groups", "(", ")", "pos", "=", "m", ".", "end", "(", ")", "if", "new_state", ":", "if", "isinstance", "(", "new_state", ",", "int", ")", ":", "# Pop states", "del", "stack", "[", "new_state", ":", "]", "else", ":", "stack", ".", "append", "(", "new_state", ")", "#print('## CHANGE STATE:', pos, new_state, stack)", "patterns", "=", "self", ".", "tokens", "[", "stack", "[", "-", "1", "]", "]", "break", "else", ":", "try", ":", "if", "text", "[", "pos", "]", "==", "'\\n'", ":", "pos", "+=", "1", "continue", "pos", "+=", "1", "except", "IndexError", ":", "break" ]
Run lexer rules against a source text Args: text (str): Text to apply lexer to Yields: A sequence of lexer matches.
[ "Run", "lexer", "rules", "against", "a", "source", "text" ]
train
https://github.com/kevinpt/hdlparse/blob/be7cdab08a8c18815cc4504003ce9ca7fff41022/hdlparse/minilexer.py#L43-L86
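run() drives a pushdown automaton: a matched rule may push a new state (a string) or pop states (a negative int used as a slice index), and only rules with a truthy action are yielded. A toy lexer sketch; the MiniLexer constructor is not part of this record, so it is assumed here to accept the state -> [(regex, action, next_state)] table that run() reads and to pre-compile the patterns:

from hdlparse.minilexer import MiniLexer

lex = MiniLexer({
    'root': [
        (r'/\*', None, 'comment'),   # push into the comment state
        (r'(\w+)', 'word', None),
    ],
    'comment': [
        (r'\*/', None, -1),          # pop back to root
        (r'.', None, None),          # swallow comment text silently
    ],
})
for (start, end), action, groups in lex.run('abc /* skip */ def'):
    print(action, groups)            # word ('abc',) then word ('def',)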
kevinpt/hdlparse
doc/conf.py
get_package_version
def get_package_version(verfile): '''Scan the script for the version string''' version = None with open(verfile) as fh: try: version = [line.split('=')[1].strip().strip("'") for line in fh if \ line.startswith('__version__')][0] except IndexError: pass return version
python
def get_package_version(verfile): '''Scan the script for the version string''' version = None with open(verfile) as fh: try: version = [line.split('=')[1].strip().strip("'") for line in fh if \ line.startswith('__version__')][0] except IndexError: pass return version
[ "def", "get_package_version", "(", "verfile", ")", ":", "version", "=", "None", "with", "open", "(", "verfile", ")", "as", "fh", ":", "try", ":", "version", "=", "[", "line", ".", "split", "(", "'='", ")", "[", "1", "]", ".", "strip", "(", ")", ".", "strip", "(", "\"'\"", ")", "for", "line", "in", "fh", "if", "line", ".", "startswith", "(", "'__version__'", ")", "]", "[", "0", "]", "except", "IndexError", ":", "pass", "return", "version" ]
Scan the script for the version string
[ "Scan", "the", "script", "for", "the", "version", "string" ]
train
https://github.com/kevinpt/hdlparse/blob/be7cdab08a8c18815cc4504003ce9ca7fff41022/doc/conf.py#L57-L66
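A sketch (the function lives in doc/conf.py rather than an installed package, so assume it has been copied into scope):

with open('version.py', 'w') as fh:
    fh.write("__version__ = '1.0.4'\n")
print(get_package_version('version.py'))   # -> 1.0.4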
kevinpt/hdlparse
hdlparse/verilog_parser.py
parse_verilog_file
def parse_verilog_file(fname): '''Parse a named Verilog file Args: fname (str): File to parse. Returns: List of parsed objects. ''' with open(fname, 'rt') as fh: text = fh.read() return parse_verilog(text)
python
def parse_verilog_file(fname): '''Parse a named Verilog file Args: fname (str): File to parse. Returns: List of parsed objects. ''' with open(fname, 'rt') as fh: text = fh.read() return parse_verilog(text)
[ "def", "parse_verilog_file", "(", "fname", ")", ":", "with", "open", "(", "fname", ",", "'rt'", ")", "as", "fh", ":", "text", "=", "fh", ".", "read", "(", ")", "return", "parse_verilog", "(", "text", ")" ]
Parse a named Verilog file Args: fname (str): File to parse. Returns: List of parsed objects.
[ "Parse", "a", "named", "Verilog", "file", "Args", ":", "fname", "(", "str", ")", ":", "File", "to", "parse", ".", "Returns", ":", "List", "of", "parsed", "objects", "." ]
train
https://github.com/kevinpt/hdlparse/blob/be7cdab08a8c18815cc4504003ce9ca7fff41022/hdlparse/verilog_parser.py#L90-L100
kevinpt/hdlparse
hdlparse/verilog_parser.py
parse_verilog
def parse_verilog(text): '''Parse a text buffer of Verilog code Args: text (str): Source code to parse Returns: List of parsed objects. ''' lex = VerilogLexer name = None kind = None saved_type = None mode = 'input' ptype = 'wire' metacomments = [] parameters = [] param_items = [] generics = [] ports = collections.OrderedDict() sections = [] port_param_index = 0 last_item = None array_range_start_pos = 0 objects = [] for pos, action, groups in lex.run(text): if action == 'metacomment': if last_item is None: metacomments.append(groups[0]) else: last_item.desc = groups[0] if action == 'section_meta': sections.append((port_param_index, groups[0])) elif action == 'module': kind = 'module' name = groups[0] generics = [] ports = collections.OrderedDict() param_items = [] sections = [] port_param_index = 0 elif action == 'parameter_start': net_type, vec_range = groups new_ptype = '' if net_type is not None: new_ptype += net_type if vec_range is not None: new_ptype += ' ' + vec_range ptype = new_ptype elif action == 'param_item': generics.append(VerilogParameter(groups[0], 'in', ptype)) elif action == 'module_port_start': new_mode, net_type, signed, vec_range = groups new_ptype = '' if net_type is not None: new_ptype += net_type if signed is not None: new_ptype += ' ' + signed if vec_range is not None: new_ptype += ' ' + vec_range # Complete pending items for i in param_items: ports[i] = VerilogParameter(i, mode, ptype) param_items = [] if len(ports) > 0: last_item = next(reversed(ports)) # Start with new mode mode = new_mode ptype = new_ptype elif action == 'port_param': ident = groups[0] param_items.append(ident) port_param_index += 1 elif action == 'end_module': # Finish any pending ports for i in param_items: ports[i] = VerilogParameter(i, mode, ptype) vobj = VerilogModule(name, ports.values(), generics, dict(sections), metacomments) objects.append(vobj) last_item = None metacomments = [] return objects
python
def parse_verilog(text): '''Parse a text buffer of Verilog code Args: text (str): Source code to parse Returns: List of parsed objects. ''' lex = VerilogLexer name = None kind = None saved_type = None mode = 'input' ptype = 'wire' metacomments = [] parameters = [] param_items = [] generics = [] ports = collections.OrderedDict() sections = [] port_param_index = 0 last_item = None array_range_start_pos = 0 objects = [] for pos, action, groups in lex.run(text): if action == 'metacomment': if last_item is None: metacomments.append(groups[0]) else: last_item.desc = groups[0] if action == 'section_meta': sections.append((port_param_index, groups[0])) elif action == 'module': kind = 'module' name = groups[0] generics = [] ports = collections.OrderedDict() param_items = [] sections = [] port_param_index = 0 elif action == 'parameter_start': net_type, vec_range = groups new_ptype = '' if net_type is not None: new_ptype += net_type if vec_range is not None: new_ptype += ' ' + vec_range ptype = new_ptype elif action == 'param_item': generics.append(VerilogParameter(groups[0], 'in', ptype)) elif action == 'module_port_start': new_mode, net_type, signed, vec_range = groups new_ptype = '' if net_type is not None: new_ptype += net_type if signed is not None: new_ptype += ' ' + signed if vec_range is not None: new_ptype += ' ' + vec_range # Complete pending items for i in param_items: ports[i] = VerilogParameter(i, mode, ptype) param_items = [] if len(ports) > 0: last_item = next(reversed(ports)) # Start with new mode mode = new_mode ptype = new_ptype elif action == 'port_param': ident = groups[0] param_items.append(ident) port_param_index += 1 elif action == 'end_module': # Finish any pending ports for i in param_items: ports[i] = VerilogParameter(i, mode, ptype) vobj = VerilogModule(name, ports.values(), generics, dict(sections), metacomments) objects.append(vobj) last_item = None metacomments = [] return objects
[ "def", "parse_verilog", "(", "text", ")", ":", "lex", "=", "VerilogLexer", "name", "=", "None", "kind", "=", "None", "saved_type", "=", "None", "mode", "=", "'input'", "ptype", "=", "'wire'", "metacomments", "=", "[", "]", "parameters", "=", "[", "]", "param_items", "=", "[", "]", "generics", "=", "[", "]", "ports", "=", "collections", ".", "OrderedDict", "(", ")", "sections", "=", "[", "]", "port_param_index", "=", "0", "last_item", "=", "None", "array_range_start_pos", "=", "0", "objects", "=", "[", "]", "for", "pos", ",", "action", ",", "groups", "in", "lex", ".", "run", "(", "text", ")", ":", "if", "action", "==", "'metacomment'", ":", "if", "last_item", "is", "None", ":", "metacomments", ".", "append", "(", "groups", "[", "0", "]", ")", "else", ":", "last_item", ".", "desc", "=", "groups", "[", "0", "]", "if", "action", "==", "'section_meta'", ":", "sections", ".", "append", "(", "(", "port_param_index", ",", "groups", "[", "0", "]", ")", ")", "elif", "action", "==", "'module'", ":", "kind", "=", "'module'", "name", "=", "groups", "[", "0", "]", "generics", "=", "[", "]", "ports", "=", "collections", ".", "OrderedDict", "(", ")", "param_items", "=", "[", "]", "sections", "=", "[", "]", "port_param_index", "=", "0", "elif", "action", "==", "'parameter_start'", ":", "net_type", ",", "vec_range", "=", "groups", "new_ptype", "=", "''", "if", "net_type", "is", "not", "None", ":", "new_ptype", "+=", "net_type", "if", "vec_range", "is", "not", "None", ":", "new_ptype", "+=", "' '", "+", "vec_range", "ptype", "=", "new_ptype", "elif", "action", "==", "'param_item'", ":", "generics", ".", "append", "(", "VerilogParameter", "(", "groups", "[", "0", "]", ",", "'in'", ",", "ptype", ")", ")", "elif", "action", "==", "'module_port_start'", ":", "new_mode", ",", "net_type", ",", "signed", ",", "vec_range", "=", "groups", "new_ptype", "=", "''", "if", "net_type", "is", "not", "None", ":", "new_ptype", "+=", "net_type", "if", "signed", "is", "not", "None", ":", "new_ptype", "+=", "' '", "+", "signed", "if", "vec_range", "is", "not", "None", ":", "new_ptype", "+=", "' '", "+", "vec_range", "# Complete pending items", "for", "i", "in", "param_items", ":", "ports", "[", "i", "]", "=", "VerilogParameter", "(", "i", ",", "mode", ",", "ptype", ")", "param_items", "=", "[", "]", "if", "len", "(", "ports", ")", ">", "0", ":", "last_item", "=", "next", "(", "reversed", "(", "ports", ")", ")", "# Start with new mode", "mode", "=", "new_mode", "ptype", "=", "new_ptype", "elif", "action", "==", "'port_param'", ":", "ident", "=", "groups", "[", "0", "]", "param_items", ".", "append", "(", "ident", ")", "port_param_index", "+=", "1", "elif", "action", "==", "'end_module'", ":", "# Finish any pending ports", "for", "i", "in", "param_items", ":", "ports", "[", "i", "]", "=", "VerilogParameter", "(", "i", ",", "mode", ",", "ptype", ")", "vobj", "=", "VerilogModule", "(", "name", ",", "ports", ".", "values", "(", ")", ",", "generics", ",", "dict", "(", "sections", ")", ",", "metacomments", ")", "objects", ".", "append", "(", "vobj", ")", "last_item", "=", "None", "metacomments", "=", "[", "]", "return", "objects" ]
Parse a text buffer of Verilog code Args: text (str): Source code to parse Returns: List of parsed objects.
[ "Parse", "a", "text", "buffer", "of", "Verilog", "code" ]
train
https://github.com/kevinpt/hdlparse/blob/be7cdab08a8c18815cc4504003ce9ca7fff41022/hdlparse/verilog_parser.py#L102-L206
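A sketch mirroring the VHDL example: a //# metacomment attaches to the module, parameters become generics, and the OrderedDict keeps ports in declaration order (VerilogModule/VerilogParameter attribute names are assumed from the constructor calls above):

from hdlparse.verilog_parser import parse_verilog

text = '''
//# A tiny counter
module counter #(parameter WIDTH = 8) (
  input wire clk,
  output reg [WIDTH-1:0] q
);
endmodule
'''
mod = parse_verilog(text)[0]
print(mod.name)                                             # counter
print([(p.name, p.mode, p.data_type) for p in mod.ports])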