Dataset columns:

| column | type | stats |
|---|---|---|
| repository_name | string | lengths 7–55 |
| func_path_in_repository | string | lengths 4–223 |
| func_name | string | lengths 1–134 |
| whole_func_string | string | lengths 75–104k |
| language | string | 1 class ("python") |
| func_code_string | string | lengths 75–104k |
| func_code_tokens | list | lengths 19–28.4k |
| func_documentation_string | string | lengths 1–46.9k |
| func_documentation_tokens | list | lengths 1–1.97k |
| split_name | string | 1 class ("train") |
| func_code_url | string | lengths 87–315 |
radjkarl/imgProcessor
|
imgProcessor/camera/NoiseLevelFunction.py
|
_validI
|
def _validI(x, y, weights):
    '''
    return indices that have enough data points and are not erroneous
    '''
    # density filter:
    i = np.logical_and(np.isfinite(y), weights > np.median(weights))
    # filter outliers:
    try:
        grad = np.abs(np.gradient(y[i]))
        max_gradient = 4 * np.median(grad)
        # i[i][...] = False would assign into a temporary copy; write
        # through the integer positions of the True entries instead:
        j = np.where(i)[0]
        i[j[grad > max_gradient]] = False
    except (IndexError, ValueError):
        pass
    return i
|
python
|
return indices that have enough data points and are not erroneous
|
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/NoiseLevelFunction.py#L110-L123
|
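A short illustration of the two filtering stages above (not part of the dataset row; synthetic data, numpy only):

```python
import numpy as np

y = np.array([1.0, 1.1, np.nan, 1.2, 9.0, 1.3, 1.4, 1.5])
weights = np.array([10., 12., 0., 11., 9., 13., 10., 11.])

# density filter: keep finite bins whose weight beats the median weight
i = np.logical_and(np.isfinite(y), weights > np.median(weights))

# outlier filter: among surviving bins, drop those whose local gradient
# exceeds 4x the median gradient
grad = np.abs(np.gradient(y[i]))
j = np.where(i)[0]
i[j[grad > 4 * np.median(grad)]] = False
print(i)
```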
radjkarl/imgProcessor
|
imgProcessor/camera/NoiseLevelFunction.py
|
smooth
|
def smooth(x, y, weights):
    '''
    in case the NLF cannot be described by
    a square root function,
    fall back to bounded polynomial interpolation
    '''
    # Spline hard to smooth properly, therefore solved with
    # bounded polynomial interpolation
    # ext=3: no extrapolation, but boundary value
    # return UnivariateSpline(x, y, w=weights,
    #                         s=len(y)*weights.max()*100, ext=3)
    # return np.poly1d(np.polyfit(x,y,w=weights,deg=2))
    p = np.polyfit(x, y, w=weights, deg=2)
    if np.any(np.isnan(p)):
        # couldn't even do polynomial fit
        # as last option: assume constant noise
        my = np.average(y, weights=weights)
        return lambda x: my
    return lambda xint: np.poly1d(p)(np.clip(xint, x[0], x[-1]))
|
python
|
in case the NLF cannot be described by
a square root function,
fall back to bounded polynomial interpolation
|
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/NoiseLevelFunction.py#L131-L150
|
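A minimal sketch of the bounded-polynomial fallback (not from the dataset; numpy only). The returned callable clips queries to the fitted x-range, so it never extrapolates:

```python
import numpy as np

x = np.linspace(0, 100, 20)
y = 2 + 0.05 * x + np.random.RandomState(0).normal(0, 0.1, 20)
weights = np.ones_like(x)

p = np.polyfit(x, y, w=weights, deg=2)
fn = lambda xint: np.poly1d(p)(np.clip(xint, x[0], x[-1]))
print(fn(50.0))    # interpolated
print(fn(150.0))   # clipped: same as fn(100.0), the boundary value
```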
radjkarl/imgProcessor
|
imgProcessor/camera/NoiseLevelFunction.py
|
oneImageNLF
|
def oneImageNLF(img, img2=None, signal=None):
    '''
    Estimate the NLF from one or two images of the same kind
    '''
    x, y, weights, signal = calcNLF(img, img2, signal)
    _, fn, _ = _evaluate(x, y, weights)
    return fn, signal
|
python
|
Estimate the NLF from one or two images of the same kind
|
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/NoiseLevelFunction.py#L153-L159
|
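A hedged usage sketch (module path taken from this row's func_path_in_repository; assumes imgProcessor and its numpy/scipy dependencies are installed):

```python
import numpy as np
from imgProcessor.camera.NoiseLevelFunction import oneImageNLF

# Poisson noise makes the noise level grow roughly as sqrt(intensity):
img = np.random.RandomState(1).poisson(lam=100, size=(256, 256)).astype(float)

fn, signal = oneImageNLF(img)
print(fn(np.array([50., 100., 150.])))  # estimated noise std per intensity
```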
radjkarl/imgProcessor
|
imgProcessor/camera/NoiseLevelFunction.py
|
_getMinMax
|
def _getMinMax(img):
    '''
    Get the range of image intensities
    that most pixels fall within
    '''
    av = np.mean(img)
    std = np.std(img)
    # define range for segmentation:
    mn = av - 3 * std
    mx = av + 3 * std
    return max(img.min(), mn, 0), min(img.max(), mx)
|
python
|
Get the range of image intensities
that most pixels fall within
|
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/NoiseLevelFunction.py#L162-L173
|
radjkarl/imgProcessor
|
imgProcessor/camera/NoiseLevelFunction.py
|
calcNLF
|
def calcNLF(img, img2=None, signal=None, mn_mx_nbins=None, x=None,
            averageFn='AAD',
            signalFromMultipleImages=False):
    '''
    Calculate the noise level function (NLF) as f(intensity)
    using one or two images.
    The approach for this work is published in JPV##########
    img2 - 2nd image taken under same conditions,
           used to estimate noise via image difference
    signalFromMultipleImages - whether the signal is an average of multiple
           images and not just obtained from one median-filtered image
    '''
    # CONSTANTS:
    # factor root mean square to average-absolute-difference:
    F_RMS2AAD = (2 / np.pi)**-0.5
    F_NOISE_WITH_MEDIAN = 1 + (1 / 3**2)
    N_BINS = 100
    MEDIAN_KERNEL_SIZE = 3

    def _averageAbsoluteDeviation(d):
        return np.mean(np.abs(d)) * F_RMS2AAD

    def _rootMeanSquare(d):
        return (d**2).mean()**0.5

    if averageFn == 'AAD':
        averageFn = _averageAbsoluteDeviation
    else:
        averageFn = _rootMeanSquare
    img = np.asfarray(img)
    if img2 is None:
        if signal is None:
            signal = median_filter(img, MEDIAN_KERNEL_SIZE)
        if signalFromMultipleImages:
            diff = img - signal
        else:
            # difference between the filtered and original image:
            diff = (img - signal) * F_NOISE_WITH_MEDIAN
    else:
        img2 = np.asfarray(img2)
        diff = (img - img2)
        # 2**0.5 because noise is subtracted from noise
        # and variance of sum = sum of variances:
        # var(img1-img2) ~ 2*var(img)
        # std(2*var) = 2**0.5*var**0.5
        diff /= 2**0.5
        if signal is None:
            signal = median_filter(0.5 * (img + img2), MEDIAN_KERNEL_SIZE)
    if mn_mx_nbins is not None:
        mn, mx, nbins = mn_mx_nbins
        min_len = 0
    else:
        mn, mx = _getMinMax(signal)
        s = img.shape
        min_len = int(s[0] * s[1] * 1e-3)
        if min_len < 1:
            min_len = 5
        # number of bins/different intensity ranges to analyse:
        nbins = N_BINS
        if mx - mn < nbins:
            nbins = int(mx - mn)
    # bin width:
    step = (mx - mn) / nbins
    # empty arrays:
    y = np.empty(shape=nbins)
    set_x = False
    if x is None:
        set_x = True
        x = np.empty(shape=nbins)
    # give bins with more samples more weight:
    weights = np.zeros(shape=nbins)
    # current step:
    m = mn
    for n in range(nbins):
        # get indices of all pixels within a bin:
        ind = np.logical_and(signal >= m, signal <= m + step)
        m += step
        d = diff[ind]
        ld = len(d)
        if ld >= min_len:
            weights[n] = ld
            # average absolute deviation (AAD),
            # scaled to RMS:
            y[n] = averageFn(d)
        if set_x:
            x[n] = m - 0.5 * step
    return x, y, weights, signal
|
python
|
Calculate the noise level function (NLF) as f(intensity)
using one or two images.
The approach for this work is published in JPV##########
img2 - 2nd image taken under same conditions,
used to estimate noise via image difference
signalFromMultipleImages - whether the signal is an average of multiple
images and not just obtained from one median-filtered image
|
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/NoiseLevelFunction.py#L176-L271
|
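A usage sketch of the two-image path (same module as above; synthetic shot-noise-like data). y holds one noise estimate per intensity bin, with weights counting the pixels per bin:

```python
import numpy as np
from imgProcessor.camera.NoiseLevelFunction import calcNLF

rng = np.random.RandomState(2)
base = np.tile(np.linspace(10, 200, 256), (256, 1))   # noise-free ramp
img1 = base + rng.normal(0, np.sqrt(base))
img2 = base + rng.normal(0, np.sqrt(base))

x, y, weights, signal = calcNLF(img1, img2)
# y[n] ~ sqrt(x[n]) here; weights[n] is the pixel count of bin n
```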
radjkarl/imgProcessor
|
imgProcessor/interpolate/polyfit2d.py
|
polyfit2d
|
def polyfit2d(x, y, z, order=3):  # bounds=None
    '''
    fit a 2d polynomial to unstructured data
    '''
    ncols = (order + 1)**2
    G = np.zeros((x.size, ncols))
    ij = itertools.product(list(range(order + 1)), list(range(order + 1)))
    for k, (i, j) in enumerate(ij):
        G[:, k] = x**i * y**j
    m = np.linalg.lstsq(G, z)[0]
    return m
|
python
|
fit a 2d polynomial to unstructured data
|
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/interpolate/polyfit2d.py#L8-L19
|
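A round-trip check (module path from this row). Evaluation below rebuilds the same monomial ordering that fills G above; the module's polyval2d, used by polyfit2dGrid in the next row, presumably does the equivalent:

```python
import itertools
import numpy as np
from imgProcessor.interpolate.polyfit2d import polyfit2d

rng = np.random.RandomState(3)
x, y = rng.rand(200), rng.rand(200)
z = 1 + 2 * x + 3 * y + 4 * x * y       # an exactly polynomial surface

m = polyfit2d(x, y, z, order=3)
xi, yi = 0.5, 0.5
val = sum(c * xi**i * yi**j
          for c, (i, j) in zip(m, itertools.product(range(4), range(4))))
print(val)   # ~4.5 = 1 + 2*0.5 + 3*0.5 + 4*0.25
```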
radjkarl/imgProcessor
|
imgProcessor/interpolate/polyfit2d.py
|
polyfit2dGrid
|
def polyfit2dGrid(arr, mask=None, order=3, replace_all=False,
                  copy=True, outgrid=None):
    '''
    replace all masked values with polynomial fitted ones
    '''
    s0, s1 = arr.shape
    if mask is None:
        if outgrid is None:
            y, x = np.mgrid[:float(s0), :float(s1)]
            p = polyfit2d(x.flatten(), y.flatten(), arr.flatten(), order)
            return polyval2d(x, y, p, dtype=arr.dtype)
        mask = np.zeros_like(arr, dtype=bool)
    elif mask.sum() == 0 and not replace_all and outgrid is None:
        return arr
    valid = ~mask
    y, x = np.where(valid)
    z = arr[valid]
    p = polyfit2d(x, y, z, order)
    if outgrid is not None:
        yy, xx = outgrid
    else:
        if replace_all:
            yy, xx = np.mgrid[:float(s0), :float(s1)]
        else:
            yy, xx = np.where(mask)
    new = polyval2d(xx, yy, p, dtype=arr.dtype)
    if outgrid is not None or replace_all:
        return new
    if copy:
        arr = arr.copy()
    arr[mask] = new
    return arr
|
python
|
replace all masked values with polynomial fitted ones
|
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/interpolate/polyfit2d.py#L31-L65
|
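A hole-filling sketch (module path from this row): mask a block of a smooth surface and let the order-2 fit repair it:

```python
import numpy as np
from imgProcessor.interpolate.polyfit2d import polyfit2dGrid

yy, xx = np.mgrid[:50., :50.]
arr = 1.0 + 0.02 * yy + 0.01 * xx**2          # a quadratic surface
mask = np.zeros_like(arr, dtype=bool)
mask[20:30, 20:30] = True                     # pretend this block is invalid

repaired = polyfit2dGrid(arr, mask, order=2)
print(abs(repaired[25, 25] - arr[25, 25]))    # ~0 for a quadratic surface
```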
radjkarl/imgProcessor
|
imgProcessor/features/minimumLineInArray.py
|
minimumLineInArray
|
def minimumLineInArray(arr, relative=False, f=0,
                       refinePosition=True,
                       max_pos=100,
                       return_pos_arr=False,
                       # order=2
                       ):
    '''
    find closest minimum position next to middle line
    relative: return position relative to middle line
    f: relative decrease (0...1) - setting this value close to one will
       discriminate positions further away from the center
    ##order: 2 for cubic refinement
    '''
    s0, s1 = arr.shape[:2]
    if max_pos >= s1:
        x = np.arange(s1)
    else:
        # take fewer positions within 0->(s1-1)
        x = np.rint(np.linspace(0, s1 - 1, min(max_pos, s1))).astype(int)
    res = np.empty((s0, s0), dtype=float)
    _lineSumXY(x, res, arr, f)
    if return_pos_arr:
        return res
    # best integer index
    i, j = np.unravel_index(np.nanargmin(res), res.shape)
    if refinePosition:
        try:
            sub = res[i - 1:i + 2, j - 1:j + 2]
            ii, jj = center_of_mass(sub)
            if not np.isnan(ii):
                i += (ii - 1)
            if not np.isnan(jj):
                j += (jj - 1)
        except TypeError:
            pass
    if not relative:
        return i, j
    hs = (s0 - 1) / 2
    return i - hs, j - hs
|
python
|
find closest minimum position next to middle line
relative: return position relative to middle line
f: relative decrease (0...1) - setting this value close to one will
discriminate positions further away from the center
##order: 2 for cubic refinement
|
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/features/minimumLineInArray.py#L27-L72
|
radjkarl/imgProcessor
|
imgProcessor/filters/FourierFilter.py
|
FourierFilter.highPassFilter
|
def highPassFilter(self, threshold):
    '''
    remove all low frequencies by setting a square in the middle of the
    Fourier transform, of size (2*threshold)^2, to zero
    threshold = 0...1
    '''
    if not threshold:
        return
    rows, cols = self.img.shape
    tx = int(cols * threshold)
    ty = int(rows * threshold)
    # middle:
    crow, ccol = rows // 2, cols // 2
    # set a square in the middle to zero:
    self.fshift[crow - tx:crow + tx, ccol - ty:ccol + ty] = 0
|
python
|
remove all low frequencies by setting a square in the middle of the
Fourier transform, of size (2*threshold)^2, to zero
threshold = 0...1
|
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/filters/FourierFilter.py#L27-L41
|
radjkarl/imgProcessor
|
imgProcessor/filters/FourierFilter.py
|
FourierFilter.lowPassFilter
|
def lowPassFilter(self, threshold):
    '''
    remove all high frequencies by setting the boundary around a square
    in the middle, of size (2*threshold)^2, to zero
    threshold = 0...1
    '''
    if not threshold:
        return
    rows, cols = self.img.shape
    tx = int(cols * threshold * 0.25)
    ty = int(rows * threshold * 0.25)
    # upper side
    self.fshift[rows - tx:rows, :] = 0
    # lower side
    self.fshift[0:tx, :] = 0
    # left side
    self.fshift[:, 0:ty] = 0
    # right side
    self.fshift[:, cols - ty:cols] = 0
|
python
|
remove all high frequencies by setting the boundary around a square
in the middle, of size (2*threshold)^2, to zero
threshold = 0...1
|
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/filters/FourierFilter.py#L43-L61
|
radjkarl/imgProcessor
|
imgProcessor/filters/FourierFilter.py
|
FourierFilter.reconstructImage
|
def reconstructImage(self):
    '''
    do inverse Fourier transform and return result
    '''
    f_ishift = np.fft.ifftshift(self.fshift)
    return np.real(np.fft.ifft2(f_ishift))
|
python
|
do inverse Fourier transform and return result
|
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/filters/FourierFilter.py#L108-L113
|
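The three FourierFilter methods above imply a filter pipeline, but the class constructor is not included in this dump. The sketch below rebuilds the fshift state they evidently operate on (a centered 2D FFT of self.img), so the methods can be exercised end to end; it is an assumption, not the library's actual constructor:

```python
import numpy as np

class FourierFilterSketch:
    def __init__(self, img):
        self.img = img
        # centered spectrum, as the slicing in high/lowPassFilter assumes:
        self.fshift = np.fft.fftshift(np.fft.fft2(img))

    def highPassFilter(self, threshold):
        if not threshold:
            return
        rows, cols = self.img.shape
        tx, ty = int(cols * threshold), int(rows * threshold)
        crow, ccol = rows // 2, cols // 2
        self.fshift[crow - tx:crow + tx, ccol - ty:ccol + ty] = 0

    def reconstructImage(self):
        f_ishift = np.fft.ifftshift(self.fshift)
        return np.real(np.fft.ifft2(f_ishift))

f = FourierFilterSketch(np.random.RandomState(4).rand(64, 64))
f.highPassFilter(0.1)          # zero the low-frequency square
edges = f.reconstructImage()   # what remains is high-frequency content
```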
radjkarl/imgProcessor
|
imgProcessor/interpolate/interpolate2dUnstructuredIDW.py
|
interpolate2dUnstructuredIDW
|
def interpolate2dUnstructuredIDW(x, y, v, grid, power=2):
    '''
    x,y,v --> 1d numpy.array
    grid --> 2d numpy.array
    fast if number of given values is small relative to grid resolution
    '''
    n = len(v)
    gx = grid.shape[0]
    gy = grid.shape[1]
    for i in range(gx):
        for j in range(gy):
            overPx = False  # if pixel position == point position
            sumWi = 0.0
            value = 0.0
            for k in range(n):
                xx = x[k]
                yy = y[k]
                vv = v[k]
                if xx == i and yy == j:
                    grid[i, j] = vv
                    overPx = True
                    break
                # weight from inverse distance:
                wi = 1 / ((xx - i)**2 + (yy - j)**2)**(0.5 * power)
                sumWi += wi
                value += wi * vv
            if not overPx:
                grid[i, j] = value / sumWi
    return grid
|
python
|
x,y,v --> 1d numpy.array
grid --> 2d numpy.array
fast if number of given values is small relative to grid resolution
|
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/interpolate/interpolate2dUnstructuredIDW.py#L8-L38
|
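A minimal usage sketch (module path from this row). Grid cells coinciding with a sample point take its value exactly; all others get the inverse-distance-weighted mean:

```python
import numpy as np
from imgProcessor.interpolate.interpolate2dUnstructuredIDW import \
    interpolate2dUnstructuredIDW

x = np.array([0, 9, 5])          # sample rows
y = np.array([0, 9, 5])          # sample columns
v = np.array([1.0, 3.0, 2.0])    # sample values
grid = np.zeros((10, 10))

out = interpolate2dUnstructuredIDW(x, y, v, grid, power=2)
print(out[0, 0], out[5, 5])      # exact at samples: 1.0 2.0
```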
radjkarl/imgProcessor
|
imgProcessor/features/hog.py
|
hog
|
def hog(image, orientations=8, ksize=(5, 5)):
    '''
    returns the Histogram of Oriented Gradients
    :param ksize: convolution kernel size as (y,x) - needs to be odd
    :param orientations: number of orientations in between rad=0 and rad=pi
    similar to http://scikit-image.org/docs/dev/auto_examples/plot_hog.html
    but faster and with fewer options
    '''
    s0, s1 = image.shape[:2]
    # speed up the process through saving generated kernels:
    try:
        k = hog.kernels[str(ksize) + str(orientations)]
    except KeyError:
        k = _mkConvKernel(ksize, orientations)
        hog.kernels[str(ksize) + str(orientations)] = k
    out = np.empty(shape=(s0, s1, orientations))
    image[np.isnan(image)] = 0
    for i in range(orientations):
        out[:, :, i] = convolve(image, k[i])
    return out
|
python
|
returns the Histogram of Oriented Gradients
:param ksize: convolution kernel size as (y,x) - needs to be odd
:param orientations: number of orientations in between rad=0 and rad=pi
similar to http://scikit-image.org/docs/dev/auto_examples/plot_hog.html
but faster and with fewer options
|
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/features/hog.py#L40-L64
|
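A hedged usage sketch. The function caches kernels on its own attribute, so the module presumably initializes hog.kernels = {} at import time; the guard below replicates that in case it is absent:

```python
import numpy as np
from imgProcessor.features.hog import hog

if not hasattr(hog, 'kernels'):   # assumption: the module normally sets this
    hog.kernels = {}

image = np.random.RandomState(5).rand(100, 100)
out = hog(image, orientations=8, ksize=(5, 5))
print(out.shape)   # (100, 100, 8): one filter response per orientation
```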
radjkarl/imgProcessor
|
imgProcessor/features/hog.py
|
visualize
|
def visualize(hog, grid=(10, 10), radCircle=None):
    '''
    visualize HOG as a polygon around the cell center
    for [grid] * cells
    '''
    s0, s1, nang = hog.shape
    angles = np.linspace(0, np.pi, nang + 1)[:-1]
    # center of each sub array:
    cx, cy = s0 // (2 * grid[0]), s1 // (2 * grid[1])
    # max. radius of polygon around center:
    rx, ry = cx, cy
    # for drawing a position indicator (circle):
    if radCircle is None:
        radCircle = max(1, rx // 10)
    # output array:
    out = np.zeros((s0, s1), dtype=np.uint8)
    # points of the polygon:
    pts = np.empty(shape=(1, 2 * nang, 2), dtype=np.int32)
    # takes grid[0]*grid[1] sample HOG values:
    samplesHOG = subCell2DFnArray(hog, lambda arr: arr[cx, cy], grid)
    mxHOG = samplesHOG.max()
    # sub array slices:
    slices = list(subCell2DSlices(out, grid))
    m = 0
    for m, hhh in enumerate(samplesHOG.reshape(grid[0] * grid[1], nang)):
        hhmax = hhh.max()
        hh = hhh / hhmax
        sout = out[slices[m][2:4]]
        for n, (o, a) in enumerate(zip(hh, angles)):
            pts[0, n, 0] = cx + np.cos(a) * o * rx
            pts[0, n, 1] = cy + np.sin(a) * o * ry
            pts[0, n + nang, 0] = cx + np.cos(a + np.pi) * o * rx
            pts[0, n + nang, 1] = cy + np.sin(a + np.pi) * o * ry
        cv2.fillPoly(sout, pts, int(255 * hhmax / mxHOG))
        cv2.circle(sout, (cx, cy), radCircle, 0, thickness=-1)
    return out
|
python
|
visualize HOG as a polygon around the cell center
for [grid] * cells
|
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/features/hog.py#L68-L105
|
radjkarl/imgProcessor
|
imgProcessor/camera/flatField/postProcessing.py
|
postProcessing
|
def postProcessing(arr, method='KW replace + Gauss', mask=None):
    '''
    Post process measured flat field [arr].
    Depending on the measurement, different
    post processing [method]s are beneficial.
    The available methods are presented in
    ---
    K.Bedrich, M.Bokalic et al.:
    ELECTROLUMINESCENCE IMAGING OF PV DEVICES:
    ADVANCED FLAT FIELD CALIBRATION, 2017
    ---
    methods:
    'POLY replace' --> replace [arr] with a 2d polynomial fit
    'KW replace'   --> ... a fitted Kang-Weiss function
    'AoV replace'  --> ... a fitted Angle-of-view function
    'POLY repair'  --> same as above but either replacing empty
    'KW repair'        areas or smoothing out high gradient
    'AoV repair'       variations (POLY only)
    'KW repair + Gauss'  --> same as 'KW repair' with an additional
    'KW repair + Median'     Gaussian or Median filter
    mask:
    None/2darray(bool) --> array of same shape as [arr] indicating
                           invalid or empty positions
    '''
    assert method in ppMETHODS, \
        'post processing method (%s) must be one of %s' % (method, ppMETHODS)
    if method == 'POLY replace':
        return polyfit2dGrid(arr, mask, order=2, replace_all=True)
    elif method == 'KW replace':
        return function(arr, mask, replace_all=True)
    elif method == 'POLY repair':
        return polynomial(arr, mask, replace_all=False)
    elif method == 'KW repair':
        return function(arr, mask, replace_all=False)
    elif method == 'KW repair + Median':
        # arr, not the [method] string, carries the shape:
        return median_filter(function(arr, mask, replace_all=False),
                             min(arr.shape) // 20)
    elif method == 'KW repair + Gauss':
        return gaussian_filter(function(arr, mask, replace_all=False),
                               min(arr.shape) // 20)
    elif method == 'AoV repair':
        return function(arr, mask, fn=lambda XY, a:
                        angleOfView(XY, arr.shape, a=a), guess=(0.01),
                        down_scale_factor=1)
    elif method == 'AoV replace':
        return function(arr, mask, fn=lambda XY, a:
                        angleOfView(XY, arr.shape, a=a), guess=(0.01),
                        replace_all=True, down_scale_factor=1)
|
python
|
Post process measured flat field [arr].
Depending on the measurement, different
post processing [method]s are beneficial.
The available methods are presented in
---
K.Bedrich, M.Bokalic et al.:
ELECTROLUMINESCENCE IMAGING OF PV DEVICES:
ADVANCED FLAT FIELD CALIBRATION, 2017
---
methods:
'POLY replace' --> replace [arr] with a 2d polynomial fit
'KW replace'   --> ... a fitted Kang-Weiss function
'AoV replace'  --> ... a fitted Angle-of-view function
'POLY repair'  --> same as above but either replacing empty
'KW repair'        areas or smoothing out high gradient
'AoV repair'       variations (POLY only)
'KW repair + Gauss'  --> same as 'KW repair' with an additional
'KW repair + Median'     Gaussian or Median filter
mask:
None/2darray(bool) --> array of same shape as [arr] indicating
invalid or empty positions
|
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/flatField/postProcessing.py#L16-L74
|
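A usage sketch for the 'POLY replace' path, which depends only on polyfit2dGrid shown earlier (module path from this row; assumes 'POLY replace' is among the module's ppMETHODS, as the branches above indicate):

```python
import numpy as np
from imgProcessor.camera.flatField.postProcessing import postProcessing

# a measured flat field: smooth vignetting plus noise
yy, xx = np.mgrid[:64., :64.]
ff = 1 - 1e-4 * ((xx - 32)**2 + (yy - 32)**2)
ff += np.random.RandomState(6).normal(0, 0.005, ff.shape)

smooth_ff = postProcessing(ff, method='POLY replace')
```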
radjkarl/imgProcessor
|
imgProcessor/transform/rmBorder.py
|
rmBorder
|
def rmBorder(img, border=None):
    '''
    border [None], if images are corrected and device ends at
           image border
           [one number] (like 50),
           if there is an equally spaced border
           around the device
           [two tuples] like ((50,60),(1500,900))
           means ((Xfrom,Yfrom),(Xto, Yto))
           [four tuples] like ((x0,y0),(x1,y1),...(x3,y3))
    '''
    if border is None:
        pass
    elif len(border) == 2:
        s0 = slice(border[0][1], border[1][1])
        s1 = slice(border[0][0], border[1][0])
        img = img[s0, s1]
    elif len(border) == 4:
        # eval whether border values are orthogonal:
        x = np.unique(border[:, 0])
        y = np.unique(border[:, 1])
        if len(x) == 2 and len(y) == 2:
            s0 = slice(y[0], y[1])
            s1 = slice(x[0], x[1])
            img = img[s0, s1]
        else:
            # edges are irregular:
            img = simplePerspectiveTransform(img, border)
    else:
        raise Exception('[border] input wrong')
    return img
|
python
|
border [None], if images are corrected and device ends at
image border
[one number] (like 50),
if there is an equally spaced border
around the device
[two tuples] like ((50,60),(1500,900))
means ((Xfrom,Yfrom),(Xto, Yto))
[four tuples] like ((x0,y0),(x1,y1),...(x3,y3))
|
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/transform/rmBorder.py#L6-L36
|
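A sketch of the two-tuple form, which crops via ((Xfrom,Yfrom),(Xto,Yto)) (module path from this row):

```python
import numpy as np
from imgProcessor.transform.rmBorder import rmBorder

img = np.arange(100 * 120).reshape(100, 120)
cropped = rmBorder(img, border=((10, 5), (110, 95)))
print(cropped.shape)   # (90, 100): rows 5:95, columns 10:110
```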
radjkarl/imgProcessor
|
imgProcessor/features/SingleTimeEffectDetection.py
|
SingleTimeEffectDetection.addImage
|
def addImage(self, image, mask=None):
    '''
    #########
    mask -- optional
    '''
    self._last_diff = diff = image - self.noSTE
    ste = diff > self.threshold
    removeSinglePixels(ste)
    self.mask_clean = clean = ~ste
    if mask is not None:
        clean = np.logical_and(mask, clean)
    self.mma.update(image, clean)
    if self.save_ste_indices:
        self.mask_STE += ste
    return self
|
python
|
#########
mask -- optional
|
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/features/SingleTimeEffectDetection.py#L55-L75
|
radjkarl/imgProcessor
|
imgProcessor/features/SingleTimeEffectDetection.py
|
SingleTimeEffectDetection.relativeAreaSTE
|
def relativeAreaSTE(self):
    '''
    return STE area - relative to image area
    '''
    s = self.noSTE.shape
    return np.sum(self.mask_STE) / (s[0] * s[1])
|
python
|
return STE area - relative to image area
|
[
"return",
"STE",
"area",
"-",
"relative",
"to",
"image",
"area"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/features/SingleTimeEffectDetection.py#L83-L88
|
radjkarl/imgProcessor
|
imgProcessor/features/SingleTimeEffectDetection.py
|
SingleTimeEffectDetection.intensityDistributionSTE
|
def intensityDistributionSTE(self, bins=10, range=None):
'''
return distribution of STE intensity
'''
v = np.abs(self._last_diff[self.mask_STE])
return np.histogram(v, bins, range)
|
python
|
def intensityDistributionSTE(self, bins=10, range=None):
'''
return distribution of STE intensity
'''
v = np.abs(self._last_diff[self.mask_STE])
return np.histogram(v, bins, range)
|
[
"def",
"intensityDistributionSTE",
"(",
"self",
",",
"bins",
"=",
"10",
",",
"range",
"=",
"None",
")",
":",
"v",
"=",
"np",
".",
"abs",
"(",
"self",
".",
"_last_diff",
"[",
"self",
".",
"mask_STE",
"]",
")",
"return",
"np",
".",
"histogram",
"(",
"v",
",",
"bins",
",",
"range",
")"
] |
return distribution of STE intensity
|
[
"return",
"distribution",
"of",
"STE",
"intensity"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/features/SingleTimeEffectDetection.py#L90-L95
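Continuing the sketch above, the two inspection methods can be combined. Note that `mask_STE` only accumulates when `save_ste_indices` is enabled (an assumption based on the `addImage` body):
hist, edges = det.intensityDistributionSTE(bins=20)   # histogram of |diff| at STE pixels
print('STE cover %.3f %% of the image' % (100 * det.relativeAreaSTE()))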
|
radjkarl/imgProcessor
|
imgProcessor/transformations.py
|
toUIntArray
|
def toUIntArray(img, dtype=None, cutNegative=True, cutHigh=True,
range=None, copy=True):
'''
transform a float to an unsigned integer array of a fitting dtype
adds an offset, to get rid of negative values
range = (min, max) - scale values between given range
cutNegative - all values <0 will be set to 0
cutHigh - set to False to rather scale values to fit
'''
mn, mx = None, None
if range is not None:
mn, mx = range
if dtype is None:
if mx is None:
mx = np.nanmax(img)
dtype = np.uint16 if mx > 255 else np.uint8
dtype = np.dtype(dtype)
if dtype == img.dtype:
return img
# get max px value:
b = {'uint8': 255,
'uint16': 65535,
'uint32': 4294967295,
'uint64': 18446744073709551615}[dtype.name]
if copy:
img = img.copy()
if range is not None:
img = np.asfarray(img)
img -= mn
# img[img<0]=0
# print np.nanmin(img), np.nanmax(img), mn, mx, range, b
img *= b / (mx - mn)
# print np.nanmin(img), np.nanmax(img), mn, mx, range, b
img = np.clip(img, 0, b)
else:
if cutNegative:
img[img < 0] = 0
else:
# add an offset to all values:
mn = np.min(img)
if mn < 0:
img -= mn # set minimum to 0
if cutHigh:
#ind = img > b
img[img > b] = b
else:
# scale values
mx = np.nanmax(img)
img = np.asfarray(img) * (float(b) / mx)
img = img.astype(dtype)
# if range is not None and cutHigh:
# img[ind] = b
return img
|
python
|
def toUIntArray(img, dtype=None, cutNegative=True, cutHigh=True,
range=None, copy=True):
'''
transform a float to an unsigned integer array of a fitting dtype
adds an offset, to get rid of negative values
range = (min, max) - scale values between given range
cutNegative - all values <0 will be set to 0
cutHigh - set to False to rather scale values to fit
'''
mn, mx = None, None
if range is not None:
mn, mx = range
if dtype is None:
if mx is None:
mx = np.nanmax(img)
dtype = np.uint16 if mx > 255 else np.uint8
dtype = np.dtype(dtype)
if dtype == img.dtype:
return img
# get max px value:
b = {'uint8': 255,
'uint16': 65535,
'uint32': 4294967295,
'uint64': 18446744073709551615}[dtype.name]
if copy:
img = img.copy()
if range is not None:
img = np.asfarray(img)
img -= mn
# img[img<0]=0
# print np.nanmin(img), np.nanmax(img), mn, mx, range, b
img *= b / (mx - mn)
# print np.nanmin(img), np.nanmax(img), mn, mx, range, b
img = np.clip(img, 0, b)
else:
if cutNegative:
img[img < 0] = 0
else:
# add an offset to all values:
mn = np.min(img)
if mn < 0:
img -= mn # set minimum to 0
if cutHigh:
#ind = img > b
img[img > b] = b
else:
# scale values
mx = np.nanmax(img)
img = np.asfarray(img) * (float(b) / mx)
img = img.astype(dtype)
# if range is not None and cutHigh:
# img[ind] = b
return img
|
[
"def",
"toUIntArray",
"(",
"img",
",",
"dtype",
"=",
"None",
",",
"cutNegative",
"=",
"True",
",",
"cutHigh",
"=",
"True",
",",
"range",
"=",
"None",
",",
"copy",
"=",
"True",
")",
":",
"mn",
",",
"mx",
"=",
"None",
",",
"None",
"if",
"range",
"is",
"not",
"None",
":",
"mn",
",",
"mx",
"=",
"range",
"if",
"dtype",
"is",
"None",
":",
"if",
"mx",
"is",
"None",
":",
"mx",
"=",
"np",
".",
"nanmax",
"(",
"img",
")",
"dtype",
"=",
"np",
".",
"uint16",
"if",
"mx",
">",
"255",
"else",
"np",
".",
"uint8",
"dtype",
"=",
"np",
".",
"dtype",
"(",
"dtype",
")",
"if",
"dtype",
"==",
"img",
".",
"dtype",
":",
"return",
"img",
"# get max px value:\r",
"b",
"=",
"{",
"'uint8'",
":",
"255",
",",
"'uint16'",
":",
"65535",
",",
"'uint32'",
":",
"4294967295",
",",
"'uint64'",
":",
"18446744073709551615",
"}",
"[",
"dtype",
".",
"name",
"]",
"if",
"copy",
":",
"img",
"=",
"img",
".",
"copy",
"(",
")",
"if",
"range",
"is",
"not",
"None",
":",
"img",
"=",
"np",
".",
"asfarray",
"(",
"img",
")",
"img",
"-=",
"mn",
"# img[img<0]=0\r",
"# print np.nanmin(img), np.nanmax(img), mn, mx, range, b\r",
"img",
"*=",
"b",
"/",
"(",
"mx",
"-",
"mn",
")",
"# print np.nanmin(img), np.nanmax(img), mn, mx, range, b\r",
"img",
"=",
"np",
".",
"clip",
"(",
"img",
",",
"0",
",",
"b",
")",
"else",
":",
"if",
"cutNegative",
":",
"img",
"[",
"img",
"<",
"0",
"]",
"=",
"0",
"else",
":",
"# add an offset to all values:\r",
"mn",
"=",
"np",
".",
"min",
"(",
"img",
")",
"if",
"mn",
"<",
"0",
":",
"img",
"-=",
"mn",
"# set minimum to 0\r",
"if",
"cutHigh",
":",
"#ind = img > b\r",
"img",
"[",
"img",
">",
"b",
"]",
"=",
"b",
"else",
":",
"# scale values\r",
"mx",
"=",
"np",
".",
"nanmax",
"(",
"img",
")",
"img",
"=",
"np",
".",
"asfarray",
"(",
"img",
")",
"*",
"(",
"float",
"(",
"b",
")",
"/",
"mx",
")",
"img",
"=",
"img",
".",
"astype",
"(",
"dtype",
")",
"# if range is not None and cutHigh:\r",
"# img[ind] = b\r",
"return",
"img"
] |
transform a float to an unsigned integer array of a fitting dtype
adds an offset, to get rid of negative values
range = (min, max) - scale values between given range
cutNegative - all values <0 will be set to 0
cutHigh - set to False to rather scale values to fit
|
[
"transform",
"a",
"float",
"to",
"an",
"unsigned",
"integer",
"array",
"of",
"a",
"fitting",
"dtype",
"adds",
"an",
"offset",
"to",
"get",
"rid",
"of",
"negative",
"values",
"range",
"=",
"(",
"min",
"max",
")",
"-",
"scale",
"values",
"between",
"given",
"range",
"cutNegative",
"-",
"all",
"values",
"<0",
"will",
"be",
"set",
"to",
"0",
"cutHigh",
"-",
"set",
"to",
"False",
"to",
"rather",
"scale",
"values",
"to",
"fit"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/transformations.py#L11-L75
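A short sketch of the three clipping/scaling modes, assuming the function is imported from `imgProcessor.transformations`:
import numpy as np
from imgProcessor.transformations import toUIntArray

img = np.array([[-5.0, 10.5], [300.2, 1000.0]])

a = toUIntArray(img, dtype=np.uint8)                 # negatives -> 0, values > 255 -> 255
b = toUIntArray(img, dtype=np.uint8, cutHigh=False)  # scale so the maximum maps to 255
c = toUIntArray(img, range=(0, 1000))                # mx=1000 > 255, so dtype becomes uint16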
|
radjkarl/imgProcessor
|
imgProcessor/transformations.py
|
toFloatArray
|
def toFloatArray(img):
'''
transform an unsigned integer array into a
float array of the right size
'''
_D = {1: np.float32, # uint8
2: np.float32, # uint16
4: np.float64, # uint32
8: np.float64} # uint64
return img.astype(_D[img.itemsize])
|
python
|
def toFloatArray(img):
'''
transform an unsigned integer array into a
float array of the right size
'''
_D = {1: np.float32, # uint8
2: np.float32, # uint16
4: np.float64, # uint32
8: np.float64} # uint64
return img.astype(_D[img.itemsize])
|
[
"def",
"toFloatArray",
"(",
"img",
")",
":",
"_D",
"=",
"{",
"1",
":",
"np",
".",
"float32",
",",
"# uint8\r",
"2",
":",
"np",
".",
"float32",
",",
"# uint16\r",
"4",
":",
"np",
".",
"float64",
",",
"# uint32\r",
"8",
":",
"np",
".",
"float64",
"}",
"# uint64\r",
"return",
"img",
".",
"astype",
"(",
"_D",
"[",
"img",
".",
"itemsize",
"]",
")"
] |
transform an unsigned integer array into a
float array of the right size
|
[
"transform",
"an",
"unsigned",
"integer",
"array",
"into",
"a",
"float",
"array",
"of",
"the",
"right",
"size"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/transformations.py#L78-L87
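For example (the mapping only depends on the item size of the input dtype):
import numpy as np
a = np.arange(4, dtype=np.uint16)
f = toFloatArray(a)                 # 2-byte integers map to float32
assert f.dtype == np.float32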
|
radjkarl/imgProcessor
|
imgProcessor/transformations.py
|
toNoUintArray
|
def toNoUintArray(arr):
'''
cast array to the next higher integer array
if dtype=unsigned integer
'''
d = arr.dtype
if d.kind == 'u':
arr = arr.astype({1: np.int16,
2: np.int32,
4: np.int64}[d.itemsize])
return arr
|
python
|
def toNoUintArray(arr):
'''
cast array to the next higher integer array
if dtype=unsigned integer
'''
d = arr.dtype
if d.kind == 'u':
arr = arr.astype({1: np.int16,
2: np.int32,
4: np.int64}[d.itemsize])
return arr
|
[
"def",
"toNoUintArray",
"(",
"arr",
")",
":",
"d",
"=",
"arr",
".",
"dtype",
"if",
"d",
".",
"kind",
"==",
"'u'",
":",
"arr",
"=",
"arr",
".",
"astype",
"(",
"{",
"1",
":",
"np",
".",
"int16",
",",
"2",
":",
"np",
".",
"int32",
",",
"4",
":",
"np",
".",
"int64",
"}",
"[",
"d",
".",
"itemsize",
"]",
")",
"return",
"arr"
] |
cast array to the next higher integer array
if dtype=unsigned integer
|
[
"cast",
"array",
"to",
"the",
"next",
"higher",
"integer",
"array",
"if",
"dtype",
"=",
"unsigned",
"integer"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/transformations.py#L90-L100
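A quick sketch; the signed target type is one size larger, presumably so that subtraction can no longer wrap around:
import numpy as np
a = np.array([0, 255], dtype=np.uint8)
b = toNoUintArray(a)                # uint8 (itemsize 1) -> int16
assert b.dtype == np.int16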
|
radjkarl/imgProcessor
|
imgProcessor/transformations.py
|
toGray
|
def toGray(img):
'''
weights see
https://en.wikipedia.org/wiki/Grayscale#Colorimetric_.28luminance-prese
http://docs.opencv.org/2.4/modules/imgproc/doc/miscellaneous_transformations.html#cvtcolor
'''
return np.average(img, axis=-1, weights=(0.299, # red
0.587, # green
0.114) # blue
).astype(img.dtype)
|
python
|
def toGray(img):
'''
weights see
https://en.wikipedia.org/wiki/Grayscale#Colorimetric_.28luminance-prese
http://docs.opencv.org/2.4/modules/imgproc/doc/miscellaneous_transformations.html#cvtcolor
'''
return np.average(img, axis=-1, weights=(0.299, # red
0.587, # green
0.114) # blue
).astype(img.dtype)
|
[
"def",
"toGray",
"(",
"img",
")",
":",
"return",
"np",
".",
"average",
"(",
"img",
",",
"axis",
"=",
"-",
"1",
",",
"weights",
"=",
"(",
"0.299",
",",
"# red\r",
"0.587",
",",
"# green\r",
"0.114",
")",
"# blue\r",
")",
".",
"astype",
"(",
"img",
".",
"dtype",
")"
] |
weights see
https://en.wikipedia.org/wiki/Grayscale#Colorimetric_.28luminance-prese
http://docs.opencv.org/2.4/modules/imgproc/doc/miscellaneous_transformations.html#cvtcolor
|
[
"weights",
"see",
"https",
":",
"//",
"en",
".",
"wikipedia",
".",
"org",
"/",
"wiki",
"/",
"Grayscale#Colorimetric_",
".",
"28luminance",
"-",
"prese",
"http",
":",
"//",
"docs",
".",
"opencv",
".",
"org",
"/",
"2",
".",
"4",
"/",
"modules",
"/",
"imgproc",
"/",
"doc",
"/",
"miscellaneous_transformations",
".",
"html#cvtcolor"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/transformations.py#L126-L135
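Usage on a synthetic colour image; the weighted mean runs over the last (colour) axis and the input dtype is preserved:
import numpy as np
rgb = np.random.randint(0, 256, (4, 4, 3), dtype=np.uint8)
gray = toGray(rgb)
assert gray.shape == (4, 4) and gray.dtype == rgb.dtype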
|
radjkarl/imgProcessor
|
imgProcessor/transformations.py
|
rgChromaticity
|
def rgChromaticity(img):
'''
returns the normalized RGB space (RGB/intensity)
see https://en.wikipedia.org/wiki/Rg_chromaticity
'''
out = _calc(img)
if img.dtype == np.uint8:
out = (255 * out).astype(np.uint8)
return out
|
python
|
def rgChromaticity(img):
'''
returns the normalized RGB space (RGB/intensity)
see https://en.wikipedia.org/wiki/Rg_chromaticity
'''
out = _calc(img)
if img.dtype == np.uint8:
out = (255 * out).astype(np.uint8)
return out
|
[
"def",
"rgChromaticity",
"(",
"img",
")",
":",
"out",
"=",
"_calc",
"(",
"img",
")",
"if",
"img",
".",
"dtype",
"==",
"np",
".",
"uint8",
":",
"out",
"=",
"(",
"255",
"*",
"out",
")",
".",
"astype",
"(",
"np",
".",
"uint8",
")",
"return",
"out"
] |
returns the normalized RGB space (RGB/intensity)
see https://en.wikipedia.org/wiki/Rg_chromaticity
|
[
"returns",
"the",
"normalized",
"RGB",
"space",
"(",
"RGB",
"/",
"intensity",
")",
"see",
"https",
":",
"//",
"en",
".",
"wikipedia",
".",
"org",
"/",
"wiki",
"/",
"Rg_chromaticity"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/transformations.py#L138-L146
|
radjkarl/imgProcessor
|
imgProcessor/transformations.py
|
monochromaticWavelength
|
def monochromaticWavelength(img):
'''
weight the normalized RGB channels by their peak wavelengths and sum to a single channel
'''
# peak wave lengths: https://en.wikipedia.org/wiki/RGB_color_model
out = _calc(img)
peakWavelengths = (570, 540, 440) # (r,g,b)
# s = sum(peakWavelengths)
for n, p in enumerate(peakWavelengths):
out[..., n] *= p
return out.sum(axis=2)
|
python
|
def monochromaticWavelength(img):
'''
weight the normalized RGB channels by their peak wavelengths and sum to a single channel
'''
# peak wave lengths: https://en.wikipedia.org/wiki/RGB_color_model
out = _calc(img)
peakWavelengths = (570, 540, 440) # (r,g,b)
# s = sum(peakWavelengths)
for n, p in enumerate(peakWavelengths):
out[..., n] *= p
return out.sum(axis=2)
|
[
"def",
"monochromaticWavelength",
"(",
"img",
")",
":",
"# peak wave lengths: https://en.wikipedia.org/wiki/RGB_color_model\r",
"out",
"=",
"_calc",
"(",
"img",
")",
"peakWavelengths",
"=",
"(",
"570",
",",
"540",
",",
"440",
")",
"# (r,g,b)\r",
"# s = sum(peakWavelengths)\r",
"for",
"n",
",",
"p",
"in",
"enumerate",
"(",
"peakWavelengths",
")",
":",
"out",
"[",
"...",
",",
"n",
"]",
"*=",
"p",
"return",
"out",
".",
"sum",
"(",
"axis",
"=",
"2",
")"
] |
weight the normalized RGB channels by their peak wavelengths and sum to a single channel
|
[
"TODO##########"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/transformations.py#L157-L168
|
radjkarl/imgProcessor
|
imgProcessor/transformations.py
|
rot90
|
def rot90(img):
'''
rotate one or multiple grayscale or color images 90 degrees
'''
s = img.shape
if len(s) == 3:
if s[2] in (3, 4): # color image
out = np.empty((s[1], s[0], s[2]), dtype=img.dtype)
for i in range(s[2]):
out[:, :, i] = np.rot90(img[:, :, i])
else:  # multiple grayscale
out = np.empty((s[0], s[2], s[1]), dtype=img.dtype)
for i in range(s[0]):
out[i] = np.rot90(img[i])
elif len(s) == 2: # one grayscale
out = np.rot90(img)
elif len(s) == 4 and s[3] in (3, 4): # multiple color
out = np.empty((s[0], s[2], s[1], s[3]), dtype=img.dtype)
for i in range(s[0]): # for each img
for j in range(s[3]): # for each channel
out[i, :, :, j] = np.rot90(img[i, :, :, j])
else:
raise NotImplementedError
return out
|
python
|
def rot90(img):
'''
rotate one or multiple grayscale or color images 90 degrees
'''
s = img.shape
if len(s) == 3:
if s[2] in (3, 4): # color image
out = np.empty((s[1], s[0], s[2]), dtype=img.dtype)
for i in range(s[2]):
out[:, :, i] = np.rot90(img[:, :, i])
else:  # multiple grayscale
out = np.empty((s[0], s[2], s[1]), dtype=img.dtype)
for i in range(s[0]):
out[i] = np.rot90(img[i])
elif len(s) == 2: # one grayscale
out = np.rot90(img)
elif len(s) == 4 and s[3] in (3, 4): # multiple color
out = np.empty((s[0], s[2], s[1], s[3]), dtype=img.dtype)
for i in range(s[0]): # for each img
for j in range(s[3]): # for each channel
out[i, :, :, j] = np.rot90(img[i, :, :, j])
else:
raise NotImplementedError
return out
|
[
"def",
"rot90",
"(",
"img",
")",
":",
"s",
"=",
"img",
".",
"shape",
"if",
"len",
"(",
"s",
")",
"==",
"3",
":",
"if",
"s",
"[",
"2",
"]",
"in",
"(",
"3",
",",
"4",
")",
":",
"# color image\r",
"out",
"=",
"np",
".",
"empty",
"(",
"(",
"s",
"[",
"1",
"]",
",",
"s",
"[",
"0",
"]",
",",
"s",
"[",
"2",
"]",
")",
",",
"dtype",
"=",
"img",
".",
"dtype",
")",
"for",
"i",
"in",
"range",
"(",
"s",
"[",
"2",
"]",
")",
":",
"out",
"[",
":",
",",
":",
",",
"i",
"]",
"=",
"np",
".",
"rot90",
"(",
"img",
"[",
":",
",",
":",
",",
"i",
"]",
")",
"else",
":",
"# mutliple grayscale\r",
"out",
"=",
"np",
".",
"empty",
"(",
"(",
"s",
"[",
"0",
"]",
",",
"s",
"[",
"2",
"]",
",",
"s",
"[",
"1",
"]",
")",
",",
"dtype",
"=",
"img",
".",
"dtype",
")",
"for",
"i",
"in",
"range",
"(",
"s",
"[",
"0",
"]",
")",
":",
"out",
"[",
"i",
"]",
"=",
"np",
".",
"rot90",
"(",
"img",
"[",
"i",
"]",
")",
"elif",
"len",
"(",
"s",
")",
"==",
"2",
":",
"# one grayscale\r",
"out",
"=",
"np",
".",
"rot90",
"(",
"img",
")",
"elif",
"len",
"(",
"s",
")",
"==",
"4",
"and",
"s",
"[",
"3",
"]",
"in",
"(",
"3",
",",
"4",
")",
":",
"# multiple color\r",
"out",
"=",
"np",
".",
"empty",
"(",
"(",
"s",
"[",
"0",
"]",
",",
"s",
"[",
"2",
"]",
",",
"s",
"[",
"1",
"]",
",",
"s",
"[",
"3",
"]",
")",
",",
"dtype",
"=",
"img",
".",
"dtype",
")",
"for",
"i",
"in",
"range",
"(",
"s",
"[",
"0",
"]",
")",
":",
"# for each img\r",
"for",
"j",
"in",
"range",
"(",
"s",
"[",
"3",
"]",
")",
":",
"# for each channel\r",
"out",
"[",
"i",
",",
":",
",",
":",
",",
"j",
"]",
"=",
"np",
".",
"rot90",
"(",
"img",
"[",
"i",
",",
":",
",",
":",
",",
"j",
"]",
")",
"else",
":",
"NotImplemented",
"return",
"out"
] |
rotate one or multiple grayscale or color images 90 degrees
|
[
"rotate",
"one",
"or",
"multiple",
"grayscale",
"or",
"color",
"images",
"90",
"degrees"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/transformations.py#L186-L209
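Two of the four supported shapes, sketched with zero arrays:
import numpy as np
stack = np.zeros((5, 20, 30))       # five 20x30 grayscale images
out = rot90(stack)
assert out.shape == (5, 30, 20)

rgb = np.zeros((20, 30, 3))         # a single colour image
assert rot90(rgb).shape == (30, 20, 3)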
|
radjkarl/imgProcessor
|
imgProcessor/transformations.py
|
applyColorMap
|
def applyColorMap(gray, cmap='flame'):
'''
like cv2.applyColorMap(im_gray, cv2.COLORMAP_*) but with different color maps
'''
# TODO:implement more cmaps
if cmap != 'flame':
raise NotImplementedError
# TODO: make better
mx = 256 # if gray.dtype==np.uint8 else 65535
lut = np.empty(shape=(256, 3))
cmap = (
# taken from pyqtgraph GradientEditorItem
(0, (0, 0, 0)),
(0.2, (7, 0, 220)),
(0.5, (236, 0, 134)),
(0.8, (246, 246, 0)),
(1.0, (255, 255, 255))
)
# build lookup table:
lastval, lastcol = cmap[0]
for step, col in cmap[1:]:
val = int(step * mx)
for i in range(3):
lut[lastval:val, i] = np.linspace(
lastcol[i], col[i], val - lastval)
lastcol = col
lastval = val
s0, s1 = gray.shape
out = np.empty(shape=(s0, s1, 3), dtype=np.uint8)
for i in range(3):
out[..., i] = cv2.LUT(gray, lut[:, i])
return out
|
python
|
def applyColorMap(gray, cmap='flame'):
'''
like cv2.applyColorMap(im_gray, cv2.COLORMAP_*) but with different color maps
'''
# TODO:implement more cmaps
if cmap != 'flame':
raise NotImplementedError
# TODO: make better
mx = 256 # if gray.dtype==np.uint8 else 65535
lut = np.empty(shape=(256, 3))
cmap = (
# taken from pyqtgraph GradientEditorItem
(0, (0, 0, 0)),
(0.2, (7, 0, 220)),
(0.5, (236, 0, 134)),
(0.8, (246, 246, 0)),
(1.0, (255, 255, 255))
)
# build lookup table:
lastval, lastcol = cmap[0]
for step, col in cmap[1:]:
val = int(step * mx)
for i in range(3):
lut[lastval:val, i] = np.linspace(
lastcol[i], col[i], val - lastval)
lastcol = col
lastval = val
s0, s1 = gray.shape
out = np.empty(shape=(s0, s1, 3), dtype=np.uint8)
for i in range(3):
out[..., i] = cv2.LUT(gray, lut[:, i])
return out
|
[
"def",
"applyColorMap",
"(",
"gray",
",",
"cmap",
"=",
"'flame'",
")",
":",
"# TODO:implement more cmaps\r",
"if",
"cmap",
"!=",
"'flame'",
":",
"raise",
"NotImplemented",
"# TODO: make better\r",
"mx",
"=",
"256",
"# if gray.dtype==np.uint8 else 65535\r",
"lut",
"=",
"np",
".",
"empty",
"(",
"shape",
"=",
"(",
"256",
",",
"3",
")",
")",
"cmap",
"=",
"(",
"# taken from pyqtgraph GradientEditorItem\r",
"(",
"0",
",",
"(",
"0",
",",
"0",
",",
"0",
")",
")",
",",
"(",
"0.2",
",",
"(",
"7",
",",
"0",
",",
"220",
")",
")",
",",
"(",
"0.5",
",",
"(",
"236",
",",
"0",
",",
"134",
")",
")",
",",
"(",
"0.8",
",",
"(",
"246",
",",
"246",
",",
"0",
")",
")",
",",
"(",
"1.0",
",",
"(",
"255",
",",
"255",
",",
"255",
")",
")",
")",
"# build lookup table:\r",
"lastval",
",",
"lastcol",
"=",
"cmap",
"[",
"0",
"]",
"for",
"step",
",",
"col",
"in",
"cmap",
"[",
"1",
":",
"]",
":",
"val",
"=",
"int",
"(",
"step",
"*",
"mx",
")",
"for",
"i",
"in",
"range",
"(",
"3",
")",
":",
"lut",
"[",
"lastval",
":",
"val",
",",
"i",
"]",
"=",
"np",
".",
"linspace",
"(",
"lastcol",
"[",
"i",
"]",
",",
"col",
"[",
"i",
"]",
",",
"val",
"-",
"lastval",
")",
"lastcol",
"=",
"col",
"lastval",
"=",
"val",
"s0",
",",
"s1",
"=",
"gray",
".",
"shape",
"out",
"=",
"np",
".",
"empty",
"(",
"shape",
"=",
"(",
"s0",
",",
"s1",
",",
"3",
")",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"for",
"i",
"in",
"range",
"(",
"3",
")",
":",
"out",
"[",
"...",
",",
"i",
"]",
"=",
"cv2",
".",
"LUT",
"(",
"gray",
",",
"lut",
"[",
":",
",",
"i",
"]",
")",
"return",
"out"
] |
like cv2.applyColorMap(im_gray, cv2.COLORMAP_*) but with different color maps
|
[
"like",
"cv2",
".",
"applyColorMap",
"(",
"im_gray",
"cv2",
".",
"COLORMAP_",
"*",
")",
"but",
"with",
"different",
"color",
"maps"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/transformations.py#L212-L246
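A usage sketch (OpenCV required; the input must be uint8 so `cv2.LUT` can index the 256-entry table):
import numpy as np
gray = np.random.randint(0, 256, (32, 32), dtype=np.uint8)
rgb = applyColorMap(gray)           # 'flame' is the only map implemented so far
assert rgb.shape == (32, 32, 3) and rgb.dtype == np.uint8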
|
radjkarl/imgProcessor
|
imgProcessor/camera/CameraCalibration.py
|
_insertDateIndex
|
def _insertDateIndex(date, l):
'''
returns the index to insert the given date in a list
where each items first value is a date
'''
return next((i for i, n in enumerate(l) if n[0] < date), len(l))
|
python
|
def _insertDateIndex(date, l):
'''
returns the index to insert the given date in a list
where each items first value is a date
'''
return next((i for i, n in enumerate(l) if n[0] < date), len(l))
|
[
"def",
"_insertDateIndex",
"(",
"date",
",",
"l",
")",
":",
"return",
"next",
"(",
"(",
"i",
"for",
"i",
",",
"n",
"in",
"enumerate",
"(",
"l",
")",
"if",
"n",
"[",
"0",
"]",
"<",
"date",
")",
",",
"len",
"(",
"l",
")",
")"
] |
returns the index to insert the given date in a list
where each items first value is a date
|
[
"returns",
"the",
"index",
"to",
"insert",
"the",
"given",
"date",
"in",
"a",
"list",
"where",
"each",
"items",
"first",
"value",
"is",
"a",
"date"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/CameraCalibration.py#L29-L34
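The list is evidently kept newest-first. A small sketch with `time.struct_time` values, which is what `_toDate` appears to produce elsewhere in this file (struct_time compares like a tuple, so `<` works):
import time
l = [(time.strptime('30 Nov 16', '%d %b %y'), 'newer'),
     (time.strptime('01 Jan 15', '%d %b %y'), 'older')]

d = time.strptime('15 Jun 16', '%d %b %y')
print(_insertDateIndex(d, l))       # -> 1: in between, keeping the descending order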
|
radjkarl/imgProcessor
|
imgProcessor/camera/CameraCalibration.py
|
_getFromDate
|
def _getFromDate(l, date):
'''
returns the entry of the given or best fitting date
'''
try:
date = _toDate(date)
i = _insertDateIndex(date, l) - 1
if i == -1:
return l[0]
return l[i]
except (ValueError, TypeError):
# ValueError: date invalid / TypeError: date = None
return l[0]
|
python
|
def _getFromDate(l, date):
'''
returns the entry of the given or best fitting date
'''
try:
date = _toDate(date)
i = _insertDateIndex(date, l) - 1
if i == -1:
return l[0]
return l[i]
except (ValueError, TypeError):
# ValueError: date invalid / TypeError: date = None
return l[0]
|
[
"def",
"_getFromDate",
"(",
"l",
",",
"date",
")",
":",
"try",
":",
"date",
"=",
"_toDate",
"(",
"date",
")",
"i",
"=",
"_insertDateIndex",
"(",
"date",
",",
"l",
")",
"-",
"1",
"if",
"i",
"==",
"-",
"1",
":",
"return",
"l",
"[",
"0",
"]",
"return",
"l",
"[",
"i",
"]",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
":",
"# ValueError: date invalid / TypeError: date = None\r",
"return",
"l",
"[",
"0",
"]"
] |
returns the entry of the given or best fitting date
|
[
"returns",
"the",
"index",
"of",
"given",
"or",
"best",
"fitting",
"date"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/CameraCalibration.py#L37-L49
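Continuing the sketch above: invalid dates and dates newer than every entry both fall back to the first (youngest) entry. The '%d %b %y' date format is an assumption taken from the "DD Mon YY" docstrings below:
print(_getFromDate(l, None)[1])         # -> 'newer' (TypeError is caught)
print(_getFromDate(l, '01 Dec 16')[1])  # -> 'newer' (no entry is younger)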
|
radjkarl/imgProcessor
|
imgProcessor/camera/CameraCalibration.py
|
CameraCalibration.dates
|
def dates(self, typ, light=None):
'''
Args:
typ: type of calibration to look for. See .coeffs.keys() for all types available
light (Optional[str]): restrict to calibrations done with the given light source
Returns:
list: All calibration dates available for given typ
'''
try:
d = self._getDate(typ, light)
return [self._toDateStr(c[0]) for c in d]
except KeyError:
return []
|
python
|
def dates(self, typ, light=None):
'''
Args:
typ: type of calibration to look for. See .coeffs.keys() for all types available
light (Optional[str]): restrict to calibrations done with the given light source
Returns:
list: All calibration dates available for given typ
'''
try:
d = self._getDate(typ, light)
return [self._toDateStr(c[0]) for c in d]
except KeyError:
return []
|
[
"def",
"dates",
"(",
"self",
",",
"typ",
",",
"light",
"=",
"None",
")",
":",
"try",
":",
"d",
"=",
"self",
".",
"_getDate",
"(",
"typ",
",",
"light",
")",
"return",
"[",
"self",
".",
"_toDateStr",
"(",
"c",
"[",
"0",
"]",
")",
"for",
"c",
"in",
"d",
"]",
"except",
"KeyError",
":",
"return",
"[",
"]"
] |
Args:
typ: type of calibration to look for. See .coeffs.keys() for all types available
light (Optional[str]): restrict to calibrations done with the given light source
Returns:
list: All calibration dates available for given typ
|
[
"Args",
":",
"typ",
":",
"type",
"of",
"calibration",
"to",
"look",
"for",
".",
"See",
".",
"coeffs",
".",
"keys",
"()",
"for",
"all",
"types",
"available",
"light",
"(",
"Optional",
"[",
"str",
"]",
")",
":",
"restrict",
"to",
"calibrations",
"done",
"given",
"light",
"source",
"Returns",
":",
"list",
":",
"All",
"calibration",
"dates",
"available",
"for",
"given",
"typ"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/CameraCalibration.py#L91-L104
|
radjkarl/imgProcessor
|
imgProcessor/camera/CameraCalibration.py
|
CameraCalibration.infos
|
def infos(self, typ, light=None, date=None):
'''
Args:
typ: type of calibration to look for. See .coeffs.keys() for all types available
date (Optional[str]): date of calibration
Returns:
list: all infos available for given typ
'''
d = self._getDate(typ, light)
if date is None:
return [c[1] for c in d]
# TODO: not struct time, but time in ms since epoch
return _getFromDate(d, date)[1]
|
python
|
def infos(self, typ, light=None, date=None):
'''
Args:
typ: type of calibration to look for. See .coeffs.keys() for all types available
date (Optional[str]): date of calibration
Returns:
list: all infos available for given typ
'''
d = self._getDate(typ, light)
if date is None:
return [c[1] for c in d]
# TODO: not struct time, but time in ms since epoch
return _getFromDate(d, date)[1]
|
[
"def",
"infos",
"(",
"self",
",",
"typ",
",",
"light",
"=",
"None",
",",
"date",
"=",
"None",
")",
":",
"d",
"=",
"self",
".",
"_getDate",
"(",
"typ",
",",
"light",
")",
"if",
"date",
"is",
"None",
":",
"return",
"[",
"c",
"[",
"1",
"]",
"for",
"c",
"in",
"d",
"]",
"# TODO: not struct time, but time in ms since epoch\r",
"return",
"_getFromDate",
"(",
"d",
",",
"date",
")",
"[",
"1",
"]"
] |
Args:
typ: type of calibration to look for. See .coeffs.keys() for all types available
date (Optional[str]): date of calibration
Returns:
list: all infos available for given typ
|
[
"Args",
":",
"typ",
":",
"type",
"of",
"calibration",
"to",
"look",
"for",
".",
"See",
".",
"coeffs",
".",
"keys",
"()",
"for",
"all",
"types",
"available",
"date",
"(",
"Optional",
"[",
"str",
"]",
")",
":",
"date",
"of",
"calibration",
"Returns",
":",
"list",
":",
"all",
"infos",
"available",
"for",
"given",
"typ"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/CameraCalibration.py#L106-L119
|
radjkarl/imgProcessor
|
imgProcessor/camera/CameraCalibration.py
|
CameraCalibration.overview
|
def overview(self):
'''
Returns:
str: an overview covering all calibrations
infos and shapes
'''
c = self.coeffs
out = 'camera name: %s' % c['name']
out += '\nmax value: %s' % c['depth']
out += '\nlight spectra: %s' % c['light spectra']
out += '\ndark current:'
for (date, info, (slope, intercept), error) in c['dark current']:
out += '\n\t date: %s' % self._toDateStr(date)
out += '\n\t\t info: %s; slope:%s, intercept:%s' % (
info, slope.shape, intercept.shape)
out += '\nflat field:'
for light, vals in c['flat field'].items():
out += '\n\t light: %s' % light
for (date, info, arr, error) in vals:
out += '\n\t\t date: %s' % self._toDateStr(date)
out += '\n\t\t\t info: %s; array:%s' % (info, arr.shape)
out += '\nlens:'
for light, vals in c['lens'].items():
out += '\n\t light: %s' % light
for (date, info, coeffs) in vals:
out += '\n\t\t date: %s' % self._toDateStr(date)
out += '\n\t\t\t info: %s; coeffs:%s' % (info, coeffs)
out += '\nnoise:'
for (date, info, nlf_coeff, error) in c['noise']:
out += '\n\t date: %s' % self._toDateStr(date)
out += '\n\t\t info: %s; coeffs:%s' % (info, nlf_coeff)
out += '\nPoint spread function:'
for light, vals in c['psf'].items():
out += '\n\t light: %s' % light
for (date, info, psf) in vals:
out += '\n\t\t date: %s' % self._toDateStr(date)
out += '\n\t\t\t info: %s; shape:%s' % (info, psf.shape)
return out
|
python
|
def overview(self):
'''
Returns:
str: an overview covering all calibrations
infos and shapes
'''
c = self.coeffs
out = 'camera name: %s' % c['name']
out += '\nmax value: %s' % c['depth']
out += '\nlight spectra: %s' % c['light spectra']
out += '\ndark current:'
for (date, info, (slope, intercept), error) in c['dark current']:
out += '\n\t date: %s' % self._toDateStr(date)
out += '\n\t\t info: %s; slope:%s, intercept:%s' % (
info, slope.shape, intercept.shape)
out += '\nflat field:'
for light, vals in c['flat field'].items():
out += '\n\t light: %s' % light
for (date, info, arr, error) in vals:
out += '\n\t\t date: %s' % self._toDateStr(date)
out += '\n\t\t\t info: %s; array:%s' % (info, arr.shape)
out += '\nlens:'
for light, vals in c['lens'].items():
out += '\n\t light: %s' % light
for (date, info, coeffs) in vals:
out += '\n\t\t date: %s' % self._toDateStr(date)
out += '\n\t\t\t info: %s; coeffs:%s' % (info, coeffs)
out += '\nnoise:'
for (date, info, nlf_coeff, error) in c['noise']:
out += '\n\t date: %s' % self._toDateStr(date)
out += '\n\t\t info: %s; coeffs:%s' % (info, nlf_coeff)
out += '\nPoint spread function:'
for light, vals in c['psf'].items():
out += '\n\t light: %s' % light
for (date, info, psf) in vals:
out += '\n\t\t date: %s' % self._toDateStr(date)
out += '\n\t\t\t info: %s; shape:%s' % (info, psf.shape)
return out
|
[
"def",
"overview",
"(",
"self",
")",
":",
"c",
"=",
"self",
".",
"coeffs",
"out",
"=",
"'camera name: %s'",
"%",
"c",
"[",
"'name'",
"]",
"out",
"+=",
"'\\nmax value: %s'",
"%",
"c",
"[",
"'depth'",
"]",
"out",
"+=",
"'\\nlight spectra: %s'",
"%",
"c",
"[",
"'light spectra'",
"]",
"out",
"+=",
"'\\ndark current:'",
"for",
"(",
"date",
",",
"info",
",",
"(",
"slope",
",",
"intercept",
")",
",",
"error",
")",
"in",
"c",
"[",
"'dark current'",
"]",
":",
"out",
"+=",
"'\\n\\t date: %s'",
"%",
"self",
".",
"_toDateStr",
"(",
"date",
")",
"out",
"+=",
"'\\n\\t\\t info: %s; slope:%s, intercept:%s'",
"%",
"(",
"info",
",",
"slope",
".",
"shape",
",",
"intercept",
".",
"shape",
")",
"out",
"+=",
"'\\nflat field:'",
"for",
"light",
",",
"vals",
"in",
"c",
"[",
"'flat field'",
"]",
".",
"items",
"(",
")",
":",
"out",
"+=",
"'\\n\\t light: %s'",
"%",
"light",
"for",
"(",
"date",
",",
"info",
",",
"arr",
",",
"error",
")",
"in",
"vals",
":",
"out",
"+=",
"'\\n\\t\\t date: %s'",
"%",
"self",
".",
"_toDateStr",
"(",
"date",
")",
"out",
"+=",
"'\\n\\t\\t\\t info: %s; array:%s'",
"%",
"(",
"info",
",",
"arr",
".",
"shape",
")",
"out",
"+=",
"'\\nlens:'",
"for",
"light",
",",
"vals",
"in",
"c",
"[",
"'lens'",
"]",
".",
"items",
"(",
")",
":",
"out",
"+=",
"'\\n\\t light: %s'",
"%",
"light",
"for",
"(",
"date",
",",
"info",
",",
"coeffs",
")",
"in",
"vals",
":",
"out",
"+=",
"'\\n\\t\\t date: %s'",
"%",
"self",
".",
"_toDateStr",
"(",
"date",
")",
"out",
"+=",
"'\\n\\t\\t\\t info: %s; coeffs:%s'",
"%",
"(",
"info",
",",
"coeffs",
")",
"out",
"+=",
"'\\nnoise:'",
"for",
"(",
"date",
",",
"info",
",",
"nlf_coeff",
",",
"error",
")",
"in",
"c",
"[",
"'noise'",
"]",
":",
"out",
"+=",
"'\\n\\t date: %s'",
"%",
"self",
".",
"_toDateStr",
"(",
"date",
")",
"out",
"+=",
"'\\n\\t\\t info: %s; coeffs:%s'",
"%",
"(",
"info",
",",
"nlf_coeff",
")",
"out",
"+=",
"'\\nPoint spread function:'",
"for",
"light",
",",
"vals",
"in",
"c",
"[",
"'psf'",
"]",
".",
"items",
"(",
")",
":",
"out",
"+=",
"'\\n\\t light: %s'",
"%",
"light",
"for",
"(",
"date",
",",
"info",
",",
"psf",
")",
"in",
"vals",
":",
"out",
"+=",
"'\\n\\t\\t date: %s'",
"%",
"self",
".",
"_toDateStr",
"(",
"date",
")",
"out",
"+=",
"'\\n\\t\\t\\t info: %s; shape:%s'",
"%",
"(",
"info",
",",
"psf",
".",
"shape",
")",
"return",
"out"
] |
Returns:
str: an overview covering all calibrations
infos and shapes
|
[
"Returns",
":",
"str",
":",
"an",
"overview",
"covering",
"all",
"calibrations",
"infos",
"and",
"shapes"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/CameraCalibration.py#L121-L164
|
radjkarl/imgProcessor
|
imgProcessor/camera/CameraCalibration.py
|
CameraCalibration.setCamera
|
def setCamera(self, camera_name, bit_depth=16):
'''
Args:
camera_name (str): Name of the camera
bit_depth (int): depth (bit) of the camera sensor
'''
self.coeffs['name'] = camera_name
self.coeffs['depth'] = bit_depth
|
python
|
def setCamera(self, camera_name, bit_depth=16):
'''
Args:
camera_name (str): Name of the camera
bit_depth (int): depth (bit) of the camera sensor
'''
self.coeffs['name'] = camera_name
self.coeffs['depth'] = bit_depth
|
[
"def",
"setCamera",
"(",
"self",
",",
"camera_name",
",",
"bit_depth",
"=",
"16",
")",
":",
"self",
".",
"coeffs",
"[",
"'name'",
"]",
"=",
"camera_name",
"self",
".",
"coeffs",
"[",
"'depth'",
"]",
"=",
"bit_depth"
] |
Args:
camera_name (str): Name of the camera
bit_depth (int): depth (bit) of the camera sensor
|
[
"Args",
":",
"camera_name",
"(",
"str",
")",
":",
"Name",
"of",
"the",
"camera",
"bit_depth",
"(",
"int",
")",
":",
"depth",
"(",
"bit",
")",
"of",
"the",
"camera",
"sensor"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/CameraCalibration.py#L178-L185
|
radjkarl/imgProcessor
|
imgProcessor/camera/CameraCalibration.py
|
CameraCalibration.addDarkCurrent
|
def addDarkCurrent(self, slope, intercept=None, date=None, info='', error=None):
'''
Args:
slope (np.array)
intercept (np.array)
error (numpy.array)
slope (float): dPx/dExposureTime[sec]
error (float): absolute
date (str): "DD Mon YY" e.g. "30 Nov 16"
'''
date = _toDate(date)
self._checkShape(slope)
self._checkShape(intercept)
d = self.coeffs['dark current']
if intercept is None:
data = slope
else:
data = (slope, intercept)
d.insert(_insertDateIndex(date, d), [date, info, data, error])
|
python
|
def addDarkCurrent(self, slope, intercept=None, date=None, info='', error=None):
'''
Args:
slope (np.array)
intercept (np.array)
error (numpy.array)
slope (float): dPx/dExposureTime[sec]
error (float): absolute
date (str): "DD Mon YY" e.g. "30 Nov 16"
'''
date = _toDate(date)
self._checkShape(slope)
self._checkShape(intercept)
d = self.coeffs['dark current']
if intercept is None:
data = slope
else:
data = (slope, intercept)
d.insert(_insertDateIndex(date, d), [date, info, data, error])
|
[
"def",
"addDarkCurrent",
"(",
"self",
",",
"slope",
",",
"intercept",
"=",
"None",
",",
"date",
"=",
"None",
",",
"info",
"=",
"''",
",",
"error",
"=",
"None",
")",
":",
"date",
"=",
"_toDate",
"(",
"date",
")",
"self",
".",
"_checkShape",
"(",
"slope",
")",
"self",
".",
"_checkShape",
"(",
"intercept",
")",
"d",
"=",
"self",
".",
"coeffs",
"[",
"'dark current'",
"]",
"if",
"intercept",
"is",
"None",
":",
"data",
"=",
"slope",
"else",
":",
"data",
"=",
"(",
"slope",
",",
"intercept",
")",
"d",
".",
"insert",
"(",
"_insertDateIndex",
"(",
"date",
",",
"d",
")",
",",
"[",
"date",
",",
"info",
",",
"data",
",",
"error",
"]",
")"
] |
Args:
slope (np.array)
intercept (np.array)
error (numpy.array)
slope (float): dPx/dExposureTime[sec]
error (float): absolute
date (str): "DD Mon YY" e.g. "30 Nov 16"
|
[
"Args",
":",
"slope",
"(",
"np",
".",
"array",
")",
"intercept",
"(",
"np",
".",
"array",
")",
"error",
"(",
"numpy",
".",
"array",
")",
"slope",
"(",
"float",
")",
":",
"dPx",
"/",
"dExposureTime",
"[",
"sec",
"]",
"error",
"(",
"float",
")",
":",
"absolute",
"date",
"(",
"str",
")",
":",
"DD",
"Mon",
"YY",
"e",
".",
"g",
".",
"30",
"Nov",
"16"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/CameraCalibration.py#L187-L207
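A build-up sketch for the calibration container. It assumes `CameraCalibration()` can be constructed without arguments and that `_checkShape` accepts these arrays; both are assumptions, not confirmed by the excerpts here:
import numpy as np
from imgProcessor.camera.CameraCalibration import CameraCalibration

cal = CameraCalibration()
cal.setCamera('my-cam', bit_depth=12)

slope = np.full((480, 640), 0.02)       # dPx / dExposureTime[sec]
intercept = np.full((480, 640), 4.0)
cal.addDarkCurrent(slope, intercept, date='30 Nov 16', info='lab measurement')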
|
radjkarl/imgProcessor
|
imgProcessor/camera/CameraCalibration.py
|
CameraCalibration.addNoise
|
def addNoise(self, nlf_coeff, date=None, info='', error=None):
'''
Args:
nlf_coeff (list)
error (float): absolute
info (str): additional information
date (str): "DD Mon YY" e.g. "30 Nov 16"
'''
date = _toDate(date)
d = self.coeffs['noise']
d.insert(_insertDateIndex(date, d), [date, info, nlf_coeff, error])
|
python
|
def addNoise(self, nlf_coeff, date=None, info='', error=None):
'''
Args:
nlf_coeff (list)
error (float): absolute
info (str): additional information
date (str): "DD Mon YY" e.g. "30 Nov 16"
'''
date = _toDate(date)
d = self.coeffs['noise']
d.insert(_insertDateIndex(date, d), [date, info, nlf_coeff, error])
|
[
"def",
"addNoise",
"(",
"self",
",",
"nlf_coeff",
",",
"date",
"=",
"None",
",",
"info",
"=",
"''",
",",
"error",
"=",
"None",
")",
":",
"date",
"=",
"_toDate",
"(",
"date",
")",
"d",
"=",
"self",
".",
"coeffs",
"[",
"'noise'",
"]",
"d",
".",
"insert",
"(",
"_insertDateIndex",
"(",
"date",
",",
"d",
")",
",",
"[",
"date",
",",
"info",
",",
"nlf_coeff",
",",
"error",
"]",
")"
] |
Args:
nlf_coeff (list)
error (float): absolute
info (str): additional information
date (str): "DD Mon YY" e.g. "30 Nov 16"
|
[
"Args",
":",
"nlf_coeff",
"(",
"list",
")",
"error",
"(",
"float",
")",
":",
"absolute",
"info",
"(",
"str",
")",
":",
"additional",
"information",
"date",
"(",
"str",
")",
":",
"DD",
"Mon",
"YY",
"e",
".",
"g",
".",
"30",
"Nov",
"16"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/CameraCalibration.py#L209-L219
|
radjkarl/imgProcessor
|
imgProcessor/camera/CameraCalibration.py
|
CameraCalibration.addPSF
|
def addPSF(self, psf, date=None, info='', light_spectrum='visible'):
'''
add a new point spread function
'''
self._registerLight(light_spectrum)
date = _toDate(date)
f = self.coeffs['psf']
if light_spectrum not in f:
f[light_spectrum] = []
f[light_spectrum].insert(_insertDateIndex(date, f[light_spectrum]),
[date, info, psf])
|
python
|
def addPSF(self, psf, date=None, info='', light_spectrum='visible'):
'''
add a new point spread function
'''
self._registerLight(light_spectrum)
date = _toDate(date)
f = self.coeffs['psf']
if light_spectrum not in f:
f[light_spectrum] = []
f[light_spectrum].insert(_insertDateIndex(date, f[light_spectrum]),
[date, info, psf])
|
[
"def",
"addPSF",
"(",
"self",
",",
"psf",
",",
"date",
"=",
"None",
",",
"info",
"=",
"''",
",",
"light_spectrum",
"=",
"'visible'",
")",
":",
"self",
".",
"_registerLight",
"(",
"light_spectrum",
")",
"date",
"=",
"_toDate",
"(",
"date",
")",
"f",
"=",
"self",
".",
"coeffs",
"[",
"'psf'",
"]",
"if",
"light_spectrum",
"not",
"in",
"f",
":",
"f",
"[",
"light_spectrum",
"]",
"=",
"[",
"]",
"f",
"[",
"light_spectrum",
"]",
".",
"insert",
"(",
"_insertDateIndex",
"(",
"date",
",",
"f",
"[",
"light_spectrum",
"]",
")",
",",
"[",
"date",
",",
"info",
",",
"psf",
"]",
")"
] |
add a new point spread function
|
[
"add",
"a",
"new",
"point",
"spread",
"function"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/CameraCalibration.py#L232-L243
|
radjkarl/imgProcessor
|
imgProcessor/camera/CameraCalibration.py
|
CameraCalibration.addFlatField
|
def addFlatField(self, arr, date=None, info='', error=None,
light_spectrum='visible'):
'''
light_spectrum = light, IR ...
'''
self._registerLight(light_spectrum)
self._checkShape(arr)
date = _toDate(date)
f = self.coeffs['flat field']
if light_spectrum not in f:
f[light_spectrum] = []
f[light_spectrum].insert(_insertDateIndex(date, f[light_spectrum]),
[date, info, arr, error])
|
python
|
def addFlatField(self, arr, date=None, info='', error=None,
light_spectrum='visible'):
'''
light_spectrum = light, IR ...
'''
self._registerLight(light_spectrum)
self._checkShape(arr)
date = _toDate(date)
f = self.coeffs['flat field']
if light_spectrum not in f:
f[light_spectrum] = []
f[light_spectrum].insert(_insertDateIndex(date, f[light_spectrum]),
[date, info, arr, error])
|
[
"def",
"addFlatField",
"(",
"self",
",",
"arr",
",",
"date",
"=",
"None",
",",
"info",
"=",
"''",
",",
"error",
"=",
"None",
",",
"light_spectrum",
"=",
"'visible'",
")",
":",
"self",
".",
"_registerLight",
"(",
"light_spectrum",
")",
"self",
".",
"_checkShape",
"(",
"arr",
")",
"date",
"=",
"_toDate",
"(",
"date",
")",
"f",
"=",
"self",
".",
"coeffs",
"[",
"'flat field'",
"]",
"if",
"light_spectrum",
"not",
"in",
"f",
":",
"f",
"[",
"light_spectrum",
"]",
"=",
"[",
"]",
"f",
"[",
"light_spectrum",
"]",
".",
"insert",
"(",
"_insertDateIndex",
"(",
"date",
",",
"f",
"[",
"light_spectrum",
"]",
")",
",",
"[",
"date",
",",
"info",
",",
"arr",
",",
"error",
"]",
")"
] |
light_spectrum = light, IR ...
|
[
"light_spectrum",
"=",
"light",
"IR",
"..."
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/CameraCalibration.py#L255-L267
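Extending the sketch above with a synthetic flat field for an extra light spectrum:
import numpy as np
flat = np.ones((480, 640))
flat[:, :320] *= 0.9                    # crude vignetting stand-in
cal.addFlatField(flat, date='30 Nov 16', light_spectrum='IR')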
|
radjkarl/imgProcessor
|
imgProcessor/camera/CameraCalibration.py
|
CameraCalibration.addLens
|
def addLens(self, lens, date=None, info='', light_spectrum='visible'):
'''
lens -> instance of LensDistortion or saved file
'''
self._registerLight(light_spectrum)
date = _toDate(date)
if not isinstance(lens, LensDistortion):
l = LensDistortion()
l.readFromFile(lens)
lens = l
f = self.coeffs['lens']
if light_spectrum not in f:
f[light_spectrum] = []
f[light_spectrum].insert(_insertDateIndex(date, f[light_spectrum]),
[date, info, lens.coeffs])
|
python
|
def addLens(self, lens, date=None, info='', light_spectrum='visible'):
'''
lens -> instance of LensDistortion or saved file
'''
self._registerLight(light_spectrum)
date = _toDate(date)
if not isinstance(lens, LensDistortion):
l = LensDistortion()
l.readFromFile(lens)
lens = l
f = self.coeffs['lens']
if light_spectrum not in f:
f[light_spectrum] = []
f[light_spectrum].insert(_insertDateIndex(date, f[light_spectrum]),
[date, info, lens.coeffs])
|
[
"def",
"addLens",
"(",
"self",
",",
"lens",
",",
"date",
"=",
"None",
",",
"info",
"=",
"''",
",",
"light_spectrum",
"=",
"'visible'",
")",
":",
"self",
".",
"_registerLight",
"(",
"light_spectrum",
")",
"date",
"=",
"_toDate",
"(",
"date",
")",
"if",
"not",
"isinstance",
"(",
"lens",
",",
"LensDistortion",
")",
":",
"l",
"=",
"LensDistortion",
"(",
")",
"l",
".",
"readFromFile",
"(",
"lens",
")",
"lens",
"=",
"l",
"f",
"=",
"self",
".",
"coeffs",
"[",
"'lens'",
"]",
"if",
"light_spectrum",
"not",
"in",
"f",
":",
"f",
"[",
"light_spectrum",
"]",
"=",
"[",
"]",
"f",
"[",
"light_spectrum",
"]",
".",
"insert",
"(",
"_insertDateIndex",
"(",
"date",
",",
"f",
"[",
"light_spectrum",
"]",
")",
",",
"[",
"date",
",",
"info",
",",
"lens",
".",
"coeffs",
"]",
")"
] |
lens -> instance of LensDistortion or saved file
|
[
"lens",
"-",
">",
"instance",
"of",
"LensDistortion",
"or",
"saved",
"file"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/CameraCalibration.py#L269-L285
|
radjkarl/imgProcessor
|
imgProcessor/camera/CameraCalibration.py
|
CameraCalibration.clearOldCalibrations
|
def clearOldCalibrations(self, date=None):
'''
if no specific date is given, remove all calibrations except the youngest
'''
self.coeffs['dark current'] = [self.coeffs['dark current'][-1]]
self.coeffs['noise'] = [self.coeffs['noise'][-1]]
for light in self.coeffs['flat field']:
self.coeffs['flat field'][light] = [
self.coeffs['flat field'][light][-1]]
for light in self.coeffs['lens']:
self.coeffs['lens'][light] = [self.coeffs['lens'][light][-1]]
|
python
|
def clearOldCalibrations(self, date=None):
'''
if no specific date is given, remove all calibrations except the youngest
'''
self.coeffs['dark current'] = [self.coeffs['dark current'][-1]]
self.coeffs['noise'] = [self.coeffs['noise'][-1]]
for light in self.coeffs['flat field']:
self.coeffs['flat field'][light] = [
self.coeffs['flat field'][light][-1]]
for light in self.coeffs['lens']:
self.coeffs['lens'][light] = [self.coeffs['lens'][light][-1]]
|
[
"def",
"clearOldCalibrations",
"(",
"self",
",",
"date",
"=",
"None",
")",
":",
"self",
".",
"coeffs",
"[",
"'dark current'",
"]",
"=",
"[",
"self",
".",
"coeffs",
"[",
"'dark current'",
"]",
"[",
"-",
"1",
"]",
"]",
"self",
".",
"coeffs",
"[",
"'noise'",
"]",
"=",
"[",
"self",
".",
"coeffs",
"[",
"'noise'",
"]",
"[",
"-",
"1",
"]",
"]",
"for",
"light",
"in",
"self",
".",
"coeffs",
"[",
"'flat field'",
"]",
":",
"self",
".",
"coeffs",
"[",
"'flat field'",
"]",
"[",
"light",
"]",
"=",
"[",
"self",
".",
"coeffs",
"[",
"'flat field'",
"]",
"[",
"light",
"]",
"[",
"-",
"1",
"]",
"]",
"for",
"light",
"in",
"self",
".",
"coeffs",
"[",
"'lens'",
"]",
":",
"self",
".",
"coeffs",
"[",
"'lens'",
"]",
"[",
"light",
"]",
"=",
"[",
"self",
".",
"coeffs",
"[",
"'lens'",
"]",
"[",
"light",
"]",
"[",
"-",
"1",
"]",
"]"
] |
if no specific date is given, remove all calibrations except the youngest
|
[
"if",
"not",
"only",
"a",
"specific",
"date",
"than",
"remove",
"all",
"except",
"of",
"the",
"youngest",
"calibration"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/CameraCalibration.py#L287-L298
|
radjkarl/imgProcessor
|
imgProcessor/camera/CameraCalibration.py
|
CameraCalibration.transpose
|
def transpose(self):
'''
transpose all calibration arrays
in case different array shape orders were used (x,y) vs. (y,x)
'''
def _t(item):
if type(item) == list:
for n, it in enumerate(item):
if type(it) == tuple:
it = list(it)
item[n] = it
if type(it) == list:
_t(it)
if isinstance(it, np.ndarray) and it.shape == s:
item[n] = it.T
s = self.coeffs['shape']
for item in self.coeffs.values():
if type(item) == dict:
for item2 in item.values():
_t(item2)
else:
_t(item)
self.coeffs['shape'] = s[::-1]
|
python
|
def transpose(self):
'''
transpose all calibration arrays
in case different array shape orders were used (x,y) vs. (y,x)
'''
def _t(item):
if type(item) == list:
for n, it in enumerate(item):
if type(it) == tuple:
it = list(it)
item[n] = it
if type(it) == list:
_t(it)
if isinstance(it, np.ndarray) and it.shape == s:
item[n] = it.T
s = self.coeffs['shape']
for item in self.coeffs.values():
if type(item) == dict:
for item2 in item.values():
_t(item2)
else:
_t(item)
self.coeffs['shape'] = s[::-1]
|
[
"def",
"transpose",
"(",
"self",
")",
":",
"def",
"_t",
"(",
"item",
")",
":",
"if",
"type",
"(",
"item",
")",
"==",
"list",
":",
"for",
"n",
",",
"it",
"in",
"enumerate",
"(",
"item",
")",
":",
"if",
"type",
"(",
"it",
")",
"==",
"tuple",
":",
"it",
"=",
"list",
"(",
"it",
")",
"item",
"[",
"n",
"]",
"=",
"it",
"if",
"type",
"(",
"it",
")",
"==",
"list",
":",
"_t",
"(",
"it",
")",
"if",
"isinstance",
"(",
"it",
",",
"np",
".",
"ndarray",
")",
"and",
"it",
".",
"shape",
"==",
"s",
":",
"item",
"[",
"n",
"]",
"=",
"it",
".",
"T",
"s",
"=",
"self",
".",
"coeffs",
"[",
"'shape'",
"]",
"for",
"item",
"in",
"self",
".",
"coeffs",
".",
"values",
"(",
")",
":",
"if",
"type",
"(",
"item",
")",
"==",
"dict",
":",
"for",
"item2",
"in",
"item",
".",
"values",
"(",
")",
":",
"_t",
"(",
"item2",
")",
"else",
":",
"_t",
"(",
"item",
")",
"self",
".",
"coeffs",
"[",
"'shape'",
"]",
"=",
"s",
"[",
":",
":",
"-",
"1",
"]"
] |
transpose all calibration arrays
in case different array shape orders were used (x,y) vs. (y,x)
|
[
"transpose",
"all",
"calibration",
"arrays",
"in",
"case",
"different",
"array",
"shape",
"orders",
"were",
"used",
"(",
"x",
"y",
")",
"vs",
".",
"(",
"y",
"x",
")"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/CameraCalibration.py#L324-L349
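The walk only transposes ndarrays whose shape equals `coeffs['shape']`, so a single call adapts a calibration stored (x, y) to images indexed (y, x). The sketch assumes 'shape' is set when arrays are added, which is not shown in these excerpts:
cal.coeffs['shape'] = (480, 640)        # assumed to be maintained by the add* methods
cal.transpose()                         # every matching array is now (640, 480)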
|
radjkarl/imgProcessor
|
imgProcessor/camera/CameraCalibration.py
|
CameraCalibration.correct
|
def correct(self, images,
bgImages=None,
exposure_time=None,
light_spectrum=None,
threshold=0.1,
keep_size=True,
date=None,
deblur=False,
denoise=False):
'''
exposure_time [s]
date -> string e.g. '30. Nov 15' to get a calibration from that date
-> {'dark current':'30. Nov 15',
'flat field':'15. Nov 15',
'lens':'14. Nov 15',
'noise':'01. Nov 15'}
'''
print('CORRECT CAMERA ...')
if isinstance(date, string_types) or date is None:
date = {'dark current': date,
'flat field': date,
'lens': date,
'noise': date,
'psf': date}
if light_spectrum is None:
try:
light_spectrum = self.coeffs['light spectra'][0]
except IndexError:
pass
# do we have multiple images?
if (type(images) in (list, tuple) or
(isinstance(images, np.ndarray) and
images.ndim == 3 and
images.shape[-1] not in (3, 4) # is color
)):
if len(images) > 1:
# 0.NOISE
n = self.coeffs['noise']
if self.noise_level_function is None and len(n):
n = _getFromDate(n, date['noise'])[2]
self.noise_level_function = lambda x: NoiseLevelFunction.boundedFunction(
x, *n)
print('... remove single-time-effects from images ')
# 1. STE REMOVAL ONLY IF >=2 IMAGES ARE GIVEN:
ste = SingleTimeEffectDetection(images, nStd=4,
noise_level_function=self.noise_level_function)
image = ste.noSTE
if self.noise_level_function is None:
self.noise_level_function = ste.noise_level_function
else:
image = np.asfarray(imread(images[0], dtype=np.float))
else:
image = np.asfarray(imread(images, dtype=np.float))
self._checkShape(image)
self.last_light_spectrum = light_spectrum
self.last_img = image
# 2. BACKGROUND REMOVAL
try:
self._correctDarkCurrent(image, exposure_time, bgImages,
date['dark current'])
except Exception as errm:
print('Error: %s' % errm)
# 3. VIGNETTING/SENSITIVITY CORRECTION:
try:
self._correctVignetting(image, light_spectrum,
date['flat field'])
except Exception as errm:
print('Error: %s' % errm)
# 4. REPLACE DEFECTIVE PX WITH MEDIAN FILTERED VALUE
if threshold > 0:
print('... remove artefacts')
try:
image = self._correctArtefacts(image, threshold)
except Exception as errm:
print('Error: %s' % errm)
# 5. DEBLUR
if deblur:
print('... remove blur')
try:
image = self._correctBlur(image, light_spectrum, date['psf'])
except Exception as errm:
print('Error: %s' % errm)
# 6. LENS CORRECTION:
try:
image = self._correctLens(image, light_spectrum, date['lens'],
keep_size)
except TypeError:
print('Error: no lens calibration found')
except Exception as errm:
print('Error: %s' % errm)
# 7. Denoise
if denoise:
print('... denoise ... this might take some time')
image = self._correctNoise(image)
print('DONE')
return image
|
python
|
def correct(self, images,
bgImages=None,
exposure_time=None,
light_spectrum=None,
threshold=0.1,
keep_size=True,
date=None,
deblur=False,
denoise=False):
'''
exposure_time [s]
date -> string e.g. '30. Nov 15' to get a calibration from that date
-> {'dark current':'30. Nov 15',
'flat field':'15. Nov 15',
'lens':'14. Nov 15',
'noise':'01. Nov 15'}
'''
print('CORRECT CAMERA ...')
if isinstance(date, string_types) or date is None:
date = {'dark current': date,
'flat field': date,
'lens': date,
'noise': date,
'psf': date}
if light_spectrum is None:
try:
light_spectrum = self.coeffs['light spectra'][0]
except IndexError:
pass
# do we have multiple images?
if (type(images) in (list, tuple) or
(isinstance(images, np.ndarray) and
images.ndim == 3 and
images.shape[-1] not in (3, 4) # is color
)):
if len(images) > 1:
# 0.NOISE
n = self.coeffs['noise']
if self.noise_level_function is None and len(n):
n = _getFromDate(n, date['noise'])[2]
self.noise_level_function = lambda x: NoiseLevelFunction.boundedFunction(
x, *n)
print('... remove single-time-effects from images ')
# 1. STE REMOVAL ONLY IF >=2 IMAGES ARE GIVEN:
ste = SingleTimeEffectDetection(images, nStd=4,
noise_level_function=self.noise_level_function)
image = ste.noSTE
if self.noise_level_function is None:
self.noise_level_function = ste.noise_level_function
else:
image = np.asfarray(imread(images[0], dtype=np.float))
else:
image = np.asfarray(imread(images, dtype=np.float))
self._checkShape(image)
self.last_light_spectrum = light_spectrum
self.last_img = image
# 2. BACKGROUND REMOVAL
try:
self._correctDarkCurrent(image, exposure_time, bgImages,
date['dark current'])
except Exception as errm:
print('Error: %s' % errm)
# 3. VIGNETTING/SENSITIVITY CORRECTION:
try:
self._correctVignetting(image, light_spectrum,
date['flat field'])
except Exception as errm:
print('Error: %s' % errm)
# 4. REPLACE DEFECTIVE PX WITH MEDIAN FILTERED VALUE
if threshold > 0:
print('... remove artefacts')
try:
image = self._correctArtefacts(image, threshold)
except Exception as errm:
print('Error: %s' % errm)
# 5. DEBLUR
if deblur:
print('... remove blur')
try:
image = self._correctBlur(image, light_spectrum, date['psf'])
except Exception as errm:
print('Error: %s' % errm)
# 6. LENS CORRECTION:
try:
image = self._correctLens(image, light_spectrum, date['lens'],
keep_size)
except TypeError:
print('Error: no lens calibration found')
except Exception as errm:
print('Error: %s' % errm)
# 7. Denoise
if denoise:
print('... denoise ... this might take some time')
image = self._correctNoise(image)
print('DONE')
return image
|
[
"def",
"correct",
"(",
"self",
",",
"images",
",",
"bgImages",
"=",
"None",
",",
"exposure_time",
"=",
"None",
",",
"light_spectrum",
"=",
"None",
",",
"threshold",
"=",
"0.1",
",",
"keep_size",
"=",
"True",
",",
"date",
"=",
"None",
",",
"deblur",
"=",
"False",
",",
"denoise",
"=",
"False",
")",
":",
"print",
"(",
"'CORRECT CAMERA ...'",
")",
"if",
"isinstance",
"(",
"date",
",",
"string_types",
")",
"or",
"date",
"is",
"None",
":",
"date",
"=",
"{",
"'dark current'",
":",
"date",
",",
"'flat field'",
":",
"date",
",",
"'lens'",
":",
"date",
",",
"'noise'",
":",
"date",
",",
"'psf'",
":",
"date",
"}",
"if",
"light_spectrum",
"is",
"None",
":",
"try",
":",
"light_spectrum",
"=",
"self",
".",
"coeffs",
"[",
"'light spectra'",
"]",
"[",
"0",
"]",
"except",
"IndexError",
":",
"pass",
"# do we have multiple images?\r",
"if",
"(",
"type",
"(",
"images",
")",
"in",
"(",
"list",
",",
"tuple",
")",
"or",
"(",
"isinstance",
"(",
"images",
",",
"np",
".",
"ndarray",
")",
"and",
"images",
".",
"ndim",
"==",
"3",
"and",
"images",
".",
"shape",
"[",
"-",
"1",
"]",
"not",
"in",
"(",
"3",
",",
"4",
")",
"# is color\r",
")",
")",
":",
"if",
"len",
"(",
"images",
")",
">",
"1",
":",
"# 0.NOISE\r",
"n",
"=",
"self",
".",
"coeffs",
"[",
"'noise'",
"]",
"if",
"self",
".",
"noise_level_function",
"is",
"None",
"and",
"len",
"(",
"n",
")",
":",
"n",
"=",
"_getFromDate",
"(",
"n",
",",
"date",
"[",
"'noise'",
"]",
")",
"[",
"2",
"]",
"self",
".",
"noise_level_function",
"=",
"lambda",
"x",
":",
"NoiseLevelFunction",
".",
"boundedFunction",
"(",
"x",
",",
"*",
"n",
")",
"print",
"(",
"'... remove single-time-effects from images '",
")",
"# 1. STE REMOVAL ONLY IF >=2 IMAGES ARE GIVEN:\r",
"ste",
"=",
"SingleTimeEffectDetection",
"(",
"images",
",",
"nStd",
"=",
"4",
",",
"noise_level_function",
"=",
"self",
".",
"noise_level_function",
")",
"image",
"=",
"ste",
".",
"noSTE",
"if",
"self",
".",
"noise_level_function",
"is",
"None",
":",
"self",
".",
"noise_level_function",
"=",
"ste",
".",
"noise_level_function",
"else",
":",
"image",
"=",
"np",
".",
"asfarray",
"(",
"imread",
"(",
"images",
"[",
"0",
"]",
",",
"dtype",
"=",
"np",
".",
"float",
")",
")",
"else",
":",
"image",
"=",
"np",
".",
"asfarray",
"(",
"imread",
"(",
"images",
",",
"dtype",
"=",
"np",
".",
"float",
")",
")",
"self",
".",
"_checkShape",
"(",
"image",
")",
"self",
".",
"last_light_spectrum",
"=",
"light_spectrum",
"self",
".",
"last_img",
"=",
"image",
"# 2. BACKGROUND REMOVAL\r",
"try",
":",
"self",
".",
"_correctDarkCurrent",
"(",
"image",
",",
"exposure_time",
",",
"bgImages",
",",
"date",
"[",
"'dark current'",
"]",
")",
"except",
"Exception",
"as",
"errm",
":",
"print",
"(",
"'Error: %s'",
"%",
"errm",
")",
"# 3. VIGNETTING/SENSITIVITY CORRECTION:\r",
"try",
":",
"self",
".",
"_correctVignetting",
"(",
"image",
",",
"light_spectrum",
",",
"date",
"[",
"'flat field'",
"]",
")",
"except",
"Exception",
"as",
"errm",
":",
"print",
"(",
"'Error: %s'",
"%",
"errm",
")",
"# 4. REPLACE DECECTIVE PX WITH MEDIAN FILTERED FALUE\r",
"if",
"threshold",
">",
"0",
":",
"print",
"(",
"'... remove artefacts'",
")",
"try",
":",
"image",
"=",
"self",
".",
"_correctArtefacts",
"(",
"image",
",",
"threshold",
")",
"except",
"Exception",
"as",
"errm",
":",
"print",
"(",
"'Error: %s'",
"%",
"errm",
")",
"# 5. DEBLUR\r",
"if",
"deblur",
":",
"print",
"(",
"'... remove blur'",
")",
"try",
":",
"image",
"=",
"self",
".",
"_correctBlur",
"(",
"image",
",",
"light_spectrum",
",",
"date",
"[",
"'psf'",
"]",
")",
"except",
"Exception",
"as",
"errm",
":",
"print",
"(",
"'Error: %s'",
"%",
"errm",
")",
"# 5. LENS CORRECTION:\r",
"try",
":",
"image",
"=",
"self",
".",
"_correctLens",
"(",
"image",
",",
"light_spectrum",
",",
"date",
"[",
"'lens'",
"]",
",",
"keep_size",
")",
"except",
"TypeError",
":",
"'Error: no lens calibration found'",
"except",
"Exception",
"as",
"errm",
":",
"print",
"(",
"'Error: %s'",
"%",
"errm",
")",
"# 6. Denoise\r",
"if",
"denoise",
":",
"print",
"(",
"'... denoise ... this might take some time'",
")",
"image",
"=",
"self",
".",
"_correctNoise",
"(",
"image",
")",
"print",
"(",
"'DONE'",
")",
"return",
"image"
] |
exposure_time [s]
date -> string e.g. '30. Nov 15' to get a calibration from that date
-> {'dark current':'30. Nov 15',
'flat field':'15. Nov 15',
'lens':'14. Nov 15',
'noise':'01. Nov 15'}
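
A minimal, hypothetical usage sketch of this pipeline (the instance `cal`, the file
names and the keyword names are assumptions inferred from the tokens above, not a
verified API):

# assuming `cal` is an already-populated CameraCalibration instance:
corrected = cal.correct(['img1.tiff', 'img2.tiff'],   # >=2 images enable STE removal
                        exposure_time=0.5,            # [s], for dark-current scaling
                        date={'dark current': '30. Nov 15',
                              'noise': '01. Nov 15'})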
|
[
"exposure_time",
"[",
"s",
"]",
"date",
"-",
">",
"string",
"e",
".",
"g",
".",
"30",
".",
"Nov",
"15",
"to",
"get",
"a",
"calibration",
"on",
"from",
"date",
"-",
">",
"{",
"dark",
"current",
":",
"30",
".",
"Nov",
"15",
"flat",
"field",
":",
"15",
".",
"Nov",
"15",
"lens",
":",
"14",
".",
"Nov",
"15",
"noise",
":",
"01",
".",
"Nov",
"15",
"}"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/CameraCalibration.py#L351-L459
|
radjkarl/imgProcessor
|
imgProcessor/camera/CameraCalibration.py
|
CameraCalibration._correctNoise
|
def _correctNoise(self, image):
'''
denoise using non-local-means
with guessing best parameters
'''
from skimage.restoration import denoise_nl_means # save startup time
image[np.isnan(image)] = 0 # otherwise result =nan
out = denoise_nl_means(image,
patch_size=7,
patch_distance=11,
#h=signalStd(image) * 0.1
)
return out
|
python
|
def _correctNoise(self, image):
'''
denoise using non-local-means
with guessing best parameters
'''
from skimage.restoration import denoise_nl_means # save startup time
image[np.isnan(image)] = 0 # otherwise result =nan
out = denoise_nl_means(image,
patch_size=7,
patch_distance=11,
#h=signalStd(image) * 0.1
)
return out
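
A self-contained sketch of the same denoising call on synthetic data (parameters are
copied from the method above; this is an illustration, not repository code):

import numpy as np
from skimage.restoration import denoise_nl_means

noisy = np.clip(np.random.rand(64, 64) + 0.1 * np.random.randn(64, 64), 0, None)
noisy[np.isnan(noisy)] = 0  # guard against NaNs, as in _correctNoise
clean = denoise_nl_means(noisy, patch_size=7, patch_distance=11)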
|
[
"def",
"_correctNoise",
"(",
"self",
",",
"image",
")",
":",
"from",
"skimage",
".",
"restoration",
"import",
"denoise_nl_means",
"# save startup time\r",
"image",
"[",
"np",
".",
"isnan",
"(",
"image",
")",
"]",
"=",
"0",
"# otherwise result =nan\r",
"out",
"=",
"denoise_nl_means",
"(",
"image",
",",
"patch_size",
"=",
"7",
",",
"patch_distance",
"=",
"11",
",",
"#h=signalStd(image) * 0.1\r",
")",
"return",
"out"
] |
denoise using non-local-means
with guessing best parameters
|
[
"denoise",
"using",
"non",
"-",
"local",
"-",
"means",
"with",
"guessing",
"best",
"parameters"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/CameraCalibration.py#L461-L474
|
radjkarl/imgProcessor
|
imgProcessor/camera/CameraCalibration.py
|
CameraCalibration._correctDarkCurrent
|
def _correctDarkCurrent(self, image, exposuretime, bgImages, date):
'''
open OR calculate a background image: f(t)=m*t+n
'''
# either exposureTime or bgImages has to be given
# if exposuretime is not None or bgImages is not None:
print('... remove dark current')
if bgImages is not None:
if (type(bgImages) in (list, tuple) or
(isinstance(bgImages, np.ndarray) and
bgImages.ndim == 3)):
if len(bgImages) > 1:
# if multiple images are given: do STE removal:
nlf = self.noise_level_function
bg = SingleTimeEffectDetection(
bgImages, nStd=4,
noise_level_function=nlf).noSTE
else:
bg = imread(bgImages[0])
else:
bg = imread(bgImages)
else:
bg = self.calcDarkCurrent(exposuretime, date)
self.temp['bg'] = bg
image -= bg
|
python
|
def _correctDarkCurrent(self, image, exposuretime, bgImages, date):
'''
open OR calculate a background image: f(t)=m*t+n
'''
# either exposureTime or bgImages has to be given
# if exposuretime is not None or bgImages is not None:
print('... remove dark current')
if bgImages is not None:
if (type(bgImages) in (list, tuple) or
(isinstance(bgImages, np.ndarray) and
bgImages.ndim == 3)):
if len(bgImages) > 1:
# if multiple images are given: do STE removal:
nlf = self.noise_level_function
bg = SingleTimeEffectDetection(
bgImages, nStd=4,
noise_level_function=nlf).noSTE
else:
bg = imread(bgImages[0])
else:
bg = imread(bgImages)
else:
bg = self.calcDarkCurrent(exposuretime, date)
self.temp['bg'] = bg
image -= bg
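
For illustration, the linear dark-current model f(t) = m*t + n evaluated by hand
(the scalar m, n are placeholders; the real calcDarkCurrent uses stored calibration
maps instead):

import numpy as np

m, n = 0.02, 1.5            # assumed slope/offset of the dark current
exposure_time = 0.5         # [s]
bg = m * exposure_time + n
image = np.full((4, 4), 10.0)
image -= bg                 # same subtraction as at the end of the method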
|
[
"def",
"_correctDarkCurrent",
"(",
"self",
",",
"image",
",",
"exposuretime",
",",
"bgImages",
",",
"date",
")",
":",
"# either exposureTime or bgImages has to be given\r",
"# if exposuretime is not None or bgImages is not None:\r",
"print",
"(",
"'... remove dark current'",
")",
"if",
"bgImages",
"is",
"not",
"None",
":",
"if",
"(",
"type",
"(",
"bgImages",
")",
"in",
"(",
"list",
",",
"tuple",
")",
"or",
"(",
"isinstance",
"(",
"bgImages",
",",
"np",
".",
"ndarray",
")",
"and",
"bgImages",
".",
"ndim",
"==",
"3",
")",
")",
":",
"if",
"len",
"(",
"bgImages",
")",
">",
"1",
":",
"# if multiple images are given: do STE removal:\r",
"nlf",
"=",
"self",
".",
"noise_level_function",
"bg",
"=",
"SingleTimeEffectDetection",
"(",
"bgImages",
",",
"nStd",
"=",
"4",
",",
"noise_level_function",
"=",
"nlf",
")",
".",
"noSTE",
"else",
":",
"bg",
"=",
"imread",
"(",
"bgImages",
"[",
"0",
"]",
")",
"else",
":",
"bg",
"=",
"imread",
"(",
"bgImages",
")",
"else",
":",
"bg",
"=",
"self",
".",
"calcDarkCurrent",
"(",
"exposuretime",
",",
"date",
")",
"self",
".",
"temp",
"[",
"'bg'",
"]",
"=",
"bg",
"image",
"-=",
"bg"
] |
open OR calculate a background image: f(t)=m*t+n
|
[
"open",
"OR",
"calculate",
"a",
"background",
"image",
":",
"f",
"(",
"t",
")",
"=",
"m",
"*",
"t",
"+",
"n"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/CameraCalibration.py#L476-L502
|
radjkarl/imgProcessor
|
imgProcessor/camera/CameraCalibration.py
|
CameraCalibration._correctArtefacts
|
def _correctArtefacts(self, image, threshold):
'''
Apply a thresholded median replacing high gradients
and values beyond the boundaries
'''
image = np.nan_to_num(image)
medianThreshold(image, threshold, copy=False)
return image
|
python
|
def _correctArtefacts(self, image, threshold):
'''
Apply a thresholded median replacing high gradients
and values beyond the boundaries
'''
image = np.nan_to_num(image)
medianThreshold(image, threshold, copy=False)
return image
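
medianThreshold is a repository helper; a rough standalone approximation of the idea
(replace outliers with the local median) could look like this:

import numpy as np
from scipy.ndimage import median_filter

img = np.random.rand(32, 32)
img[5, 5] = 100.0                       # artificial hot pixel
med = median_filter(img, size=3)
outliers = np.abs(img - med) > 3 * img.std()
img[outliers] = med[outliers]           # replace artefacts with the median value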
|
[
"def",
"_correctArtefacts",
"(",
"self",
",",
"image",
",",
"threshold",
")",
":",
"image",
"=",
"np",
".",
"nan_to_num",
"(",
"image",
")",
"medianThreshold",
"(",
"image",
",",
"threshold",
",",
"copy",
"=",
"False",
")",
"return",
"image"
] |
Apply a thresholded median replacing high gradients
and values beyond the boundaries
|
[
"Apply",
"a",
"thresholded",
"median",
"replacing",
"high",
"gradients",
"and",
"values",
"beyond",
"the",
"boundaries"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/CameraCalibration.py#L556-L563
|
radjkarl/imgProcessor
|
imgProcessor/camera/CameraCalibration.py
|
CameraCalibration.getCoeff
|
def getCoeff(self, name, light=None, date=None):
'''
try to get calibration for right light source, but
use another if it is nonexistent
'''
d = self.coeffs[name]
try:
c = d[light]
except KeyError:
try:
k, i = next(iter(d.items()))
if light is not None:
print(
'no calibration found for [%s] - using [%s] instead' % (light, k))
except StopIteration:
return None
c = i
except TypeError:
# coeff not dependent on light source
c = d
return _getFromDate(c, date)
|
python
|
def getCoeff(self, name, light=None, date=None):
'''
try to get calibration for right light source, but
use another if it is nonexistent
'''
d = self.coeffs[name]
try:
c = d[light]
except KeyError:
try:
k, i = next(iter(d.items()))
if light is not None:
print(
'no calibration found for [%s] - using [%s] instead' % (light, k))
except StopIteration:
return None
c = i
except TypeError:
# coeff not dependent on light source
c = d
return _getFromDate(c, date)
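
Hypothetical usage (the instance, coefficient name and light/date values are
placeholders):

coeff = cal.getCoeff('flat field', light='sun', date='15. Nov 15')
if coeff is None:
    print('no calibration stored at all')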
|
[
"def",
"getCoeff",
"(",
"self",
",",
"name",
",",
"light",
"=",
"None",
",",
"date",
"=",
"None",
")",
":",
"d",
"=",
"self",
".",
"coeffs",
"[",
"name",
"]",
"try",
":",
"c",
"=",
"d",
"[",
"light",
"]",
"except",
"KeyError",
":",
"try",
":",
"k",
",",
"i",
"=",
"next",
"(",
"iter",
"(",
"d",
".",
"items",
"(",
")",
")",
")",
"if",
"light",
"is",
"not",
"None",
":",
"print",
"(",
"'no calibration found for [%s] - using [%s] instead'",
"%",
"(",
"light",
",",
"k",
")",
")",
"except",
"StopIteration",
":",
"return",
"None",
"c",
"=",
"i",
"except",
"TypeError",
":",
"# coeff not dependent on light source\r",
"c",
"=",
"d",
"return",
"_getFromDate",
"(",
"c",
",",
"date",
")"
] |
try to get calibration for right light source, but
use another if it is nonexistent
|
[
"try",
"to",
"get",
"calibration",
"for",
"right",
"light",
"source",
"but",
"use",
"another",
"if",
"they",
"is",
"none",
"existent"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/CameraCalibration.py#L590-L611
|
radjkarl/imgProcessor
|
imgProcessor/camera/flatField/vignettingFromRandomSteps.py
|
vignettingFromRandomSteps
|
def vignettingFromRandomSteps(imgs, bg, inPlane_scale_factor=None,
debugFolder=None, **kwargs):
'''
important: first image should show most of the device
because it is used as reference
'''
# TODO: inPlane_scale_factor
if debugFolder:
debugFolder = PathStr(debugFolder)
s = ObjectVignettingSeparation(imgs[0], bg, **kwargs)
for img in imgs[1:]:
fit = s.addImg(img)
if debugFolder and fit is not False:
imwrite(debugFolder.join('fit_%s.tiff' % len(s.fits)), fit)
if debugFolder:
imwrite(debugFolder.join('init.tiff'), s.flatField)
smoothed_ff, mask, flatField, obj = s.separate()
if debugFolder:
imwrite(debugFolder.join('object.tiff'), obj)
imwrite(debugFolder.join('flatfield.tiff'), flatField, dtype=float)
imwrite(debugFolder.join('flatfield_smoothed.tiff'), smoothed_ff,
dtype=float)
return smoothed_ff, mask
|
python
|
def vignettingFromRandomSteps(imgs, bg, inPlane_scale_factor=None,
debugFolder=None, **kwargs):
'''
important: first image should show most of the device
because it is used as reference
'''
# TODO: inPlane_scale_factor
if debugFolder:
debugFolder = PathStr(debugFolder)
s = ObjectVignettingSeparation(imgs[0], bg, **kwargs)
for img in imgs[1:]:
fit = s.addImg(img)
if debugFolder and fit is not False:
imwrite(debugFolder.join('fit_%s.tiff' % len(s.fits)), fit)
if debugFolder:
imwrite(debugFolder.join('init.tiff'), s.flatField)
smoothed_ff, mask, flatField, obj = s.separate()
if debugFolder:
imwrite(debugFolder.join('object.tiff'), obj)
imwrite(debugFolder.join('flatfield.tiff'), flatField, dtype=float)
imwrite(debugFolder.join('flatfield_smoothed.tiff'), smoothed_ff,
dtype=float)
return smoothed_ff, mask
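
A hypothetical call (file names are placeholders; the first image should show most of
the device, as noted in the docstring):

imgs = ['step_%i.tiff' % i for i in range(5)]
smoothed_ff, mask = vignettingFromRandomSteps(imgs, bg='background.tiff',
                                              debugFolder='debug')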
|
[
"def",
"vignettingFromRandomSteps",
"(",
"imgs",
",",
"bg",
",",
"inPlane_scale_factor",
"=",
"None",
",",
"debugFolder",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# TODO: inPlane_scale_factor\r",
"if",
"debugFolder",
":",
"debugFolder",
"=",
"PathStr",
"(",
"debugFolder",
")",
"s",
"=",
"ObjectVignettingSeparation",
"(",
"imgs",
"[",
"0",
"]",
",",
"bg",
",",
"*",
"*",
"kwargs",
")",
"for",
"img",
"in",
"imgs",
"[",
"1",
":",
"]",
":",
"fit",
"=",
"s",
".",
"addImg",
"(",
"img",
")",
"if",
"debugFolder",
"and",
"fit",
"is",
"not",
"False",
":",
"imwrite",
"(",
"debugFolder",
".",
"join",
"(",
"'fit_%s.tiff'",
"%",
"len",
"(",
"s",
".",
"fits",
")",
")",
",",
"fit",
")",
"if",
"debugFolder",
":",
"imwrite",
"(",
"debugFolder",
".",
"join",
"(",
"'init.tiff'",
")",
",",
"s",
".",
"flatField",
")",
"smoothed_ff",
",",
"mask",
",",
"flatField",
",",
"obj",
"=",
"s",
".",
"separate",
"(",
")",
"if",
"debugFolder",
":",
"imwrite",
"(",
"debugFolder",
".",
"join",
"(",
"'object.tiff'",
")",
",",
"obj",
")",
"imwrite",
"(",
"debugFolder",
".",
"join",
"(",
"'flatfield.tiff'",
")",
",",
"flatField",
",",
"dtype",
"=",
"float",
")",
"imwrite",
"(",
"debugFolder",
".",
"join",
"(",
"'flatfield_smoothed.tiff'",
")",
",",
"smoothed_ff",
",",
"dtype",
"=",
"float",
")",
"return",
"smoothed_ff",
",",
"mask"
] |
important: first image should show most of the device
because it is used as reference
|
[
"important",
":",
"first",
"image",
"should",
"shown",
"most",
"iof",
"the",
"device",
"because",
"it",
"is",
"used",
"as",
"reference"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/flatField/vignettingFromRandomSteps.py#L324-L352
|
radjkarl/imgProcessor
|
imgProcessor/camera/flatField/vignettingFromRandomSteps.py
|
ObjectVignettingSeparation.addImg
|
def addImg(self, img, maxShear=0.015, maxRot=100, minMatches=12,
borderWidth=3): # borderWidth=100
"""
Args:
img (path or array): image containing the same object as in the reference image
Kwargs:
maxShear (float): In order to define a good fit, reject higher shear values between
this and the reference image
maxRot (float): Same for rotation
minMatches (int): Minimum number of matching points found in both this and the reference image
"""
try:
fit, img, H, H_inv, nmatched = self._fitImg(img)
except Exception as e:
print(e)
return
# CHECK WHETHER FIT IS GOOD ENOUGH:
(translation, rotation, scale, shear) = decompHomography(H)
print('Homography ...\n\ttranslation: %s\n\trotation: %s\n\tscale: %s\n\tshear: %s'
% (translation, rotation, scale, shear))
if (nmatched > minMatches
and abs(shear) < maxShear
and abs(rotation) < maxRot):
print('==> img added')
# HOMOGRAPHY:
self.Hs.append(H)
# INVERSE HOMOGRSAPHY
self.Hinvs.append(H_inv)
# IMAGES WARPED TO THE BASE IMAGE
self.fits.append(fit)
# ADD IMAGE TO THE INITIAL flatField ARRAY:
i = img > self.signal_ranges[-1][0]
# remove borders (that might have erroneous light):
i = minimum_filter(i, borderWidth)
self._ff_mma.update(img, i)
# create fit img mask:
mask = fit < self.signal_ranges[-1][0]
mask = maximum_filter(mask, borderWidth)
# IGNORE BORDER
r = self.remove_border_size
if r:
mask[:r, :] = 1
mask[-r:, :] = 1
mask[:, -r:] = 1
mask[:, :r] = 1
self._fit_masks.append(mask)
# image added
return fit
return False
|
python
|
def addImg(self, img, maxShear=0.015, maxRot=100, minMatches=12,
borderWidth=3): # borderWidth=100
"""
Args:
img (path or array): image containing the same object as in the reference image
Kwargs:
maxShear (float): In order to define a good fit, reject higher shear values between
this and the reference image
maxRot (float): Same for rotation
minMatches (int): Minimum number of matching points found in both this and the reference image
"""
try:
fit, img, H, H_inv, nmatched = self._fitImg(img)
except Exception as e:
print(e)
return
# CHECK WHETHER FIT IS GOOD ENOUGH:
(translation, rotation, scale, shear) = decompHomography(H)
print('Homography ...\n\ttranslation: %s\n\trotation: %s\n\tscale: %s\n\tshear: %s'
% (translation, rotation, scale, shear))
if (nmatched > minMatches
and abs(shear) < maxShear
and abs(rotation) < maxRot):
print('==> img added')
# HOMOGRAPHY:
self.Hs.append(H)
# INVERSE HOMOGRSAPHY
self.Hinvs.append(H_inv)
# IMAGES WARPED TO THE BASE IMAGE
self.fits.append(fit)
# ADD IMAGE TO THE INITIAL flatField ARRAY:
i = img > self.signal_ranges[-1][0]
# remove borders (that might have erroneous light):
i = minimum_filter(i, borderWidth)
self._ff_mma.update(img, i)
# create fit img mask:
mask = fit < self.signal_ranges[-1][0]
mask = maximum_filter(mask, borderWidth)
# IGNORE BORDER
r = self.remove_border_size
if r:
mask[:r, :] = 1
mask[-r:, :] = 1
mask[:, -r:] = 1
mask[:, :r] = 1
self._fit_masks.append(mask)
# image added
return fit
return False
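
Hypothetical usage (constructor arguments follow vignettingFromRandomSteps above;
reference_img and other_imgs are placeholders):

s = ObjectVignettingSeparation(reference_img, bg)
for img in other_imgs:
    fit = s.addImg(img, maxShear=0.015, minMatches=12)
    if fit is False:
        print('image rejected (too much shear/rotation or too few matches)')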
|
[
"def",
"addImg",
"(",
"self",
",",
"img",
",",
"maxShear",
"=",
"0.015",
",",
"maxRot",
"=",
"100",
",",
"minMatches",
"=",
"12",
",",
"borderWidth",
"=",
"3",
")",
":",
"# borderWidth=100\r",
"try",
":",
"fit",
",",
"img",
",",
"H",
",",
"H_inv",
",",
"nmatched",
"=",
"self",
".",
"_fitImg",
"(",
"img",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"e",
")",
"return",
"# CHECK WHETHER FIT IS GOOD ENOUGH:\r",
"(",
"translation",
",",
"rotation",
",",
"scale",
",",
"shear",
")",
"=",
"decompHomography",
"(",
"H",
")",
"print",
"(",
"'Homography ...\\n\\ttranslation: %s\\n\\trotation: %s\\n\\tscale: %s\\n\\tshear: %s'",
"%",
"(",
"translation",
",",
"rotation",
",",
"scale",
",",
"shear",
")",
")",
"if",
"(",
"nmatched",
">",
"minMatches",
"and",
"abs",
"(",
"shear",
")",
"<",
"maxShear",
"and",
"abs",
"(",
"rotation",
")",
"<",
"maxRot",
")",
":",
"print",
"(",
"'==> img added'",
")",
"# HOMOGRAPHY:\r",
"self",
".",
"Hs",
".",
"append",
"(",
"H",
")",
"# INVERSE HOMOGRSAPHY\r",
"self",
".",
"Hinvs",
".",
"append",
"(",
"H_inv",
")",
"# IMAGES WARPED TO THE BASE IMAGE\r",
"self",
".",
"fits",
".",
"append",
"(",
"fit",
")",
"# ADD IMAGE TO THE INITIAL flatField ARRAY:\r",
"i",
"=",
"img",
">",
"self",
".",
"signal_ranges",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
"# remove borders (that might have erroneous light):\r",
"i",
"=",
"minimum_filter",
"(",
"i",
",",
"borderWidth",
")",
"self",
".",
"_ff_mma",
".",
"update",
"(",
"img",
",",
"i",
")",
"# create fit img mask:\r",
"mask",
"=",
"fit",
"<",
"self",
".",
"signal_ranges",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
"mask",
"=",
"maximum_filter",
"(",
"mask",
",",
"borderWidth",
")",
"# IGNORE BORDER\r",
"r",
"=",
"self",
".",
"remove_border_size",
"if",
"r",
":",
"mask",
"[",
":",
"r",
",",
":",
"]",
"=",
"1",
"mask",
"[",
"-",
"r",
":",
",",
":",
"]",
"=",
"1",
"mask",
"[",
":",
",",
"-",
"r",
":",
"]",
"=",
"1",
"mask",
"[",
":",
",",
":",
"r",
"]",
"=",
"1",
"self",
".",
"_fit_masks",
".",
"append",
"(",
"mask",
")",
"# image added\r",
"return",
"fit",
"return",
"False"
] |
Args:
img (path or array): image containing the same object as in the reference image
Kwargs:
maxShear (float): In order to define a good fit, reject higher shear values between
this and the reference image
maxRot (float): Same for rotation
minMatches (int): Minimum number of matching points found in both this and the reference image
|
[
"Args",
":",
"img",
"(",
"path",
"or",
"array",
")",
":",
"image",
"containing",
"the",
"same",
"object",
"as",
"in",
"the",
"reference",
"image",
"Kwargs",
":",
"maxShear",
"(",
"float",
")",
":",
"In",
"order",
"to",
"define",
"a",
"good",
"fit",
"refect",
"higher",
"shear",
"values",
"between",
"this",
"and",
"the",
"reference",
"image",
"maxRot",
"(",
"float",
")",
":",
"Same",
"for",
"rotation",
"minMatches",
"(",
"int",
")",
":",
"Minimum",
"of",
"mating",
"points",
"found",
"in",
"both",
"this",
"and",
"the",
"reference",
"image"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/flatField/vignettingFromRandomSteps.py#L114-L167
|
radjkarl/imgProcessor
|
imgProcessor/camera/flatField/vignettingFromRandomSteps.py
|
ObjectVignettingSeparation.error
|
def error(self, nCells=15):
'''
calculate the standard deviation of all fitted images,
averaged to a grid
'''
s0, s1 = self.fits[0].shape
aR = s0 / s1
if aR > 1:
ss0 = int(nCells)
ss1 = int(ss0 / aR)
else:
ss1 = int(nCells)
ss0 = int(ss1 * aR)
L = len(self.fits)
arr = np.array(self.fits)
arr[np.array(self._fit_masks)] = np.nan
avg = np.tile(np.nanmean(arr, axis=0), (L, 1, 1))
arr = (arr - avg) / avg
out = np.empty(shape=(L, ss0, ss1))
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
for n, f in enumerate(arr):
out[n] = subCell2DFnArray(f, np.nanmean, (ss0, ss1))
return np.nanmean(out**2)**0.5
|
python
|
def error(self, nCells=15):
'''
calculate the standard deviation of all fitted images,
averaged to a grid
'''
s0, s1 = self.fits[0].shape
aR = s0 / s1
if aR > 1:
ss0 = int(nCells)
ss1 = int(ss0 / aR)
else:
ss1 = int(nCells)
ss0 = int(ss1 * aR)
L = len(self.fits)
arr = np.array(self.fits)
arr[np.array(self._fit_masks)] = np.nan
avg = np.tile(np.nanmean(arr, axis=0), (L, 1, 1))
arr = (arr - avg) / avg
out = np.empty(shape=(L, ss0, ss1))
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
for n, f in enumerate(arr):
out[n] = subCell2DFnArray(f, np.nanmean, (ss0, ss1))
return np.nanmean(out**2)**0.5
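
Usage sketch, continuing the example above (returns the relative RMS deviation of all
accepted fits, averaged over an nCells-wide grid):

rel_rms = s.error(nCells=15)
print('fit error: %.1f %%' % (100 * rel_rms))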
|
[
"def",
"error",
"(",
"self",
",",
"nCells",
"=",
"15",
")",
":",
"s0",
",",
"s1",
"=",
"self",
".",
"fits",
"[",
"0",
"]",
".",
"shape",
"aR",
"=",
"s0",
"/",
"s1",
"if",
"aR",
">",
"1",
":",
"ss0",
"=",
"int",
"(",
"nCells",
")",
"ss1",
"=",
"int",
"(",
"ss0",
"/",
"aR",
")",
"else",
":",
"ss1",
"=",
"int",
"(",
"nCells",
")",
"ss0",
"=",
"int",
"(",
"ss1",
"*",
"aR",
")",
"L",
"=",
"len",
"(",
"self",
".",
"fits",
")",
"arr",
"=",
"np",
".",
"array",
"(",
"self",
".",
"fits",
")",
"arr",
"[",
"np",
".",
"array",
"(",
"self",
".",
"_fit_masks",
")",
"]",
"=",
"np",
".",
"nan",
"avg",
"=",
"np",
".",
"tile",
"(",
"np",
".",
"nanmean",
"(",
"arr",
",",
"axis",
"=",
"0",
")",
",",
"(",
"L",
",",
"1",
",",
"1",
")",
")",
"arr",
"=",
"(",
"arr",
"-",
"avg",
")",
"/",
"avg",
"out",
"=",
"np",
".",
"empty",
"(",
"shape",
"=",
"(",
"L",
",",
"ss0",
",",
"ss1",
")",
")",
"with",
"warnings",
".",
"catch_warnings",
"(",
")",
":",
"warnings",
".",
"simplefilter",
"(",
"\"ignore\"",
",",
"category",
"=",
"RuntimeWarning",
")",
"for",
"n",
",",
"f",
"in",
"enumerate",
"(",
"arr",
")",
":",
"out",
"[",
"n",
"]",
"=",
"subCell2DFnArray",
"(",
"f",
",",
"np",
".",
"nanmean",
",",
"(",
"ss0",
",",
"ss1",
")",
")",
"return",
"np",
".",
"nanmean",
"(",
"out",
"**",
"2",
")",
"**",
"0.5"
] |
calculate the standard deviation of all fitted images,
averaged to a grid
|
[
"calculate",
"the",
"standard",
"deviation",
"of",
"all",
"fitted",
"images",
"averaged",
"to",
"a",
"grid"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/flatField/vignettingFromRandomSteps.py#L169-L197
|
radjkarl/imgProcessor
|
imgProcessor/camera/flatField/vignettingFromRandomSteps.py
|
ObjectVignettingSeparation._fitImg
|
def _fitImg(self, img):
'''
fit perspective and size of the input image to the reference image
'''
img = imread(img, 'gray')
if self.bg is not None:
img = cv2.subtract(img, self.bg)
if self.lens is not None:
img = self.lens.correct(img, keepSize=True)
(H, _, _, _, _, _, _, n_matches) = self.findHomography(img)
H_inv = self.invertHomography(H)
s = self.obj_shape
fit = cv2.warpPerspective(img, H_inv, (s[1], s[0]))
return fit, img, H, H_inv, n_matches
|
python
|
def _fitImg(self, img):
'''
fit perspective and size of the input image to the reference image
'''
img = imread(img, 'gray')
if self.bg is not None:
img = cv2.subtract(img, self.bg)
if self.lens is not None:
img = self.lens.correct(img, keepSize=True)
(H, _, _, _, _, _, _, n_matches) = self.findHomography(img)
H_inv = self.invertHomography(H)
s = self.obj_shape
fit = cv2.warpPerspective(img, H_inv, (s[1], s[0]))
return fit, img, H, H_inv, n_matches
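
A standalone sketch of the final warp step with OpenCV (H is a made-up identity
homography here; note that cv2 expects dsize as (width, height)):

import numpy as np
import cv2

img = np.random.rand(100, 120).astype(np.float32)
H = np.eye(3)
H_inv = np.linalg.inv(H)
fit = cv2.warpPerspective(img, H_inv, (120, 100))   # (width, height)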
|
[
"def",
"_fitImg",
"(",
"self",
",",
"img",
")",
":",
"img",
"=",
"imread",
"(",
"img",
",",
"'gray'",
")",
"if",
"self",
".",
"bg",
"is",
"not",
"None",
":",
"img",
"=",
"cv2",
".",
"subtract",
"(",
"img",
",",
"self",
".",
"bg",
")",
"if",
"self",
".",
"lens",
"is",
"not",
"None",
":",
"img",
"=",
"self",
".",
"lens",
".",
"correct",
"(",
"img",
",",
"keepSize",
"=",
"True",
")",
"(",
"H",
",",
"_",
",",
"_",
",",
"_",
",",
"_",
",",
"_",
",",
"_",
",",
"n_matches",
")",
"=",
"self",
".",
"findHomography",
"(",
"img",
")",
"H_inv",
"=",
"self",
".",
"invertHomography",
"(",
"H",
")",
"s",
"=",
"self",
".",
"obj_shape",
"fit",
"=",
"cv2",
".",
"warpPerspective",
"(",
"img",
",",
"H_inv",
",",
"(",
"s",
"[",
"1",
"]",
",",
"s",
"[",
"0",
"]",
")",
")",
"return",
"fit",
",",
"img",
",",
"H",
",",
"H_inv",
",",
"n_matches"
] |
fit perspective and size of the input image to the reference image
|
[
"fit",
"perspective",
"and",
"size",
"of",
"the",
"input",
"image",
"to",
"the",
"reference",
"image"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/flatField/vignettingFromRandomSteps.py#L294-L310
|
radjkarl/imgProcessor
|
imgProcessor/camera/flatField/vignettingFromRandomSteps.py
|
ObjectVignettingSeparation._findObject
|
def _findObject(self, img):
'''
Create a bounding box around the object within an image
'''
from imgProcessor.imgSignal import signalMinimum
# img is scaled already
i = img > signalMinimum(img) # img.max()/2.5
# filter noise, single-time-effects etc. from mask:
i = minimum_filter(i, 4)
return boundingBox(i)
|
python
|
def _findObject(self, img):
'''
Create a bounding box around the object within an image
'''
from imgProcessor.imgSignal import signalMinimum
# img is scaled already
i = img > signalMinimum(img) # img.max()/2.5
# filter noise, single-time-effects etc. from mask:
i = minimum_filter(i, 4)
return boundingBox(i)
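
boundingBox and signalMinimum are repository helpers; a rough standalone sketch of the
same idea with a fixed threshold:

import numpy as np
from scipy.ndimage import minimum_filter

img = np.zeros((50, 50))
img[10:30, 15:40] = 1.0                 # bright object
i = img > 0.5                           # stand-in for signalMinimum(img)
i = minimum_filter(i, 4)                # suppress isolated noise pixels
ys, xs = np.nonzero(i)
bbox = (ys.min(), ys.max(), xs.min(), xs.max())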
|
[
"def",
"_findObject",
"(",
"self",
",",
"img",
")",
":",
"from",
"imgProcessor",
".",
"imgSignal",
"import",
"signalMinimum",
"# img is scaled already\r",
"i",
"=",
"img",
">",
"signalMinimum",
"(",
"img",
")",
"# img.max()/2.5\r",
"# filter noise, single-time-effects etc. from mask:\r",
"i",
"=",
"minimum_filter",
"(",
"i",
",",
"4",
")",
"return",
"boundingBox",
"(",
"i",
")"
] |
Create a bounding box around the object within an image
|
[
"Create",
"a",
"bounding",
"box",
"around",
"the",
"object",
"within",
"an",
"image"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/flatField/vignettingFromRandomSteps.py#L312-L321
|
radjkarl/imgProcessor
|
imgProcessor/filters/filterVerticalLines.py
|
filterVerticalLines
|
def filterVerticalLines(arr, min_line_length=4):
"""
Remove vertical lines in boolean array if line length >= min_line_length
"""
gy = arr.shape[0]
gx = arr.shape[1]
mn = min_line_length-1
for i in range(gy):
for j in range(gx):
if arr[i,j]:
for d in range(min_line_length):
if not arr[i+d,j]:
break
if d == mn:
d = 0
while True:
if not arr[i+d,j]:
break
arr[i+d,j] = 0
d +=1
|
python
|
def filterVerticalLines(arr, min_line_length=4):
"""
Remove vertical lines in boolean array if line length >= min_line_length
"""
gy = arr.shape[0]
gx = arr.shape[1]
mn = min_line_length-1
for i in range(gy):
for j in range(gx):
if arr[i,j]:
for d in range(min_line_length):
if not arr[i+d,j]:
break
if d == mn:
d = 0
while True:
if not arr[i+d,j]:
break
arr[i+d,j] = 0
d +=1
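
Usage sketch with the function as defined above; note it indexes arr[i+d, j] without
clamping, so lines touching the bottom border can raise an IndexError - keep a margin
or pad the array:

import numpy as np

arr = np.zeros((10, 5), dtype=bool)
arr[2:8, 1] = True                      # a 6-px vertical line
filterVerticalLines(arr, min_line_length=4)
assert not arr[:, 1].any()              # the line has been removed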
|
[
"def",
"filterVerticalLines",
"(",
"arr",
",",
"min_line_length",
"=",
"4",
")",
":",
"gy",
"=",
"arr",
".",
"shape",
"[",
"0",
"]",
"gx",
"=",
"arr",
".",
"shape",
"[",
"1",
"]",
"mn",
"=",
"min_line_length",
"-",
"1",
"for",
"i",
"in",
"range",
"(",
"gy",
")",
":",
"for",
"j",
"in",
"range",
"(",
"gx",
")",
":",
"if",
"arr",
"[",
"i",
",",
"j",
"]",
":",
"for",
"d",
"in",
"range",
"(",
"min_line_length",
")",
":",
"if",
"not",
"arr",
"[",
"i",
"+",
"d",
",",
"j",
"]",
":",
"break",
"if",
"d",
"==",
"mn",
":",
"d",
"=",
"0",
"while",
"True",
":",
"if",
"not",
"arr",
"[",
"i",
"+",
"d",
",",
"j",
"]",
":",
"break",
"arr",
"[",
"i",
"+",
"d",
",",
"j",
"]",
"=",
"0",
"d",
"+=",
"1"
] |
Remove vertical lines in boolean array if line length >= min_line_length
|
[
"Remove",
"vertical",
"lines",
"in",
"boolean",
"array",
"if",
"linelength",
">",
"=",
"min_line_length"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/filters/filterVerticalLines.py#L4-L23
|
radjkarl/imgProcessor
|
imgProcessor/equations/vignetting.py
|
vignetting
|
def vignetting(xy, f=100, alpha=0, rot=0, tilt=0, cx=50, cy=50):
'''
Vignetting equation using the KANG-WEISS-MODEL
see http://research.microsoft.com/en-us/um/people/sbkang/publications/eccv00.pdf
f - focal length
alpha - coefficient in the geometric vignetting factor
tilt - tilt angle of a planar scene
rot - rotation angle of a planar scene
cx - image center, x
cy - image center, y
'''
x, y = xy
# distance to image center:
dist = ((x - cx)**2 + (y - cy)**2)**0.5
# OFF_AXIS ILLUMINATION FACTOR:
A = 1.0 / (1 + (dist / f)**2)**2
# GEOMETRIC FACTOR:
if alpha != 0:
G = (1 - alpha * dist)
else:
G = 1
# TILT FACTOR:
if tilt != 0:
T = tiltFactor((x, y), f, tilt, rot, (cy, cx))
else:
T = 1
return A * G * T
|
python
|
def vignetting(xy, f=100, alpha=0, rot=0, tilt=0, cx=50, cy=50):
'''
Vignetting equation using the KANG-WEISS-MODEL
see http://research.microsoft.com/en-us/um/people/sbkang/publications/eccv00.pdf
f - focal length
alpha - coefficient in the geometric vignetting factor
tilt - tilt angle of a planar scene
rot - rotation angle of a planar scene
cx - image center, x
cy - image center, y
'''
x, y = xy
# distance to image center:
dist = ((x - cx)**2 + (y - cy)**2)**0.5
# OFF_AXIS ILLUMINATION FACTOR:
A = 1.0 / (1 + (dist / f)**2)**2
# GEOMETRIC FACTOR:
if alpha != 0:
G = (1 - alpha * dist)
else:
G = 1
# TILT FACTOR:
if tilt != 0:
T = tiltFactor((x, y), f, tilt, rot, (cy, cx))
else:
T = 1
return A * G * T
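
Evaluating the model over a pixel grid (parameter values are the defaults above):

import numpy as np

y, x = np.mgrid[0:100, 0:100]
v = vignetting((x, y), f=100, cx=50, cy=50)   # values in (0, 1], 1 at the center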
|
[
"def",
"vignetting",
"(",
"xy",
",",
"f",
"=",
"100",
",",
"alpha",
"=",
"0",
",",
"rot",
"=",
"0",
",",
"tilt",
"=",
"0",
",",
"cx",
"=",
"50",
",",
"cy",
"=",
"50",
")",
":",
"x",
",",
"y",
"=",
"xy",
"# distance to image center:\r",
"dist",
"=",
"(",
"(",
"x",
"-",
"cx",
")",
"**",
"2",
"+",
"(",
"y",
"-",
"cy",
")",
"**",
"2",
")",
"**",
"0.5",
"# OFF_AXIS ILLUMINATION FACTOR:\r",
"A",
"=",
"1.0",
"/",
"(",
"1",
"+",
"(",
"dist",
"/",
"f",
")",
"**",
"2",
")",
"**",
"2",
"# GEOMETRIC FACTOR:\r",
"if",
"alpha",
"!=",
"0",
":",
"G",
"=",
"(",
"1",
"-",
"alpha",
"*",
"dist",
")",
"else",
":",
"G",
"=",
"1",
"# TILT FACTOR:\r",
"if",
"tilt",
"!=",
"0",
":",
"T",
"=",
"tiltFactor",
"(",
"(",
"x",
",",
"y",
")",
",",
"f",
",",
"tilt",
",",
"rot",
",",
"(",
"cy",
",",
"cx",
")",
")",
"else",
":",
"T",
"=",
"1",
"return",
"A",
"*",
"G",
"*",
"T"
] |
Vignetting equation using the KANG-WEISS-MODEL
see http://research.microsoft.com/en-us/um/people/sbkang/publications/eccv00.pdf
f - focal length
alpha - coefficient in the geometric vignetting factor
tilt - tilt angle of a planar scene
rot - rotation angle of a planar scene
cx - image center, x
cy - image center, y
|
[
"Vignetting",
"equation",
"using",
"the",
"KANG",
"-",
"WEISS",
"-",
"MODEL",
"see",
"http",
":",
"//",
"research",
".",
"microsoft",
".",
"com",
"/",
"en",
"-",
"us",
"/",
"um",
"/",
"people",
"/",
"sbkang",
"/",
"publications",
"/",
"eccv00",
".",
"pdf",
"f",
"-",
"focal",
"length",
"alpha",
"-",
"coefficient",
"in",
"the",
"geometric",
"vignetting",
"factor",
"tilt",
"-",
"tilt",
"angle",
"of",
"a",
"planar",
"scene",
"rot",
"-",
"rotation",
"angle",
"of",
"a",
"planar",
"scene",
"cx",
"-",
"image",
"center",
"x",
"cy",
"-",
"image",
"center",
"y"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/equations/vignetting.py#L9-L37
|
radjkarl/imgProcessor
|
imgProcessor/equations/vignetting.py
|
tiltFactor
|
def tiltFactor(xy, f, tilt, rot, center=None):
'''
this function is extra to only cover vignetting through perspective distortion
f - focal length [px]
tau - tilt angle of a planar scene [radian]
rot - rotation angle of a planar scene [radian]
'''
x, y = xy
arr = np.cos(tilt) * (
1 + (np.tan(tilt) / f) * (
x * np.sin(rot) - y * np.cos(rot)))**3
return arr
|
python
|
def tiltFactor(xy, f, tilt, rot, center=None):
'''
this function is extra to only cover vignetting through perspective distortion
f - focal length [px]
tau - tilt angle of a planar scene [radian]
rot - rotation angle of a planar scene [radian]
'''
x, y = xy
arr = np.cos(tilt) * (
1 + (np.tan(tilt) / f) * (
x * np.sin(rot) - y * np.cos(rot)))**3
return arr
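
Evaluation sketch (angles in radians, per the docstring; the center argument is unused
by the implementation above, so coordinates are shifted by hand):

import numpy as np

y, x = np.mgrid[0:100, 0:100]
T = tiltFactor((x - 50.0, y - 50.0), f=100, tilt=np.radians(5), rot=0)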
|
[
"def",
"tiltFactor",
"(",
"xy",
",",
"f",
",",
"tilt",
",",
"rot",
",",
"center",
"=",
"None",
")",
":",
"x",
",",
"y",
"=",
"xy",
"arr",
"=",
"np",
".",
"cos",
"(",
"tilt",
")",
"*",
"(",
"1",
"+",
"(",
"np",
".",
"tan",
"(",
"tilt",
")",
"/",
"f",
")",
"*",
"(",
"x",
"*",
"np",
".",
"sin",
"(",
"rot",
")",
"-",
"y",
"*",
"np",
".",
"cos",
"(",
"rot",
")",
")",
")",
"**",
"3",
"return",
"arr"
] |
this function is extra to only cover vignetting through perspective distortion
f - focal length [px]
tau - tilt angle of a planar scene [radian]
rot - rotation angle of a planar scene [radian]
|
[
"this",
"function",
"is",
"extra",
"to",
"only",
"cover",
"vignetting",
"through",
"perspective",
"distortion",
"f",
"-",
"focal",
"length",
"[",
"px",
"]",
"tau",
"-",
"tilt",
"angle",
"of",
"a",
"planar",
"scene",
"[",
"radian",
"]",
"rot",
"-",
"rotation",
"angle",
"of",
"a",
"planar",
"scene",
"[",
"radian",
"]"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/equations/vignetting.py#L40-L52
|
radjkarl/imgProcessor
|
imgProcessor/transform/imgAverage.py
|
imgAverage
|
def imgAverage(images, copy=True):
'''
returns an image average
works on many, also unloaded images
minimises RAM usage
'''
i0 = images[0]
out = imread(i0, dtype='float')
if copy and id(i0) == id(out):
out = out.copy()
for i in images[1:]:
out += imread(i, dtype='float')
out /= len(images)
return out
|
python
|
def imgAverage(images, copy=True):
'''
returns an image average
works on many, also unloaded images
minimises RAM usage
'''
i0 = images[0]
out = imread(i0, dtype='float')
if copy and id(i0) == id(out):
out = out.copy()
for i in images[1:]:
out += imread(i, dtype='float')
out /= len(images)
return out
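
Usage sketch with in-memory arrays (imread passes ndarrays through, see imgIO.imread
further below, so file paths and arrays can be mixed in the list):

import numpy as np

imgs = [np.ones((4, 4)), 3 * np.ones((4, 4))]
avg = imgAverage(imgs)      # -> 2.0 everywhere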
|
[
"def",
"imgAverage",
"(",
"images",
",",
"copy",
"=",
"True",
")",
":",
"i0",
"=",
"images",
"[",
"0",
"]",
"out",
"=",
"imread",
"(",
"i0",
",",
"dtype",
"=",
"'float'",
")",
"if",
"copy",
"and",
"id",
"(",
"i0",
")",
"==",
"id",
"(",
"out",
")",
":",
"out",
"=",
"out",
".",
"copy",
"(",
")",
"for",
"i",
"in",
"images",
"[",
"1",
":",
"]",
":",
"out",
"+=",
"imread",
"(",
"i",
",",
"dtype",
"=",
"'float'",
")",
"out",
"/=",
"len",
"(",
"images",
")",
"return",
"out"
] |
returns an image average
works on many, also unloaded images
minimises RAM usage
|
[
"returns",
"an",
"image",
"average",
"works",
"on",
"many",
"also",
"unloaded",
"images",
"minimises",
"RAM",
"usage"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/transform/imgAverage.py#L7-L22
|
radjkarl/imgProcessor
|
imgProcessor/interpolate/offsetMeshgrid.py
|
offsetMeshgrid
|
def offsetMeshgrid(offset, grid, shape):
'''
Imagine you have cell averages [grid] on an image.
the top-left position of [grid] within the image
can be variable [offset]
offset(x,y)
e.g.(0,0) if no offset
grid(nx,ny) resolution of smaller grid
shape(x,y) -> output shape
returns meshgrid to be used to upscale [grid] to [shape] resolution
'''
g0,g1 = grid
s0,s1 = shape
o0, o1 = offset
#rescale to small grid:
o0 = - o0/ s0 * (g0-1)
o1 = - o1/ s1 * (g1-1)
xx,yy = np.meshgrid(np.linspace(o1, o1+g1-1, s1),
np.linspace(o0,o0+g0-1, s0))
return yy,xx
|
python
|
def offsetMeshgrid(offset, grid, shape):
'''
Imagine you have cell averages [grid] on an image.
the top-left position of [grid] within the image
can be variable [offset]
offset(x,y)
e.g.(0,0) if no offset
grid(nx,ny) resolution of smaller grid
shape(x,y) -> output shape
returns meshgrid to be used to upscale [grid] to [shape] resolution
'''
g0,g1 = grid
s0,s1 = shape
o0, o1 = offset
#rescale to small grid:
o0 = - o0/ s0 * (g0-1)
o1 = - o1/ s1 * (g1-1)
xx,yy = np.meshgrid(np.linspace(o1, o1+g1-1, s1),
np.linspace(o0,o0+g0-1, s0))
return yy,xx
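
Upscaling a small grid with the returned coordinates (map_coordinates is one possible
consumer; the offset here is zero for simplicity):

import numpy as np
from scipy.ndimage import map_coordinates

grid = np.random.rand(4, 4)
yy, xx = offsetMeshgrid((0, 0), grid.shape, (100, 100))
upscaled = map_coordinates(grid, (yy, xx), order=1)   # bilinear upscale to 100x100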
|
[
"def",
"offsetMeshgrid",
"(",
"offset",
",",
"grid",
",",
"shape",
")",
":",
"g0",
",",
"g1",
"=",
"grid",
"s0",
",",
"s1",
"=",
"shape",
"o0",
",",
"o1",
"=",
"offset",
"#rescale to small grid:\r",
"o0",
"=",
"-",
"o0",
"/",
"s0",
"*",
"(",
"g0",
"-",
"1",
")",
"o1",
"=",
"-",
"o1",
"/",
"s1",
"*",
"(",
"g1",
"-",
"1",
")",
"xx",
",",
"yy",
"=",
"np",
".",
"meshgrid",
"(",
"np",
".",
"linspace",
"(",
"o1",
",",
"o1",
"+",
"g1",
"-",
"1",
",",
"s1",
")",
",",
"np",
".",
"linspace",
"(",
"o0",
",",
"o0",
"+",
"g0",
"-",
"1",
",",
"s0",
")",
")",
"return",
"yy",
",",
"xx"
] |
Imagine you have cell averages [grid] on an image.
the top-left position of [grid] within the image
can be variable [offset]
offset(x,y)
e.g.(0,0) if no offset
grid(nx,ny) resolution of smaller grid
shape(x,y) -> output shape
returns meshgrid to be used to upscale [grid] to [shape] resolution
|
[
"Imagine",
"you",
"have",
"cell",
"averages",
"[",
"grid",
"]",
"on",
"an",
"image",
".",
"the",
"top",
"-",
"left",
"position",
"of",
"[",
"grid",
"]",
"within",
"the",
"image",
"can",
"be",
"variable",
"[",
"offset",
"]",
"offset",
"(",
"x",
"y",
")",
"e",
".",
"g",
".",
"(",
"0",
"0",
")",
"if",
"no",
"offset",
"grid",
"(",
"nx",
"ny",
")",
"resolution",
"of",
"smaller",
"grid",
"shape",
"(",
"x",
"y",
")",
"-",
">",
"output",
"shape",
"returns",
"meshgrid",
"to",
"be",
"used",
"to",
"upscale",
"[",
"grid",
"]",
"to",
"[",
"shape",
"]",
"resolution"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/interpolate/offsetMeshgrid.py#L5-L27
|
radjkarl/imgProcessor
|
imgProcessor/equations/poisson.py
|
poisson
|
def poisson(x, a, b, c, d=0):
'''
Poisson function
a -> height of the curve's peak
b -> position of the center of the peak
c -> standard deviation
d -> offset
'''
from scipy.misc import factorial #save startup time
lamb = 1
X = (x/(2*c)).astype(int)
return a * (( lamb**X/factorial(X)) * np.exp(-lamb) ) +d
|
python
|
def poisson(x, a, b, c, d=0):
'''
Poisson function
a -> height of the curve's peak
b -> position of the center of the peak
c -> standard deviation
d -> offset
'''
from scipy.misc import factorial #save startup time
lamb = 1
X = (x/(2*c)).astype(int)
return a * (( lamb**X/factorial(X)) * np.exp(-lamb) ) +d
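
Two caveats: scipy.misc.factorial was removed in modern SciPy (scipy.special.factorial
is the current location), and the parameter b is accepted but unused. A standalone
sketch with the updated import:

import numpy as np
from scipy.special import factorial

def poisson_modern(x, a, b, c, d=0):
    lamb = 1
    X = (x / (2 * c)).astype(int)
    return a * ((lamb**X / factorial(X)) * np.exp(-lamb)) + d

y = poisson_modern(np.linspace(0, 10, 50), a=1.0, b=0.0, c=1.0)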
|
[
"def",
"poisson",
"(",
"x",
",",
"a",
",",
"b",
",",
"c",
",",
"d",
"=",
"0",
")",
":",
"from",
"scipy",
".",
"misc",
"import",
"factorial",
"#save startup time\r",
"lamb",
"=",
"1",
"X",
"=",
"(",
"x",
"/",
"(",
"2",
"*",
"c",
")",
")",
".",
"astype",
"(",
"int",
")",
"return",
"a",
"*",
"(",
"(",
"lamb",
"**",
"X",
"/",
"factorial",
"(",
"X",
")",
")",
"*",
"np",
".",
"exp",
"(",
"-",
"lamb",
")",
")",
"+",
"d"
] |
Poisson function
a -> height of the curve's peak
b -> position of the center of the peak
c -> standard deviation
d -> offset
|
[
"Poisson",
"function",
"a",
"-",
">",
"height",
"of",
"the",
"curve",
"s",
"peak",
"b",
"-",
">",
"position",
"of",
"the",
"center",
"of",
"the",
"peak",
"c",
"-",
">",
"standard",
"deviation",
"d",
"-",
">",
"offset"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/equations/poisson.py#L4-L15
|
radjkarl/imgProcessor
|
imgProcessor/transform/rotate.py
|
rotate
|
def rotate(image, angle, interpolation=cv2.INTER_CUBIC,
borderMode=cv2.BORDER_REFLECT, borderValue=0):
'''
angle [deg]
'''
s0, s1 = image.shape
image_center = (s0 - 1) / 2., (s1 - 1) / 2.
rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
result = cv2.warpAffine(image, rot_mat, image.shape,
flags=interpolation, borderMode=borderMode,
borderValue=borderValue)
return result
|
python
|
def rotate(image, angle, interpolation=cv2.INTER_CUBIC,
borderMode=cv2.BORDER_REFLECT, borderValue=0):
'''
angle [deg]
'''
s0, s1 = image.shape
image_center = (s0 - 1) / 2., (s1 - 1) / 2.
rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
result = cv2.warpAffine(image, rot_mat, image.shape,
flags=interpolation, borderMode=borderMode,
borderValue=borderValue)
return result
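
Usage sketch; note warpAffine receives image.shape as dsize, which is (rows, cols)
rather than the (width, height) OpenCV expects, so non-square inputs come back with
swapped dimensions - square images are safe:

import numpy as np

img = np.zeros((50, 50), dtype=np.float32)
img[20:30, 10:40] = 1.0
out = rotate(img, angle=30)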
|
[
"def",
"rotate",
"(",
"image",
",",
"angle",
",",
"interpolation",
"=",
"cv2",
".",
"INTER_CUBIC",
",",
"borderMode",
"=",
"cv2",
".",
"BORDER_REFLECT",
",",
"borderValue",
"=",
"0",
")",
":",
"s0",
",",
"s1",
"=",
"image",
".",
"shape",
"image_center",
"=",
"(",
"s0",
"-",
"1",
")",
"/",
"2.",
",",
"(",
"s1",
"-",
"1",
")",
"/",
"2.",
"rot_mat",
"=",
"cv2",
".",
"getRotationMatrix2D",
"(",
"image_center",
",",
"angle",
",",
"1.0",
")",
"result",
"=",
"cv2",
".",
"warpAffine",
"(",
"image",
",",
"rot_mat",
",",
"image",
".",
"shape",
",",
"flags",
"=",
"interpolation",
",",
"borderMode",
"=",
"borderMode",
",",
"borderValue",
"=",
"borderValue",
")",
"return",
"result"
] |
angle [deg]
|
[
"angle",
"[",
"deg",
"]"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/transform/rotate.py#L8-L19
|
radjkarl/imgProcessor
|
imgProcessor/uncertainty/adjustUncertToExposureTime.py
|
adjustUncertToExposureTime
|
def adjustUncertToExposureTime(facExpTime, uncertMap, evtLenMap):
'''
Adjust image uncertainty (measured at exposure time t0)
to new exposure time
facExpTime --> new exp.time / reference exp.time =(t/t0)
uncertMap --> 2d array mapping image uncertainty
evtLen --> 2d array mapping event duration within image [sec]
event duration is relative to exposure time
e.g. duration = 2 means event is 2x longer than
exposure time
More information can be found at ...
----
K.Bedrich: Quantitative Electroluminescence Imaging, PhD Thesis, 2017
Subsection 5.1.4.3: Exposure Time Dependency
----
'''
#fit parameters, obtained from ####[simulateUncertDependencyOnExpTime]
params = np.array(
#a facExpTime f_0 f_inf
[[ 2.63017121e+00, 3.05873627e-01, 1.00000000e+01, 2.78233309e-01],
[ 2.26467931e+00, 2.86206621e-01, 8.01396977e+00, 2.04089232e-01],
[ 1.27361168e+00, 5.18377189e-01, 3.04180084e+00, 2.61396338e-01],
[ 7.34546040e-01, 7.34549823e-01, 1.86507345e+00, 2.77563156e-01],
[ 3.82715618e-01, 9.32410141e-01, 1.34510254e+00, 2.91228149e-01],
[ 1.71166071e-01, 1.14092885e+00, 1.11243702e+00, 3.07947386e-01],
[ 6.13455410e-02, 1.43802520e+00, 1.02995065e+00, 3.93920802e-01],
[ 1.65383071e-02, 1.75605076e+00, 1.00859395e+00, 5.02132321e-01],
[ 4.55800114e-03, 1.99855711e+00, 9.98819118e-01, 5.99572776e-01]])
#event duration relative to exposure time:(1/16...16)
dur = np.array([ 0.0625, 0.125 , 0.25 ,
0.5 , 1. , 2. ,
4. , 8. , 16. ])
#get factors from interpolation:
a = UnivariateSpline(dur, params[:, 0], k=3, s=0)
b = UnivariateSpline(dur, params[:, 1], k=3, s=0)
start = UnivariateSpline(dur, params[:, 2], k=3, s=0)
end = UnivariateSpline(dur, params[:, 3], k=3, s=0)
p0 = a(evtLenMap), b(evtLenMap), start(evtLenMap), end(evtLenMap)
#uncertainty for new exposure time:
out = uncertMap * _fitfn(facExpTime, *p0)
# everywhere where there ARE NO EVENTS --> scale uncert. as if would
# be normal distributed:
i = evtLenMap == 0
out[i] = uncertMap[i] * (1 / facExpTime)**0.5
return out
|
python
|
def adjustUncertToExposureTime(facExpTime, uncertMap, evtLenMap):
'''
Adjust image uncertainty (measured at exposure time t0)
to new exposure time
facExpTime --> new exp.time / reference exp.time =(t/t0)
uncertMap --> 2d array mapping image uncertainty
evtLen --> 2d array mapping event duration within image [sec]
event duration is relative to exposure time
e.g. duration = 2 means event is 2x longer than
exposure time
More information can be found at ...
----
K.Bedrich: Quantitative Electroluminescence Imaging, PhD Thesis, 2017
Subsection 5.1.4.3: Exposure Time Dependency
----
'''
#fit parameters, obtained from ####[simulateUncertDependencyOnExpTime]
params = np.array(
#a facExpTime f_0 f_inf
[[ 2.63017121e+00, 3.05873627e-01, 1.00000000e+01, 2.78233309e-01],
[ 2.26467931e+00, 2.86206621e-01, 8.01396977e+00, 2.04089232e-01],
[ 1.27361168e+00, 5.18377189e-01, 3.04180084e+00, 2.61396338e-01],
[ 7.34546040e-01, 7.34549823e-01, 1.86507345e+00, 2.77563156e-01],
[ 3.82715618e-01, 9.32410141e-01, 1.34510254e+00, 2.91228149e-01],
[ 1.71166071e-01, 1.14092885e+00, 1.11243702e+00, 3.07947386e-01],
[ 6.13455410e-02, 1.43802520e+00, 1.02995065e+00, 3.93920802e-01],
[ 1.65383071e-02, 1.75605076e+00, 1.00859395e+00, 5.02132321e-01],
[ 4.55800114e-03, 1.99855711e+00, 9.98819118e-01, 5.99572776e-01]])
#event duration relative to exposure time:(1/16...16)
dur = np.array([ 0.0625, 0.125 , 0.25 ,
0.5 , 1. , 2. ,
4. , 8. , 16. ])
#get factors from interpolation:
a = UnivariateSpline(dur, params[:, 0], k=3, s=0)
b = UnivariateSpline(dur, params[:, 1], k=3, s=0)
start = UnivariateSpline(dur, params[:, 2], k=3, s=0)
end = UnivariateSpline(dur, params[:, 3], k=3, s=0)
p0 = a(evtLenMap), b(evtLenMap), start(evtLenMap), end(evtLenMap)
#uncertainty for new exposure time:
out = uncertMap * _fitfn(facExpTime, *p0)
# everywhere where there ARE NO EVENTS --> scale uncert. as if would
# be normal distributed:
i = evtLenMap == 0
out[i] = uncertMap[i] * (1 / facExpTime)**0.5
return out
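
Sketch on synthetic maps, using the function as defined above with its module-level
helpers; pixels without events scale with 1/sqrt(facExpTime), as the last lines of the
function show:

import numpy as np

uncert = np.full((8, 8), 0.05)
evtLen = np.zeros((8, 8))
evtLen[2, 2] = 2.0                        # one event, 2x the exposure time
out = adjustUncertToExposureTime(4.0, uncert, evtLen)
assert np.isclose(out[0, 0], 0.05 * 0.5)  # 1/sqrt(4) scaling where evtLen == 0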
|
[
"def",
"adjustUncertToExposureTime",
"(",
"facExpTime",
",",
"uncertMap",
",",
"evtLenMap",
")",
":",
"#fit parameters, obtained from ####[simulateUncertDependencyOnExpTime]\r",
"params",
"=",
"np",
".",
"array",
"(",
"#a facExpTime f_0 f_inf \r",
"[",
"[",
"2.63017121e+00",
",",
"3.05873627e-01",
",",
"1.00000000e+01",
",",
"2.78233309e-01",
"]",
",",
"[",
"2.26467931e+00",
",",
"2.86206621e-01",
",",
"8.01396977e+00",
",",
"2.04089232e-01",
"]",
",",
"[",
"1.27361168e+00",
",",
"5.18377189e-01",
",",
"3.04180084e+00",
",",
"2.61396338e-01",
"]",
",",
"[",
"7.34546040e-01",
",",
"7.34549823e-01",
",",
"1.86507345e+00",
",",
"2.77563156e-01",
"]",
",",
"[",
"3.82715618e-01",
",",
"9.32410141e-01",
",",
"1.34510254e+00",
",",
"2.91228149e-01",
"]",
",",
"[",
"1.71166071e-01",
",",
"1.14092885e+00",
",",
"1.11243702e+00",
",",
"3.07947386e-01",
"]",
",",
"[",
"6.13455410e-02",
",",
"1.43802520e+00",
",",
"1.02995065e+00",
",",
"3.93920802e-01",
"]",
",",
"[",
"1.65383071e-02",
",",
"1.75605076e+00",
",",
"1.00859395e+00",
",",
"5.02132321e-01",
"]",
",",
"[",
"4.55800114e-03",
",",
"1.99855711e+00",
",",
"9.98819118e-01",
",",
"5.99572776e-01",
"]",
"]",
")",
"#event duration relative to exposure time:(1/16...16)\r",
"dur",
"=",
"np",
".",
"array",
"(",
"[",
"0.0625",
",",
"0.125",
",",
"0.25",
",",
"0.5",
",",
"1.",
",",
"2.",
",",
"4.",
",",
"8.",
",",
"16.",
"]",
")",
"#get factors from interpolation:\r",
"a",
"=",
"UnivariateSpline",
"(",
"dur",
",",
"params",
"[",
":",
",",
"0",
"]",
",",
"k",
"=",
"3",
",",
"s",
"=",
"0",
")",
"b",
"=",
"UnivariateSpline",
"(",
"dur",
",",
"params",
"[",
":",
",",
"1",
"]",
",",
"k",
"=",
"3",
",",
"s",
"=",
"0",
")",
"start",
"=",
"UnivariateSpline",
"(",
"dur",
",",
"params",
"[",
":",
",",
"2",
"]",
",",
"k",
"=",
"3",
",",
"s",
"=",
"0",
")",
"end",
"=",
"UnivariateSpline",
"(",
"dur",
",",
"params",
"[",
":",
",",
"3",
"]",
",",
"k",
"=",
"3",
",",
"s",
"=",
"0",
")",
"p0",
"=",
"a",
"(",
"evtLenMap",
")",
",",
"b",
"(",
"evtLenMap",
")",
",",
"start",
"(",
"evtLenMap",
")",
",",
"end",
"(",
"evtLenMap",
")",
"#uncertainty for new exposure time:\r",
"out",
"=",
"uncertMap",
"*",
"_fitfn",
"(",
"facExpTime",
",",
"*",
"p0",
")",
"# everywhere where there ARE NO EVENTS --> scale uncert. as if would\r",
"# be normal distributed:\r",
"i",
"=",
"evtLenMap",
"==",
"0",
"out",
"[",
"i",
"]",
"=",
"uncertMap",
"[",
"i",
"]",
"*",
"(",
"1",
"/",
"facExpTime",
")",
"**",
"0.5",
"return",
"out"
] |
Adjust image uncertainty (measured at exposure time t0)
to new exposure time
facExpTime --> new exp.time / reference exp.time =(t/t0)
uncertMap --> 2d array mapping image uncertainty
evtLen --> 2d array mapping event duration within image [sec]
event duration is relative to exposure time
e.g. duration = 2 means event is 2x longer than
exposure time
More information can be found at ...
----
K.Bedrich: Quantitative Electroluminescence Imaging, PhD Thesis, 2017
Subsection 5.1.4.3: Exposure Time Dependency
----
|
[
"Adjust",
"image",
"uncertainty",
"(",
"measured",
"at",
"exposure",
"time",
"t0",
")",
"to",
"new",
"exposure",
"time",
"facExpTime",
"--",
">",
"new",
"exp",
".",
"time",
"/",
"reference",
"exp",
".",
"time",
"=",
"(",
"t",
"/",
"t0",
")",
"uncertMap",
"--",
">",
"2d",
"array",
"mapping",
"image",
"uncertainty",
"evtLen",
"--",
">",
"2d",
"array",
"mapping",
"event",
"duration",
"within",
"image",
"[",
"sec",
"]",
"event",
"duration",
"is",
"relative",
"to",
"exposure",
"time",
"e",
".",
"g",
".",
"duration",
"=",
"2",
"means",
"event",
"is",
"2x",
"longer",
"than",
"exposure",
"time",
"More",
"information",
"can",
"be",
"found",
"at",
"...",
"----",
"K",
".",
"Bedrich",
":",
"Quantitative",
"Electroluminescence",
"Imaging",
"PhD",
"Thesis",
"2017",
"Subsection",
"5",
".",
"1",
".",
"4",
".",
"3",
":",
"Exposure",
"Time",
"Dependency",
"----"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/uncertainty/adjustUncertToExposureTime.py#L10-L59
|
radjkarl/imgProcessor
|
imgProcessor/equations/gaussian.py
|
gaussian
|
def gaussian(x, a, b, c, d=0):
'''
a -> height of the curve's peak
b -> position of the center of the peak
c -> standard deviation or Gaussian RMS width
d -> offset
'''
return a * np.exp( -(((x-b)**2 )/ (2*(c**2))) ) + d
|
python
|
def gaussian(x, a, b, c, d=0):
'''
a -> height of the curve's peak
b -> position of the center of the peak
c -> standard deviation or Gaussian RMS width
d -> offset
'''
return a * np.exp( -(((x-b)**2 )/ (2*(c**2))) ) + d
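
Evaluation and a quick fit (curve_fit keeps d at its default when p0 has three entries):

import numpy as np
from scipy.optimize import curve_fit

x = np.linspace(-5, 5, 101)
y = gaussian(x, 1.0, 0.0, 1.5) + 0.01 * np.random.randn(x.size)
popt, _ = curve_fit(gaussian, x, y, p0=(1, 0, 1))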
|
[
"def",
"gaussian",
"(",
"x",
",",
"a",
",",
"b",
",",
"c",
",",
"d",
"=",
"0",
")",
":",
"return",
"a",
"*",
"np",
".",
"exp",
"(",
"-",
"(",
"(",
"(",
"x",
"-",
"b",
")",
"**",
"2",
")",
"/",
"(",
"2",
"*",
"(",
"c",
"**",
"2",
")",
")",
")",
")",
"+",
"d"
] |
a -> height of the curve's peak
b -> position of the center of the peak
c -> standard deviation or Gaussian RMS width
d -> offset
|
[
"a",
"-",
">",
"height",
"of",
"the",
"curve",
"s",
"peak",
"b",
"-",
">",
"position",
"of",
"the",
"center",
"of",
"the",
"peak",
"c",
"-",
">",
"standard",
"deviation",
"or",
"Gaussian",
"RMS",
"width",
"d",
"-",
">",
"offset"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/equations/gaussian.py#L6-L13
|
radjkarl/imgProcessor
|
imgProcessor/interpolate/videoWrite.py
|
videoWrite
|
def videoWrite(path, imgs, levels=None, shape=None, frames=15,
annotate_names=None,
lut=None, updateFn=None):
'''
TODO
'''
frames = int(frames)
if annotate_names is not None:
assert len(annotate_names) == len(imgs)
if levels is None:
if imgs[0].dtype == np.uint8:
levels = 0, 255
elif imgs[0].dtype == np.uint16:
levels = 0, 2**16 - 1
else:
levels = np.min(imgs), np.max(imgs)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
h, w = imgs.shape[1:3]
if shape and shape != (h, w):
h, w = shape
imgs = [cv2.resize(i, (w, h)) for i in imgs]
assert path[-3:] in ('avi',
'png'), 'video export only supports *.avi or *.png'
isVideo = path[-3:] == 'avi'
if isVideo:
cap = cv2.VideoCapture(0)
# im.ndim==4)
out = cv2.VideoWriter(path, fourcc, frames, (w, h), isColor=1)
times = np.linspace(0, len(imgs) - 1, len(imgs) * frames)
interpolator = LinearInterpolateImageStack(imgs)
if lut is not None:
lut = lut(imgs[0])
for n, time in enumerate(times):
if updateFn:
# update progress:
updateFn.emit(100 * n / len(times))
image = interpolator(time)
cimg = makeRGBA(image, lut=lut,
levels=levels)[0]
cimg = cv2.cvtColor(cimg, cv2.COLOR_RGBA2BGR)
if annotate_names:
text = annotate_names[n // frames]
alpha = 0.5
org = (0, cimg.shape[0])
fontFace = cv2.FONT_HERSHEY_PLAIN
fontScale = 2
thickness = 3
putTextAlpha(cimg, text, alpha, org, fontFace, fontScale,
(0, 255, 0), thickness
)
if isVideo:
out.write(cimg)
else:
cv2.imwrite('%s_%i_%.3f.png' % (path[:-4], n, time), cimg)
if isVideo:
cap.release()
out.release()
|
python
|
def videoWrite(path, imgs, levels=None, shape=None, frames=15,
annotate_names=None,
lut=None, updateFn=None):
'''
TODO
'''
frames = int(frames)
if annotate_names is not None:
assert len(annotate_names) == len(imgs)
if levels is None:
if imgs[0].dtype == np.uint8:
levels = 0, 255
elif imgs[0].dtype == np.uint16:
levels = 0, 2**16 - 1
else:
levels = np.min(imgs), np.max(imgs)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
h, w = imgs.shape[1:3]
if shape and shape != (h, w):
h, w = shape
imgs = [cv2.resize(i, (w, h)) for i in imgs]
assert path[-3:] in ('avi',
'png'), 'video export only supports *.avi or *.png'
isVideo = path[-3:] == 'avi'
if isVideo:
cap = cv2.VideoCapture(0)
# im.ndim==4)
out = cv2.VideoWriter(path, fourcc, frames, (w, h), isColor=1)
times = np.linspace(0, len(imgs) - 1, len(imgs) * frames)
interpolator = LinearInterpolateImageStack(imgs)
if lut is not None:
lut = lut(imgs[0])
for n, time in enumerate(times):
if updateFn:
# update progress:
updateFn.emit(100 * n / len(times))
image = interpolator(time)
cimg = makeRGBA(image, lut=lut,
levels=levels)[0]
cimg = cv2.cvtColor(cimg, cv2.COLOR_RGBA2BGR)
if annotate_names:
text = annotate_names[n // frames]
alpha = 0.5
org = (0, cimg.shape[0])
fontFace = cv2.FONT_HERSHEY_PLAIN
fontScale = 2
thickness = 3
putTextAlpha(cimg, text, alpha, org, fontFace, fontScale,
(0, 255, 0), thickness
)
if isVideo:
out.write(cimg)
else:
cv2.imwrite('%s_%i_%.3f.png' % (path[:-4], n, time), cimg)
if isVideo:
cap.release()
out.release()
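
Hypothetical call (the output path is a placeholder; an XVID codec must be available,
and note the function also opens the default capture device via cv2.VideoCapture(0)):

import numpy as np

imgs = np.random.randint(0, 255, (10, 64, 64), dtype=np.uint8)
videoWrite('out.avi', imgs, frames=15)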
|
[
"def",
"videoWrite",
"(",
"path",
",",
"imgs",
",",
"levels",
"=",
"None",
",",
"shape",
"=",
"None",
",",
"frames",
"=",
"15",
",",
"annotate_names",
"=",
"None",
",",
"lut",
"=",
"None",
",",
"updateFn",
"=",
"None",
")",
":",
"frames",
"=",
"int",
"(",
"frames",
")",
"if",
"annotate_names",
"is",
"not",
"None",
":",
"assert",
"len",
"(",
"annotate_names",
")",
"==",
"len",
"(",
"imgs",
")",
"if",
"levels",
"is",
"None",
":",
"if",
"imgs",
"[",
"0",
"]",
".",
"dtype",
"==",
"np",
".",
"uint8",
":",
"levels",
"=",
"0",
",",
"255",
"elif",
"imgs",
"[",
"0",
"]",
".",
"dtype",
"==",
"np",
".",
"uint16",
":",
"levels",
"=",
"0",
",",
"2",
"**",
"16",
"-",
"1",
"else",
":",
"levels",
"=",
"np",
".",
"min",
"(",
"imgs",
")",
",",
"np",
".",
"max",
"(",
"imgs",
")",
"fourcc",
"=",
"cv2",
".",
"VideoWriter_fourcc",
"(",
"*",
"'XVID'",
")",
"h",
",",
"w",
"=",
"imgs",
".",
"shape",
"[",
"1",
":",
"3",
"]",
"if",
"shape",
"and",
"shape",
"!=",
"(",
"h",
",",
"w",
")",
":",
"h",
",",
"w",
"=",
"shape",
"imgs",
"=",
"[",
"cv2",
".",
"resize",
"(",
"i",
",",
"(",
"w",
",",
"h",
")",
")",
"for",
"i",
"in",
"imgs",
"]",
"assert",
"path",
"[",
"-",
"3",
":",
"]",
"in",
"(",
"'avi'",
",",
"'png'",
")",
",",
"'video export only supports *.avi or *.png'",
"isVideo",
"=",
"path",
"[",
"-",
"3",
":",
"]",
"==",
"'avi'",
"if",
"isVideo",
":",
"cap",
"=",
"cv2",
".",
"VideoCapture",
"(",
"0",
")",
"# im.ndim==4)\r",
"out",
"=",
"cv2",
".",
"VideoWriter",
"(",
"path",
",",
"fourcc",
",",
"frames",
",",
"(",
"w",
",",
"h",
")",
",",
"isColor",
"=",
"1",
")",
"times",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"len",
"(",
"imgs",
")",
"-",
"1",
",",
"len",
"(",
"imgs",
")",
"*",
"frames",
")",
"interpolator",
"=",
"LinearInterpolateImageStack",
"(",
"imgs",
")",
"if",
"lut",
"is",
"not",
"None",
":",
"lut",
"=",
"lut",
"(",
"imgs",
"[",
"0",
"]",
")",
"for",
"n",
",",
"time",
"in",
"enumerate",
"(",
"times",
")",
":",
"if",
"updateFn",
":",
"# update progress:\r",
"updateFn",
".",
"emit",
"(",
"100",
"*",
"n",
"/",
"len",
"(",
"times",
")",
")",
"image",
"=",
"interpolator",
"(",
"time",
")",
"cimg",
"=",
"makeRGBA",
"(",
"image",
",",
"lut",
"=",
"lut",
",",
"levels",
"=",
"levels",
")",
"[",
"0",
"]",
"cimg",
"=",
"cv2",
".",
"cvtColor",
"(",
"cimg",
",",
"cv2",
".",
"COLOR_RGBA2BGR",
")",
"if",
"annotate_names",
":",
"text",
"=",
"annotate_names",
"[",
"n",
"//",
"frames",
"]",
"alpha",
"=",
"0.5",
"org",
"=",
"(",
"0",
",",
"cimg",
".",
"shape",
"[",
"0",
"]",
")",
"fontFace",
"=",
"cv2",
".",
"FONT_HERSHEY_PLAIN",
"fontScale",
"=",
"2",
"thickness",
"=",
"3",
"putTextAlpha",
"(",
"cimg",
",",
"text",
",",
"alpha",
",",
"org",
",",
"fontFace",
",",
"fontScale",
",",
"(",
"0",
",",
"255",
",",
"0",
")",
",",
"thickness",
")",
"if",
"isVideo",
":",
"out",
".",
"write",
"(",
"cimg",
")",
"else",
":",
"cv2",
".",
"imwrite",
"(",
"'%s_%i_%.3f.png'",
"%",
"(",
"path",
"[",
":",
"-",
"4",
"]",
",",
"n",
",",
"time",
")",
",",
"cimg",
")",
"if",
"isVideo",
":",
"cap",
".",
"release",
"(",
")",
"out",
".",
"release",
"(",
")"
] |
TODO
|
[
"TODO"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/interpolate/videoWrite.py#L14-L81
|
radjkarl/imgProcessor
|
imgProcessor/imgIO.py
|
imread
|
def imread(img, color=None, dtype=None):
'''
dtype = 'noUint', uint8, float, 'float', ...
'''
COLOR2CV = {'gray': cv2.IMREAD_GRAYSCALE,
'all': cv2.IMREAD_COLOR,
None: cv2.IMREAD_ANYCOLOR
}
c = COLOR2CV[color]
if callable(img):
img = img()
elif isinstance(img, string_types):
# from_file = True
# try:
# ftype = img[img.find('.'):]
# img = READERS[ftype](img)[0]
# except KeyError:
# open with openCV
# grey - 8 bit
if dtype in (None, "noUint") or np.dtype(dtype) != np.uint8:
c |= cv2.IMREAD_ANYDEPTH
img2 = cv2.imread(img, c)
if img2 is None:
raise IOError("image '%s' is not existing" % img)
img = img2
elif color == 'gray' and img.ndim == 3: # multi channel img like rgb
# cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) #cannot handle float64
img = toGray(img)
# transform array to uint8 array due to openCV restriction
if dtype is not None:
if isinstance(img, np.ndarray):
img = _changeArrayDType(img, dtype, cutHigh=False)
return img
|
python
|
def imread(img, color=None, dtype=None):
'''
dtype = 'noUint', uint8, float, 'float', ...
'''
COLOR2CV = {'gray': cv2.IMREAD_GRAYSCALE,
'all': cv2.IMREAD_COLOR,
None: cv2.IMREAD_ANYCOLOR
}
c = COLOR2CV[color]
if callable(img):
img = img()
elif isinstance(img, string_types):
# from_file = True
# try:
# ftype = img[img.find('.'):]
# img = READERS[ftype](img)[0]
# except KeyError:
# open with openCV
# grey - 8 bit
if dtype in (None, "noUint") or np.dtype(dtype) != np.uint8:
c |= cv2.IMREAD_ANYDEPTH
img2 = cv2.imread(img, c)
if img2 is None:
raise IOError("image '%s' is not existing" % img)
img = img2
elif color == 'gray' and img.ndim == 3: # multi channel img like rgb
# cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) #cannot handle float64
img = toGray(img)
# transform array to uint8 array due to openCV restriction
if dtype is not None:
if isinstance(img, np.ndarray):
img = _changeArrayDType(img, dtype, cutHigh=False)
return img
|
[
"def",
"imread",
"(",
"img",
",",
"color",
"=",
"None",
",",
"dtype",
"=",
"None",
")",
":",
"COLOR2CV",
"=",
"{",
"'gray'",
":",
"cv2",
".",
"IMREAD_GRAYSCALE",
",",
"'all'",
":",
"cv2",
".",
"IMREAD_COLOR",
",",
"None",
":",
"cv2",
".",
"IMREAD_ANYCOLOR",
"}",
"c",
"=",
"COLOR2CV",
"[",
"color",
"]",
"if",
"callable",
"(",
"img",
")",
":",
"img",
"=",
"img",
"(",
")",
"elif",
"isinstance",
"(",
"img",
",",
"string_types",
")",
":",
"# from_file = True\r",
"# try:\r",
"# ftype = img[img.find('.'):]\r",
"# img = READERS[ftype](img)[0]\r",
"# except KeyError:\r",
"# open with openCV\r",
"# grey - 8 bit\r",
"if",
"dtype",
"in",
"(",
"None",
",",
"\"noUint\"",
")",
"or",
"np",
".",
"dtype",
"(",
"dtype",
")",
"!=",
"np",
".",
"uint8",
":",
"c",
"|=",
"cv2",
".",
"IMREAD_ANYDEPTH",
"img2",
"=",
"cv2",
".",
"imread",
"(",
"img",
",",
"c",
")",
"if",
"img2",
"is",
"None",
":",
"raise",
"IOError",
"(",
"\"image '%s' is not existing\"",
"%",
"img",
")",
"img",
"=",
"img2",
"elif",
"color",
"==",
"'gray'",
"and",
"img",
".",
"ndim",
"==",
"3",
":",
"# multi channel img like rgb\r",
"# cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) #cannot handle float64\r",
"img",
"=",
"toGray",
"(",
"img",
")",
"# transform array to uint8 array due to openCV restriction\r",
"if",
"dtype",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"img",
",",
"np",
".",
"ndarray",
")",
":",
"img",
"=",
"_changeArrayDType",
"(",
"img",
",",
"dtype",
",",
"cutHigh",
"=",
"False",
")",
"return",
"img"
] |
dtype = 'noUint', uint8, float, 'float', ...
|
[
"dtype",
"=",
"noUint",
"uint8",
"float",
"float",
"..."
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/imgIO.py#L39-L73
|
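A minimal usage sketch for imread following the signature documented above; the file name is hypothetical:

from imgProcessor.imgIO import imread

# 'gray' collapses multi-channel files to one channel; dtype=float
# converts the loaded array afterwards
img = imread('example.png', color='gray', dtype=float)
print(img.dtype, img.ndim)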
radjkarl/imgProcessor
|
imgProcessor/measure/sharpness/SharpnessfromPoints.py
|
SharpnessfromPointSources.addImg
|
def addImg(self, img, roi=None):
'''
img - background, flat field, ste corrected image
roi - [(x1,y1),...,(x4,y4)] - boundaries where points are
'''
self.img = imread(img, 'gray')
s0, s1 = self.img.shape
if roi is None:
roi = ((0, 0), (s0, 0), (s0, s1), (0, s1))
k = self.kernel_size
hk = k // 2
# mask image
img2 = self.img.copy() # .astype(int)
mask = np.zeros(self.img.shape)
cv2.fillConvexPoly(mask, np.asarray(roi, dtype=np.int32), color=1)
mask = mask.astype(bool)
im = img2[mask]
bg = im.mean() # assume image average with in roi == background
mask = ~mask
img2[mask] = -1
# find points from local maxima:
self.points = np.zeros(shape=(self.max_points, 2), dtype=int)
thresh = 0.8 * bg + 0.2 * im.max()
_findPoints(img2, thresh, self.min_dist, self.points)
self.points = self.points[:np.argmin(self.points, axis=0)[0]]
    # correct point position, so that every point is over the max value:
for n, p in enumerate(self.points):
sub = self.img[p[1] - hk:p[1] + hk + 1, p[0] - hk:p[0] + hk + 1]
i, j = np.unravel_index(np.nanargmax(sub), sub.shape)
self.points[n] += [j - hk, i - hk]
# remove points that are too close to their neighbour or the border
mask = maximum_filter(mask, hk)
i = np.ones(self.points.shape[0], dtype=bool)
for n, p in enumerate(self.points):
if mask[p[1], p[0]]: # too close to border
i[n] = False
else:
# too close to other points
for pp in self.points[n + 1:]:
if norm(p - pp) < hk + 1:
i[n] = False
isum = i.sum()
ll = len(i) - isum
print('found %s points' % isum)
if ll:
print(
'removed %s points (too close to border or other points)' %
ll)
self.points = self.points[i]
# self.n_points += len(self.points)
# for finding best peak position:
# def fn(xy,cx,cy):#par
# (x,y) = xy
# return 1-(((x-cx)**2 + (y-cy)**2)*(1/8)).flatten()
# x,y = np.mgrid[-2:3,-2:3]
# x = x.flatten()
# y = y.flatten()
# for shifting peak:
xx, yy = np.mgrid[0:k, 0:k]
xx = xx.astype(float)
yy = yy.astype(float)
self.subs = []
# import pylab as plt
# plt.figure(20)
# img = self.drawPoints()
# plt.imshow(img, interpolation='none')
# # plt.figure(21)
# # plt.imshow(sub2, interpolation='none')
# plt.show()
#thresh = 0.8*bg + 0.1*im.max()
for i, p in enumerate(self.points):
sub = self.img[p[1] - hk:p[1] + hk + 1,
p[0] - hk:p[0] + hk + 1].astype(float)
sub2 = sub.copy()
mean = sub2.mean()
mx = sub2.max()
sub2[sub2 < 0.5 * (mean + mx)] = 0 # only select peak
try:
# SHIFT SUB ARRAY to align peak maximum exactly in middle:
# only eval a 5x5 array in middle of sub:
# peak = sub[hk-3:hk+4,hk-3:hk+4]#.copy()
# peak -= peak.min()
# peak/=peak.max()
# peak = peak.flatten()
# fit paraboloid to get shift in x,y:
# p, _ = curve_fit(fn, (x,y), peak, (0,0))
c0, c1 = center_of_mass(sub2)
# print (p,c0,c1,hk)
#coords = np.array([xx+p[0],yy+p[1]])
coords = np.array([xx + (c0 - hk), yy + (c1 - hk)])
#print (c0,c1)
#import pylab as plt
#plt.imshow(sub2, interpolation='none')
# shift array:
sub = map_coordinates(sub, coords,
mode='nearest').reshape(k, k)
# plt.figure(2)
#plt.imshow(sub, interpolation='none')
# plt.show()
#normalize:
            bg = 0.25 * (sub[0].mean() + sub[-1].mean()
                         + sub[:, 0].mean() + sub[:, -1].mean())
            sub -= bg
sub /= sub.max()
# import pylab as plt
# plt.figure(20)
# plt.imshow(sub, interpolation='none')
# # plt.figure(21)
# # plt.imshow(sub2, interpolation='none')
# plt.show()
self._psf += sub
if self.calc_std:
self.subs.append(sub)
except ValueError:
pass
|
python
|
def addImg(self, img, roi=None):
'''
img - background, flat field, ste corrected image
roi - [(x1,y1),...,(x4,y4)] - boundaries where points are
'''
self.img = imread(img, 'gray')
s0, s1 = self.img.shape
if roi is None:
roi = ((0, 0), (s0, 0), (s0, s1), (0, s1))
k = self.kernel_size
hk = k // 2
# mask image
img2 = self.img.copy() # .astype(int)
mask = np.zeros(self.img.shape)
cv2.fillConvexPoly(mask, np.asarray(roi, dtype=np.int32), color=1)
mask = mask.astype(bool)
im = img2[mask]
bg = im.mean() # assume image average with in roi == background
mask = ~mask
img2[mask] = -1
# find points from local maxima:
self.points = np.zeros(shape=(self.max_points, 2), dtype=int)
thresh = 0.8 * bg + 0.2 * im.max()
_findPoints(img2, thresh, self.min_dist, self.points)
self.points = self.points[:np.argmin(self.points, axis=0)[0]]
    # correct point position, so that every point is over the max value:
for n, p in enumerate(self.points):
sub = self.img[p[1] - hk:p[1] + hk + 1, p[0] - hk:p[0] + hk + 1]
i, j = np.unravel_index(np.nanargmax(sub), sub.shape)
self.points[n] += [j - hk, i - hk]
# remove points that are too close to their neighbour or the border
mask = maximum_filter(mask, hk)
i = np.ones(self.points.shape[0], dtype=bool)
for n, p in enumerate(self.points):
if mask[p[1], p[0]]: # too close to border
i[n] = False
else:
# too close to other points
for pp in self.points[n + 1:]:
if norm(p - pp) < hk + 1:
i[n] = False
isum = i.sum()
ll = len(i) - isum
print('found %s points' % isum)
if ll:
print(
'removed %s points (too close to border or other points)' %
ll)
self.points = self.points[i]
# self.n_points += len(self.points)
# for finding best peak position:
# def fn(xy,cx,cy):#par
# (x,y) = xy
# return 1-(((x-cx)**2 + (y-cy)**2)*(1/8)).flatten()
# x,y = np.mgrid[-2:3,-2:3]
# x = x.flatten()
# y = y.flatten()
# for shifting peak:
xx, yy = np.mgrid[0:k, 0:k]
xx = xx.astype(float)
yy = yy.astype(float)
self.subs = []
# import pylab as plt
# plt.figure(20)
# img = self.drawPoints()
# plt.imshow(img, interpolation='none')
# # plt.figure(21)
# # plt.imshow(sub2, interpolation='none')
# plt.show()
#thresh = 0.8*bg + 0.1*im.max()
for i, p in enumerate(self.points):
sub = self.img[p[1] - hk:p[1] + hk + 1,
p[0] - hk:p[0] + hk + 1].astype(float)
sub2 = sub.copy()
mean = sub2.mean()
mx = sub2.max()
sub2[sub2 < 0.5 * (mean + mx)] = 0 # only select peak
try:
# SHIFT SUB ARRAY to align peak maximum exactly in middle:
# only eval a 5x5 array in middle of sub:
# peak = sub[hk-3:hk+4,hk-3:hk+4]#.copy()
# peak -= peak.min()
# peak/=peak.max()
# peak = peak.flatten()
# fit paraboloid to get shift in x,y:
# p, _ = curve_fit(fn, (x,y), peak, (0,0))
c0, c1 = center_of_mass(sub2)
# print (p,c0,c1,hk)
#coords = np.array([xx+p[0],yy+p[1]])
coords = np.array([xx + (c0 - hk), yy + (c1 - hk)])
#print (c0,c1)
#import pylab as plt
#plt.imshow(sub2, interpolation='none')
# shift array:
sub = map_coordinates(sub, coords,
mode='nearest').reshape(k, k)
# plt.figure(2)
#plt.imshow(sub, interpolation='none')
# plt.show()
#normalize:
            bg = 0.25 * (sub[0].mean() + sub[-1].mean()
                         + sub[:, 0].mean() + sub[:, -1].mean())
            sub -= bg
sub /= sub.max()
# import pylab as plt
# plt.figure(20)
# plt.imshow(sub, interpolation='none')
# # plt.figure(21)
# # plt.imshow(sub2, interpolation='none')
# plt.show()
self._psf += sub
if self.calc_std:
self.subs.append(sub)
except ValueError:
pass
|
[
"def",
"addImg",
"(",
"self",
",",
"img",
",",
"roi",
"=",
"None",
")",
":",
"self",
".",
"img",
"=",
"imread",
"(",
"img",
",",
"'gray'",
")",
"s0",
",",
"s1",
"=",
"self",
".",
"img",
".",
"shape",
"if",
"roi",
"is",
"None",
":",
"roi",
"=",
"(",
"(",
"0",
",",
"0",
")",
",",
"(",
"s0",
",",
"0",
")",
",",
"(",
"s0",
",",
"s1",
")",
",",
"(",
"0",
",",
"s1",
")",
")",
"k",
"=",
"self",
".",
"kernel_size",
"hk",
"=",
"k",
"//",
"2",
"# mask image\r",
"img2",
"=",
"self",
".",
"img",
".",
"copy",
"(",
")",
"# .astype(int)\r",
"mask",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"img",
".",
"shape",
")",
"cv2",
".",
"fillConvexPoly",
"(",
"mask",
",",
"np",
".",
"asarray",
"(",
"roi",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
",",
"color",
"=",
"1",
")",
"mask",
"=",
"mask",
".",
"astype",
"(",
"bool",
")",
"im",
"=",
"img2",
"[",
"mask",
"]",
"bg",
"=",
"im",
".",
"mean",
"(",
")",
"# assume image average with in roi == background\r",
"mask",
"=",
"~",
"mask",
"img2",
"[",
"mask",
"]",
"=",
"-",
"1",
"# find points from local maxima:\r",
"self",
".",
"points",
"=",
"np",
".",
"zeros",
"(",
"shape",
"=",
"(",
"self",
".",
"max_points",
",",
"2",
")",
",",
"dtype",
"=",
"int",
")",
"thresh",
"=",
"0.8",
"*",
"bg",
"+",
"0.2",
"*",
"im",
".",
"max",
"(",
")",
"_findPoints",
"(",
"img2",
",",
"thresh",
",",
"self",
".",
"min_dist",
",",
"self",
".",
"points",
")",
"self",
".",
"points",
"=",
"self",
".",
"points",
"[",
":",
"np",
".",
"argmin",
"(",
"self",
".",
"points",
",",
"axis",
"=",
"0",
")",
"[",
"0",
"]",
"]",
"# correct point position, to that every point is over max value:\r",
"for",
"n",
",",
"p",
"in",
"enumerate",
"(",
"self",
".",
"points",
")",
":",
"sub",
"=",
"self",
".",
"img",
"[",
"p",
"[",
"1",
"]",
"-",
"hk",
":",
"p",
"[",
"1",
"]",
"+",
"hk",
"+",
"1",
",",
"p",
"[",
"0",
"]",
"-",
"hk",
":",
"p",
"[",
"0",
"]",
"+",
"hk",
"+",
"1",
"]",
"i",
",",
"j",
"=",
"np",
".",
"unravel_index",
"(",
"np",
".",
"nanargmax",
"(",
"sub",
")",
",",
"sub",
".",
"shape",
")",
"self",
".",
"points",
"[",
"n",
"]",
"+=",
"[",
"j",
"-",
"hk",
",",
"i",
"-",
"hk",
"]",
"# remove points that are too close to their neighbour or the border\r",
"mask",
"=",
"maximum_filter",
"(",
"mask",
",",
"hk",
")",
"i",
"=",
"np",
".",
"ones",
"(",
"self",
".",
"points",
".",
"shape",
"[",
"0",
"]",
",",
"dtype",
"=",
"bool",
")",
"for",
"n",
",",
"p",
"in",
"enumerate",
"(",
"self",
".",
"points",
")",
":",
"if",
"mask",
"[",
"p",
"[",
"1",
"]",
",",
"p",
"[",
"0",
"]",
"]",
":",
"# too close to border\r",
"i",
"[",
"n",
"]",
"=",
"False",
"else",
":",
"# too close to other points\r",
"for",
"pp",
"in",
"self",
".",
"points",
"[",
"n",
"+",
"1",
":",
"]",
":",
"if",
"norm",
"(",
"p",
"-",
"pp",
")",
"<",
"hk",
"+",
"1",
":",
"i",
"[",
"n",
"]",
"=",
"False",
"isum",
"=",
"i",
".",
"sum",
"(",
")",
"ll",
"=",
"len",
"(",
"i",
")",
"-",
"isum",
"print",
"(",
"'found %s points'",
"%",
"isum",
")",
"if",
"ll",
":",
"print",
"(",
"'removed %s points (too close to border or other points)'",
"%",
"ll",
")",
"self",
".",
"points",
"=",
"self",
".",
"points",
"[",
"i",
"]",
"# self.n_points += len(self.points)\r",
"# for finding best peak position:\r",
"# def fn(xy,cx,cy):#par\r",
"# (x,y) = xy\r",
"# return 1-(((x-cx)**2 + (y-cy)**2)*(1/8)).flatten()\r",
"# x,y = np.mgrid[-2:3,-2:3]\r",
"# x = x.flatten()\r",
"# y = y.flatten()\r",
"# for shifting peak:\r",
"xx",
",",
"yy",
"=",
"np",
".",
"mgrid",
"[",
"0",
":",
"k",
",",
"0",
":",
"k",
"]",
"xx",
"=",
"xx",
".",
"astype",
"(",
"float",
")",
"yy",
"=",
"yy",
".",
"astype",
"(",
"float",
")",
"self",
".",
"subs",
"=",
"[",
"]",
"# import pylab as plt\r",
"# plt.figure(20)\r",
"# img = self.drawPoints()\r",
"# plt.imshow(img, interpolation='none')\r",
"# # plt.figure(21)\r",
"# # plt.imshow(sub2, interpolation='none')\r",
"# plt.show()\r",
"#thresh = 0.8*bg + 0.1*im.max()\r",
"for",
"i",
",",
"p",
"in",
"enumerate",
"(",
"self",
".",
"points",
")",
":",
"sub",
"=",
"self",
".",
"img",
"[",
"p",
"[",
"1",
"]",
"-",
"hk",
":",
"p",
"[",
"1",
"]",
"+",
"hk",
"+",
"1",
",",
"p",
"[",
"0",
"]",
"-",
"hk",
":",
"p",
"[",
"0",
"]",
"+",
"hk",
"+",
"1",
"]",
".",
"astype",
"(",
"float",
")",
"sub2",
"=",
"sub",
".",
"copy",
"(",
")",
"mean",
"=",
"sub2",
".",
"mean",
"(",
")",
"mx",
"=",
"sub2",
".",
"max",
"(",
")",
"sub2",
"[",
"sub2",
"<",
"0.5",
"*",
"(",
"mean",
"+",
"mx",
")",
"]",
"=",
"0",
"# only select peak\r",
"try",
":",
"# SHIFT SUB ARRAY to align peak maximum exactly in middle:\r",
"# only eval a 5x5 array in middle of sub:\r",
"# peak = sub[hk-3:hk+4,hk-3:hk+4]#.copy()\r",
"# peak -= peak.min()\r",
"# peak/=peak.max()\r",
"# peak = peak.flatten()\r",
"# fit paraboloid to get shift in x,y:\r",
"# p, _ = curve_fit(fn, (x,y), peak, (0,0))\r",
"c0",
",",
"c1",
"=",
"center_of_mass",
"(",
"sub2",
")",
"# print (p,c0,c1,hk)\r",
"#coords = np.array([xx+p[0],yy+p[1]])\r",
"coords",
"=",
"np",
".",
"array",
"(",
"[",
"xx",
"+",
"(",
"c0",
"-",
"hk",
")",
",",
"yy",
"+",
"(",
"c1",
"-",
"hk",
")",
"]",
")",
"#print (c0,c1)\r",
"#import pylab as plt\r",
"#plt.imshow(sub2, interpolation='none')\r",
"# shift array:\r",
"sub",
"=",
"map_coordinates",
"(",
"sub",
",",
"coords",
",",
"mode",
"=",
"'nearest'",
")",
".",
"reshape",
"(",
"k",
",",
"k",
")",
"# plt.figure(2)\r",
"#plt.imshow(sub, interpolation='none')\r",
"# plt.show()\r",
"#normalize:\r",
"bg",
"=",
"0.25",
"*",
"(",
"sub",
"[",
"0",
"]",
".",
"mean",
"(",
")",
"+",
"sub",
"[",
"-",
"1",
"]",
".",
"mean",
"(",
")",
"+",
"sub",
"[",
":",
",",
"0",
"]",
".",
"mean",
"(",
")",
"+",
"sub",
"[",
":",
",",
"-",
"1",
"]",
".",
"mean",
"(",
")",
")",
"sub",
"-=",
"bg",
"sub",
"/=",
"sub",
".",
"max",
"(",
")",
"# import pylab as plt\r",
"# plt.figure(20)\r",
"# plt.imshow(sub, interpolation='none')\r",
"# # plt.figure(21)\r",
"# # plt.imshow(sub2, interpolation='none')\r",
"# plt.show()\r",
"self",
".",
"_psf",
"+=",
"sub",
"if",
"self",
".",
"calc_std",
":",
"self",
".",
"subs",
".",
"append",
"(",
"sub",
")",
"except",
"ValueError",
":",
"pass"
] |
img - background, flat field, ste corrected image
roi - [(x1,y1),...,(x4,y4)] - boundaries where points are
|
[
"img",
"-",
"background",
"flat",
"field",
"ste",
"corrected",
"image",
"roi",
"-",
"[",
"(",
"x1",
"y1",
")",
"...",
"(",
"x4",
"y4",
")",
"]",
"-",
"boundaries",
"where",
"points",
"are"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/measure/sharpness/SharpnessfromPoints.py#L78-L220
|
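The core trick inside addImg is recentring each detected peak on its centre of mass before the patches are averaged into a PSF. A self-contained sketch of just that step, with a synthetic off-centre Gaussian standing in for a detected point source (all numbers made up):

import numpy as np
from scipy.ndimage import center_of_mass, map_coordinates

k = 15
hk = k // 2
y, x = np.mgrid[0:k, 0:k]
sub = np.exp(-((x - 8.3)**2 + (y - 6.4)**2) / 4.0)  # off-centre peak

# shift the patch so the centre of mass lands on the middle pixel,
# exactly as done inside addImg
c0, c1 = center_of_mass(sub)
yy, xx = np.mgrid[0:k, 0:k].astype(float)
coords = np.array([yy + (c0 - hk), xx + (c1 - hk)])
centred = map_coordinates(sub, coords, mode='nearest')
print(center_of_mass(centred))  # ~ (7.0, 7.0)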
radjkarl/imgProcessor
|
imgProcessor/interpolate/interpolate2dStructuredFastIDW.py
|
interpolate2dStructuredFastIDW
|
def interpolate2dStructuredFastIDW(grid, mask, kernel=15, power=2,
minnvals=5):
'''
FASTER IMPLEMENTATION OF interpolate2dStructuredIDW
replace all values in [grid] indicated by [mask]
with the inverse distance weighted interpolation of all values within
px+-kernel
[power] -> distance weighting factor: 1/distance**[power]
    [minnvals] -> minimum number of neighbour values to find until
interpolation stops
'''
indices, dist = growPositions(kernel)
weights = 1 / dist**(0.5 * power)
return _calc(grid, mask, indices, weights, minnvals - 1)
|
python
|
def interpolate2dStructuredFastIDW(grid, mask, kernel=15, power=2,
minnvals=5):
'''
FASTER IMPLEMENTATION OF interpolate2dStructuredIDW
replace all values in [grid] indicated by [mask]
with the inverse distance weighted interpolation of all values within
px+-kernel
[power] -> distance weighting factor: 1/distance**[power]
    [minnvals] -> minimum number of neighbour values to find until
interpolation stops
'''
indices, dist = growPositions(kernel)
weights = 1 / dist**(0.5 * power)
return _calc(grid, mask, indices, weights, minnvals - 1)
|
[
"def",
"interpolate2dStructuredFastIDW",
"(",
"grid",
",",
"mask",
",",
"kernel",
"=",
"15",
",",
"power",
"=",
"2",
",",
"minnvals",
"=",
"5",
")",
":",
"indices",
",",
"dist",
"=",
"growPositions",
"(",
"kernel",
")",
"weights",
"=",
"1",
"/",
"dist",
"**",
"(",
"0.5",
"*",
"power",
")",
"return",
"_calc",
"(",
"grid",
",",
"mask",
",",
"indices",
",",
"weights",
",",
"minnvals",
"-",
"1",
")"
] |
FASTER IMPLEMENTATION OF interpolate2dStructuredIDW
replace all values in [grid] indicated by [mask]
with the inverse distance weighted interpolation of all values within
px+-kernel
[power] -> distance weighting factor: 1/distance**[power]
[minnvals] -> minimum number of neighbour values to find until
interpolation stops
|
[
"FASTER",
"IMPLEMENTATION",
"OF",
"interpolate2dStructuredIDW",
"replace",
"all",
"values",
"in",
"[",
"grid",
"]",
"indicated",
"by",
"[",
"mask",
"]",
"with",
"the",
"inverse",
"distance",
"weighted",
"interpolation",
"of",
"all",
"values",
"within",
"px",
"+",
"-",
"kernel",
"[",
"power",
"]",
"-",
">",
"distance",
"weighting",
"factor",
":",
"1",
"/",
"distance",
"**",
"[",
"power",
"]",
"[",
"minvals",
"]",
"-",
">",
"minimum",
"number",
"of",
"neighbour",
"values",
"to",
"find",
"until",
"interpolation",
"stops"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/interpolate/interpolate2dStructuredFastIDW.py#L9-L26
|
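A usage sketch following the signature above; whether the compiled helper additionally fills the array in place is not shown here, so treat the in-place behaviour as unspecified:

import numpy as np
from imgProcessor.interpolate.interpolate2dStructuredFastIDW import \
    interpolate2dStructuredFastIDW

grid = np.random.rand(50, 50)
mask = np.zeros_like(grid, dtype=bool)
mask[20:25, 20:25] = True        # pixels to be replaced
grid[mask] = 0                   # pretend these values are invalid

filled = interpolate2dStructuredFastIDW(grid, mask, kernel=15, power=2,
                                        minnvals=5)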
radjkarl/imgProcessor
|
imgProcessor/transform/linearBlend.py
|
linearBlend
|
def linearBlend(img1, img2, overlap, backgroundColor=None):
'''
Stitch 2 images vertically together.
Smooth the overlap area of both images with a linear fade from img1 to img2
@param img1: numpy.2dArray
@param img2: numpy.2dArray of the same shape[1,2] as img1
@param overlap: number of pixels both images overlap
@returns: stitched-image
'''
(sizex, sizey) = img1.shape[:2]
overlapping = True
if overlap < 0:
overlapping = False
overlap = -overlap
# linear transparency change:
alpha = np.tile(np.expand_dims(np.linspace(1, 0, overlap), 1), sizey)
if len(img2.shape) == 3: # multi channel img like rgb
# make alpha 3d with n channels
alpha = np.dstack(([alpha for _ in range(img2.shape[2])]))
if overlapping:
img1_cut = img1[sizex - overlap:sizex, :]
img2_cut = img2[0:overlap, :]
else:
# take average of last 5 rows:
img1_cut = np.tile(img1[-min(sizex, 5):, :].mean(
axis=0), (overlap, 1)).reshape(alpha.shape)
img2_cut = np.tile(img2[:min(img2.shape[0], 5), :].mean(
axis=0), (overlap, 1)).reshape(alpha.shape)
# fill intermediate area as mixture of both images
#################bg transparent############
inter = (img1_cut * alpha + img2_cut * (1 - alpha)).astype(img1.dtype)
# set background areas to value of respective other img:
if backgroundColor is not None:
mask = np.logical_and(img1_cut == backgroundColor,
img2_cut != backgroundColor)
inter[mask] = img2_cut[mask]
mask = np.logical_and(img2_cut == backgroundColor,
img1_cut != backgroundColor)
inter[mask] = img1_cut[mask]
if not overlapping:
overlap = 0
return np.vstack((img1[0:sizex - overlap, :],
inter,
img2[overlap:, :]))
|
python
|
def linearBlend(img1, img2, overlap, backgroundColor=None):
'''
Stitch 2 images vertically together.
Smooth the overlap area of both images with a linear fade from img1 to img2
@param img1: numpy.2dArray
@param img2: numpy.2dArray of the same shape[1,2] as img1
@param overlap: number of pixels both images overlap
@returns: stitched-image
'''
(sizex, sizey) = img1.shape[:2]
overlapping = True
if overlap < 0:
overlapping = False
overlap = -overlap
# linear transparency change:
alpha = np.tile(np.expand_dims(np.linspace(1, 0, overlap), 1), sizey)
if len(img2.shape) == 3: # multi channel img like rgb
# make alpha 3d with n channels
alpha = np.dstack(([alpha for _ in range(img2.shape[2])]))
if overlapping:
img1_cut = img1[sizex - overlap:sizex, :]
img2_cut = img2[0:overlap, :]
else:
# take average of last 5 rows:
img1_cut = np.tile(img1[-min(sizex, 5):, :].mean(
axis=0), (overlap, 1)).reshape(alpha.shape)
img2_cut = np.tile(img2[:min(img2.shape[0], 5), :].mean(
axis=0), (overlap, 1)).reshape(alpha.shape)
# fill intermediate area as mixture of both images
#################bg transparent############
inter = (img1_cut * alpha + img2_cut * (1 - alpha)).astype(img1.dtype)
# set background areas to value of respective other img:
if backgroundColor is not None:
mask = np.logical_and(img1_cut == backgroundColor,
img2_cut != backgroundColor)
inter[mask] = img2_cut[mask]
mask = np.logical_and(img2_cut == backgroundColor,
img1_cut != backgroundColor)
inter[mask] = img1_cut[mask]
if not overlapping:
overlap = 0
return np.vstack((img1[0:sizex - overlap, :],
inter,
img2[overlap:, :]))
|
[
"def",
"linearBlend",
"(",
"img1",
",",
"img2",
",",
"overlap",
",",
"backgroundColor",
"=",
"None",
")",
":",
"(",
"sizex",
",",
"sizey",
")",
"=",
"img1",
".",
"shape",
"[",
":",
"2",
"]",
"overlapping",
"=",
"True",
"if",
"overlap",
"<",
"0",
":",
"overlapping",
"=",
"False",
"overlap",
"=",
"-",
"overlap",
"# linear transparency change:\r",
"alpha",
"=",
"np",
".",
"tile",
"(",
"np",
".",
"expand_dims",
"(",
"np",
".",
"linspace",
"(",
"1",
",",
"0",
",",
"overlap",
")",
",",
"1",
")",
",",
"sizey",
")",
"if",
"len",
"(",
"img2",
".",
"shape",
")",
"==",
"3",
":",
"# multi channel img like rgb\r",
"# make alpha 3d with n channels\r",
"alpha",
"=",
"np",
".",
"dstack",
"(",
"(",
"[",
"alpha",
"for",
"_",
"in",
"range",
"(",
"img2",
".",
"shape",
"[",
"2",
"]",
")",
"]",
")",
")",
"if",
"overlapping",
":",
"img1_cut",
"=",
"img1",
"[",
"sizex",
"-",
"overlap",
":",
"sizex",
",",
":",
"]",
"img2_cut",
"=",
"img2",
"[",
"0",
":",
"overlap",
",",
":",
"]",
"else",
":",
"# take average of last 5 rows:\r",
"img1_cut",
"=",
"np",
".",
"tile",
"(",
"img1",
"[",
"-",
"min",
"(",
"sizex",
",",
"5",
")",
":",
",",
":",
"]",
".",
"mean",
"(",
"axis",
"=",
"0",
")",
",",
"(",
"overlap",
",",
"1",
")",
")",
".",
"reshape",
"(",
"alpha",
".",
"shape",
")",
"img2_cut",
"=",
"np",
".",
"tile",
"(",
"img2",
"[",
":",
"min",
"(",
"img2",
".",
"shape",
"[",
"0",
"]",
",",
"5",
")",
",",
":",
"]",
".",
"mean",
"(",
"axis",
"=",
"0",
")",
",",
"(",
"overlap",
",",
"1",
")",
")",
".",
"reshape",
"(",
"alpha",
".",
"shape",
")",
"# fill intermediate area as mixture of both images\r",
"#################bg transparent############\r",
"inter",
"=",
"(",
"img1_cut",
"*",
"alpha",
"+",
"img2_cut",
"*",
"(",
"1",
"-",
"alpha",
")",
")",
".",
"astype",
"(",
"img1",
".",
"dtype",
")",
"# set background areas to value of respective other img:\r",
"if",
"backgroundColor",
"is",
"not",
"None",
":",
"mask",
"=",
"np",
".",
"logical_and",
"(",
"img1_cut",
"==",
"backgroundColor",
",",
"img2_cut",
"!=",
"backgroundColor",
")",
"inter",
"[",
"mask",
"]",
"=",
"img2_cut",
"[",
"mask",
"]",
"mask",
"=",
"np",
".",
"logical_and",
"(",
"img2_cut",
"==",
"backgroundColor",
",",
"img1_cut",
"!=",
"backgroundColor",
")",
"inter",
"[",
"mask",
"]",
"=",
"img1_cut",
"[",
"mask",
"]",
"if",
"not",
"overlapping",
":",
"overlap",
"=",
"0",
"return",
"np",
".",
"vstack",
"(",
"(",
"img1",
"[",
"0",
":",
"sizex",
"-",
"overlap",
",",
":",
"]",
",",
"inter",
",",
"img2",
"[",
"overlap",
":",
",",
":",
"]",
")",
")"
] |
Stitch 2 images vertically together.
Smooth the overlap area of both images with a linear fade from img1 to img2
@param img1: numpy.2dArray
@param img2: numpy.2dArray of the same shape[1,2] as img1
@param overlap: number of pixels both images overlap
@returns: stitched-image
|
[
"Stitch",
"2",
"images",
"vertically",
"together",
".",
"Smooth",
"the",
"overlap",
"area",
"of",
"both",
"images",
"with",
"a",
"linear",
"fade",
"from",
"img1",
"to",
"img2"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/transform/linearBlend.py#L7-L55
|
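A quick usage sketch of linearBlend with two flat grey images (values chosen arbitrarily):

import numpy as np
from imgProcessor.transform.linearBlend import linearBlend

img1 = np.full((100, 80), 50, dtype=np.uint8)
img2 = np.full((100, 80), 200, dtype=np.uint8)

# the images share 20 rows; those rows fade linearly from img1 to img2
stitched = linearBlend(img1, img2, overlap=20)
print(stitched.shape)  # (180, 80) = 100 + 100 - 20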
radjkarl/imgProcessor
|
imgProcessor/interpolate/interpolate2dStructuredPointSpreadIDW.py
|
interpolate2dStructuredPointSpreadIDW
|
def interpolate2dStructuredPointSpreadIDW(grid, mask, kernel=15, power=2,
maxIter=1e5, copy=True):
'''
same as interpolate2dStructuredIDW but using the point spread method
this is faster if there are bigger connected masked areas and the border
length is smaller
replace all values in [grid] indicated by [mask]
with the inverse distance weighted interpolation of all values within
px+-kernel
[power] -> distance weighting factor: 1/distance**[power]
[copy] -> False: a bit faster, but modifies 'grid' and 'mask'
'''
assert grid.shape == mask.shape, 'grid and mask shape are different'
    border = np.zeros(shape=mask.shape, dtype=bool)
if copy:
# copy mask as well because if will be modified later:
mask = mask.copy()
grid = grid.copy()
return _calc(grid, mask, border, kernel, power, maxIter)
|
python
|
def interpolate2dStructuredPointSpreadIDW(grid, mask, kernel=15, power=2,
maxIter=1e5, copy=True):
'''
same as interpolate2dStructuredIDW but using the point spread method
this is faster if there are bigger connected masked areas and the border
length is smaller
replace all values in [grid] indicated by [mask]
with the inverse distance weighted interpolation of all values within
px+-kernel
[power] -> distance weighting factor: 1/distance**[power]
[copy] -> False: a bit faster, but modifies 'grid' and 'mask'
'''
assert grid.shape == mask.shape, 'grid and mask shape are different'
    border = np.zeros(shape=mask.shape, dtype=bool)
if copy:
# copy mask as well because if will be modified later:
mask = mask.copy()
grid = grid.copy()
return _calc(grid, mask, border, kernel, power, maxIter)
|
[
"def",
"interpolate2dStructuredPointSpreadIDW",
"(",
"grid",
",",
"mask",
",",
"kernel",
"=",
"15",
",",
"power",
"=",
"2",
",",
"maxIter",
"=",
"1e5",
",",
"copy",
"=",
"True",
")",
":",
"assert",
"grid",
".",
"shape",
"==",
"mask",
".",
"shape",
",",
"'grid and mask shape are different'",
"border",
"=",
"np",
".",
"zeros",
"(",
"shape",
"=",
"mask",
".",
"shape",
",",
"dtype",
"=",
"np",
".",
"bool",
")",
"if",
"copy",
":",
"# copy mask as well because if will be modified later:\r",
"mask",
"=",
"mask",
".",
"copy",
"(",
")",
"grid",
"=",
"grid",
".",
"copy",
"(",
")",
"return",
"_calc",
"(",
"grid",
",",
"mask",
",",
"border",
",",
"kernel",
",",
"power",
",",
"maxIter",
")"
] |
same as interpolate2dStructuredIDW but using the point spread method
this is faster if there are bigger connected masked areas and the border
length is smaller
replace all values in [grid] indicated by [mask]
with the inverse distance weighted interpolation of all values within
px+-kernel
[power] -> distance weighting factor: 1/distance**[power]
[copy] -> False: a bit faster, but modifies 'grid' and 'mask'
|
[
"same",
"as",
"interpolate2dStructuredIDW",
"but",
"using",
"the",
"point",
"spread",
"method",
"this",
"is",
"faster",
"if",
"there",
"are",
"bigger",
"connected",
"masked",
"areas",
"and",
"the",
"border",
"length",
"is",
"smaller",
"replace",
"all",
"values",
"in",
"[",
"grid",
"]",
"indicated",
"by",
"[",
"mask",
"]",
"with",
"the",
"inverse",
"distance",
"weighted",
"interpolation",
"of",
"all",
"values",
"within",
"px",
"+",
"-",
"kernel",
"[",
"power",
"]",
"-",
">",
"distance",
"weighting",
"factor",
":",
"1",
"/",
"distance",
"**",
"[",
"power",
"]",
"[",
"copy",
"]",
"-",
">",
"False",
":",
"a",
"bit",
"faster",
"but",
"modifies",
"grid",
"and",
"mask"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/interpolate/interpolate2dStructuredPointSpreadIDW.py#L7-L28
|
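A usage sketch matching the signature above; the single large connected hole is the case where the point-spread variant is said to pay off:

import numpy as np
from imgProcessor.interpolate.interpolate2dStructuredPointSpreadIDW import \
    interpolate2dStructuredPointSpreadIDW

grid = np.random.rand(40, 40)
mask = np.zeros_like(grid, dtype=bool)
mask[10:30, 10:30] = True    # one big connected masked area

out = interpolate2dStructuredPointSpreadIDW(grid, mask, kernel=15, power=2)
# copy=False would be slightly faster but mutates grid and mask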
radjkarl/imgProcessor
|
imgProcessor/measure/SNR/SNRaverage.py
|
SNRaverage
|
def SNRaverage(snr, method='average', excludeBackground=True,
checkBackground=True,
backgroundLevel=None):
'''
average a signal-to-noise map
:param method: ['average','X75', 'RMS', 'median'] - X75: this SNR will be exceeded by 75% of the signal
:type method: str
:param checkBackground: check whether there is actually a background level to exclude
:type checkBackground: bool
:returns: averaged SNR as float
'''
if excludeBackground:
# get background level
if backgroundLevel is None:
try:
f = FitHistogramPeaks(snr).fitParams
if checkBackground:
if not hasBackground(f):
excludeBackground = False
if excludeBackground:
backgroundLevel = getSignalMinimum(f)
except (ValueError, AssertionError):
backgroundLevel = snr.min()
if excludeBackground:
snr = snr[snr >= backgroundLevel]
if method == 'RMS':
avg = (snr**2).mean()**0.5
elif method == 'average':
avg = snr.mean()
# if np.isnan(avg):
# avg = np.nanmean(snr)
elif method == 'median':
avg = np.median(snr)
# if np.isnan(avg):
# avg = np.nanmedian(snr)
elif method == 'X75':
r = (snr.min(), snr.max())
hist, bin_edges = np.histogram(snr, bins=2 * int(r[1] - r[0]), range=r)
hist = np.asfarray(hist) / hist.sum()
cdf = np.cumsum(hist)
i = np.argmax(cdf > 0.25)
avg = bin_edges[i]
else:
        raise NotImplementedError("given SNR average doesn't exist")
return avg
|
python
|
def SNRaverage(snr, method='average', excludeBackground=True,
checkBackground=True,
backgroundLevel=None):
'''
average a signal-to-noise map
:param method: ['average','X75', 'RMS', 'median'] - X75: this SNR will be exceeded by 75% of the signal
:type method: str
:param checkBackground: check whether there is actually a background level to exclude
:type checkBackground: bool
:returns: averaged SNR as float
'''
if excludeBackground:
# get background level
if backgroundLevel is None:
try:
f = FitHistogramPeaks(snr).fitParams
if checkBackground:
if not hasBackground(f):
excludeBackground = False
if excludeBackground:
backgroundLevel = getSignalMinimum(f)
except (ValueError, AssertionError):
backgroundLevel = snr.min()
if excludeBackground:
snr = snr[snr >= backgroundLevel]
if method == 'RMS':
avg = (snr**2).mean()**0.5
elif method == 'average':
avg = snr.mean()
# if np.isnan(avg):
# avg = np.nanmean(snr)
elif method == 'median':
avg = np.median(snr)
# if np.isnan(avg):
# avg = np.nanmedian(snr)
elif method == 'X75':
r = (snr.min(), snr.max())
hist, bin_edges = np.histogram(snr, bins=2 * int(r[1] - r[0]), range=r)
hist = np.asfarray(hist) / hist.sum()
cdf = np.cumsum(hist)
i = np.argmax(cdf > 0.25)
avg = bin_edges[i]
else:
        raise NotImplementedError("given SNR average doesn't exist")
return avg
|
[
"def",
"SNRaverage",
"(",
"snr",
",",
"method",
"=",
"'average'",
",",
"excludeBackground",
"=",
"True",
",",
"checkBackground",
"=",
"True",
",",
"backgroundLevel",
"=",
"None",
")",
":",
"if",
"excludeBackground",
":",
"# get background level\r",
"if",
"backgroundLevel",
"is",
"None",
":",
"try",
":",
"f",
"=",
"FitHistogramPeaks",
"(",
"snr",
")",
".",
"fitParams",
"if",
"checkBackground",
":",
"if",
"not",
"hasBackground",
"(",
"f",
")",
":",
"excludeBackground",
"=",
"False",
"if",
"excludeBackground",
":",
"backgroundLevel",
"=",
"getSignalMinimum",
"(",
"f",
")",
"except",
"(",
"ValueError",
",",
"AssertionError",
")",
":",
"backgroundLevel",
"=",
"snr",
".",
"min",
"(",
")",
"if",
"excludeBackground",
":",
"snr",
"=",
"snr",
"[",
"snr",
">=",
"backgroundLevel",
"]",
"if",
"method",
"==",
"'RMS'",
":",
"avg",
"=",
"(",
"snr",
"**",
"2",
")",
".",
"mean",
"(",
")",
"**",
"0.5",
"elif",
"method",
"==",
"'average'",
":",
"avg",
"=",
"snr",
".",
"mean",
"(",
")",
"# if np.isnan(avg):\r",
"# avg = np.nanmean(snr)\r",
"elif",
"method",
"==",
"'median'",
":",
"avg",
"=",
"np",
".",
"median",
"(",
"snr",
")",
"# if np.isnan(avg):\r",
"# avg = np.nanmedian(snr) \r",
"elif",
"method",
"==",
"'X75'",
":",
"r",
"=",
"(",
"snr",
".",
"min",
"(",
")",
",",
"snr",
".",
"max",
"(",
")",
")",
"hist",
",",
"bin_edges",
"=",
"np",
".",
"histogram",
"(",
"snr",
",",
"bins",
"=",
"2",
"*",
"int",
"(",
"r",
"[",
"1",
"]",
"-",
"r",
"[",
"0",
"]",
")",
",",
"range",
"=",
"r",
")",
"hist",
"=",
"np",
".",
"asfarray",
"(",
"hist",
")",
"/",
"hist",
".",
"sum",
"(",
")",
"cdf",
"=",
"np",
".",
"cumsum",
"(",
"hist",
")",
"i",
"=",
"np",
".",
"argmax",
"(",
"cdf",
">",
"0.25",
")",
"avg",
"=",
"bin_edges",
"[",
"i",
"]",
"else",
":",
"raise",
"NotImplemented",
"(",
"\"given SNR average doesn't exist\"",
")",
"return",
"avg"
] |
average a signal-to-noise map
:param method: ['average','X75', 'RMS', 'median'] - X75: this SNR will be exceeded by 75% of the signal
:type method: str
:param checkBackground: check whether there is actually a background level to exclude
:type checkBackground: bool
:returns: averaged SNR as float
|
[
"average",
"a",
"signal",
"-",
"to",
"-",
"noise",
"map",
":",
"param",
"method",
":",
"[",
"average",
"X75",
"RMS",
"median",
"]",
"-",
"X75",
":",
"this",
"SNR",
"will",
"be",
"exceeded",
"by",
"75%",
"of",
"the",
"signal",
":",
"type",
"method",
":",
"str",
":",
"param",
"checkBackground",
":",
"check",
"whether",
"there",
"is",
"actually",
"a",
"background",
"level",
"to",
"exclude",
":",
"type",
"checkBackground",
":",
"bool",
":",
"returns",
":",
"averaged",
"SNR",
"as",
"float"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/measure/SNR/SNRaverage.py#L10-L58
|
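The 'X75' branch can be cross-checked against a plain percentile: the value exceeded by 75% of the pixels is the 25th percentile. A standalone sketch with synthetic data:

import numpy as np

snr = np.random.rayleigh(20, 10000)   # synthetic, flattened SNR map

r = (snr.min(), snr.max())
hist, bin_edges = np.histogram(snr, bins=2 * int(r[1] - r[0]), range=r)
cdf = np.cumsum(hist / hist.sum())
x75 = bin_edges[np.argmax(cdf > 0.25)]

print(x75, np.percentile(snr, 25))    # both should nearly agree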
radjkarl/imgProcessor
|
imgProcessor/filters/maskedConvolve.py
|
maskedConvolve
|
def maskedConvolve(arr, kernel, mask, mode='reflect'):
'''
same as scipy.ndimage.convolve but is only executed on mask==True
... which should speed up everything
'''
arr2 = extendArrayForConvolution(arr, kernel.shape, modex=mode, modey=mode)
out = np.zeros_like(arr)
return _calc(arr2, kernel, mask, out)
|
python
|
def maskedConvolve(arr, kernel, mask, mode='reflect'):
'''
same as scipy.ndimage.convolve but is only executed on mask==True
... which should speed up everything
'''
arr2 = extendArrayForConvolution(arr, kernel.shape, modex=mode, modey=mode)
out = np.zeros_like(arr)
return _calc(arr2, kernel, mask, out)
|
[
"def",
"maskedConvolve",
"(",
"arr",
",",
"kernel",
",",
"mask",
",",
"mode",
"=",
"'reflect'",
")",
":",
"arr2",
"=",
"extendArrayForConvolution",
"(",
"arr",
",",
"kernel",
".",
"shape",
",",
"modex",
"=",
"mode",
",",
"modey",
"=",
"mode",
")",
"print",
"(",
"arr2",
".",
"shape",
")",
"out",
"=",
"np",
".",
"zeros_like",
"(",
"arr",
")",
"return",
"_calc",
"(",
"arr2",
",",
"kernel",
",",
"mask",
",",
"out",
")"
] |
same as scipy.ndimage.convolve but is only executed on mask==True
... which should speed up everything
|
[
"same",
"as",
"scipy",
".",
"ndimage",
".",
"convolve",
"but",
"is",
"only",
"executed",
"on",
"mask",
"==",
"True",
"...",
"which",
"should",
"speed",
"up",
"everything"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/filters/maskedConvolve.py#L13-L21
|
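A reference check one could run against maskedConvolve: on the masked positions the result has to agree with a full scipy convolution (the scipy call is real; the speed-up only matters when the mask is sparse):

import numpy as np
from scipy.ndimage import convolve

arr = np.random.rand(100, 100)
kernel = np.ones((5, 5)) / 25.0
mask = np.zeros(arr.shape, dtype=bool)
mask[40:60, 40:60] = True            # only 4% of the pixels matter

full = convolve(arr, kernel, mode='reflect')
reference = full[mask]               # values a masked convolve must match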
radjkarl/imgProcessor
|
imgProcessor/measure/SNR/SNR.py
|
SNR
|
def SNR(img1, img2=None, bg=None,
noise_level_function=None,
constant_noise_level=False,
imgs_to_be_averaged=False):
'''
Returns a signal-to-noise-map
    uses algorithm as described in BEDRICH 2016 JPV (not yet published)
:param constant_noise_level: True, to assume noise to be constant
:param imgs_to_be_averaged: True, if SNR is for average(img1, img2)
'''
# dark current subtraction:
img1 = np.asfarray(img1)
if bg is not None:
img1 = img1 - bg
# SIGNAL:
if img2 is not None:
img2_exists = True
        img2 = np.asfarray(img2) - (bg if bg is not None else 0)
# signal as average on both images
signal = 0.5 * (img1 + img2)
else:
img2_exists = False
signal = img1
# denoise:
signal = median_filter(signal, 3)
# NOISE
if constant_noise_level:
# CONSTANT NOISE
if img2_exists:
d = img1 - img2
# 0.5**0.5 because of sum of variances
noise = 0.5**0.5 * np.mean(np.abs((d))) * F_RMS2AAD
else:
d = (img1 - signal) * F_NOISE_WITH_MEDIAN
noise = np.mean(np.abs(d)) * F_RMS2AAD
else:
# NOISE LEVEL FUNCTION
if noise_level_function is None:
noise_level_function, _ = oneImageNLF(img1, img2, signal)
noise = noise_level_function(signal)
noise[noise < 1] = 1 # otherwise SNR could be higher than image value
if imgs_to_be_averaged:
# SNR will be higher if both given images are supposed to be averaged:
        # factor of noise reduction if SNR is for average(img1, img2):
noise *= 0.5**0.5
# BACKGROUND estimation and removal if background not given:
if bg is None:
bg = getBackgroundLevel(img1)
signal -= bg
snr = signal / noise
# limit to 1, saying at these points signal=noise:
snr[snr < 1] = 1
return snr
|
python
|
def SNR(img1, img2=None, bg=None,
noise_level_function=None,
constant_noise_level=False,
imgs_to_be_averaged=False):
'''
Returns a signal-to-noise-map
    uses algorithm as described in BEDRICH 2016 JPV (not yet published)
:param constant_noise_level: True, to assume noise to be constant
:param imgs_to_be_averaged: True, if SNR is for average(img1, img2)
'''
# dark current subtraction:
img1 = np.asfarray(img1)
if bg is not None:
img1 = img1 - bg
# SIGNAL:
if img2 is not None:
img2_exists = True
        img2 = np.asfarray(img2) - (bg if bg is not None else 0)
# signal as average on both images
signal = 0.5 * (img1 + img2)
else:
img2_exists = False
signal = img1
# denoise:
signal = median_filter(signal, 3)
# NOISE
if constant_noise_level:
# CONSTANT NOISE
if img2_exists:
d = img1 - img2
# 0.5**0.5 because of sum of variances
noise = 0.5**0.5 * np.mean(np.abs((d))) * F_RMS2AAD
else:
d = (img1 - signal) * F_NOISE_WITH_MEDIAN
noise = np.mean(np.abs(d)) * F_RMS2AAD
else:
# NOISE LEVEL FUNCTION
if noise_level_function is None:
noise_level_function, _ = oneImageNLF(img1, img2, signal)
noise = noise_level_function(signal)
noise[noise < 1] = 1 # otherwise SNR could be higher than image value
if imgs_to_be_averaged:
# SNR will be higher if both given images are supposed to be averaged:
        # factor of noise reduction if SNR is for average(img1, img2):
noise *= 0.5**0.5
# BACKGROUND estimation and removal if background not given:
if bg is None:
bg = getBackgroundLevel(img1)
signal -= bg
snr = signal / noise
# limit to 1, saying at these points signal=noise:
snr[snr < 1] = 1
return snr
|
[
"def",
"SNR",
"(",
"img1",
",",
"img2",
"=",
"None",
",",
"bg",
"=",
"None",
",",
"noise_level_function",
"=",
"None",
",",
"constant_noise_level",
"=",
"False",
",",
"imgs_to_be_averaged",
"=",
"False",
")",
":",
"# dark current subtraction:\r",
"img1",
"=",
"np",
".",
"asfarray",
"(",
"img1",
")",
"if",
"bg",
"is",
"not",
"None",
":",
"img1",
"=",
"img1",
"-",
"bg",
"# SIGNAL:\r",
"if",
"img2",
"is",
"not",
"None",
":",
"img2_exists",
"=",
"True",
"img2",
"=",
"np",
".",
"asfarray",
"(",
"img2",
")",
"-",
"bg",
"# signal as average on both images\r",
"signal",
"=",
"0.5",
"*",
"(",
"img1",
"+",
"img2",
")",
"else",
":",
"img2_exists",
"=",
"False",
"signal",
"=",
"img1",
"# denoise:\r",
"signal",
"=",
"median_filter",
"(",
"signal",
",",
"3",
")",
"# NOISE\r",
"if",
"constant_noise_level",
":",
"# CONSTANT NOISE\r",
"if",
"img2_exists",
":",
"d",
"=",
"img1",
"-",
"img2",
"# 0.5**0.5 because of sum of variances\r",
"noise",
"=",
"0.5",
"**",
"0.5",
"*",
"np",
".",
"mean",
"(",
"np",
".",
"abs",
"(",
"(",
"d",
")",
")",
")",
"*",
"F_RMS2AAD",
"else",
":",
"d",
"=",
"(",
"img1",
"-",
"signal",
")",
"*",
"F_NOISE_WITH_MEDIAN",
"noise",
"=",
"np",
".",
"mean",
"(",
"np",
".",
"abs",
"(",
"d",
")",
")",
"*",
"F_RMS2AAD",
"else",
":",
"# NOISE LEVEL FUNCTION\r",
"if",
"noise_level_function",
"is",
"None",
":",
"noise_level_function",
",",
"_",
"=",
"oneImageNLF",
"(",
"img1",
",",
"img2",
",",
"signal",
")",
"noise",
"=",
"noise_level_function",
"(",
"signal",
")",
"noise",
"[",
"noise",
"<",
"1",
"]",
"=",
"1",
"# otherwise SNR could be higher than image value\r",
"if",
"imgs_to_be_averaged",
":",
"# SNR will be higher if both given images are supposed to be averaged:\r",
"# factor of noise reduction if SNR if for average(img1, img2):\r",
"noise",
"*=",
"0.5",
"**",
"0.5",
"# BACKGROUND estimation and removal if background not given:\r",
"if",
"bg",
"is",
"None",
":",
"bg",
"=",
"getBackgroundLevel",
"(",
"img1",
")",
"signal",
"-=",
"bg",
"snr",
"=",
"signal",
"/",
"noise",
"# limit to 1, saying at these points signal=noise:\r",
"snr",
"[",
"snr",
"<",
"1",
"]",
"=",
"1",
"return",
"snr"
] |
Returns a signal-to-noise-map
uses algorithm as described in BEDRICH 2016 JPV (not yet published)
:param constant_noise_level: True, to assume noise to be constant
:param imgs_to_be_averaged: True, if SNR is for average(img1, img2)
|
[
"Returns",
"a",
"signal",
"-",
"to",
"-",
"noise",
"-",
"map",
"uses",
"algorithm",
"as",
"described",
"in",
"BEDRICH",
"2016",
"JPV",
"(",
"not",
"jet",
"published",
")",
":",
"param",
"constant_noise_level",
":",
"True",
"to",
"assume",
"noise",
"to",
"be",
"constant",
":",
"param",
"imgs_to_be_averaged",
":",
"True",
"if",
"SNR",
"is",
"for",
"average",
"(",
"img1",
"img2",
")"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/measure/SNR/SNR.py#L21-L79
|
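The constant-noise branch with two frames can be reproduced standalone. F_RMS2AAD is assumed here to be the Gaussian RMS/average-absolute-deviation conversion factor sqrt(pi/2); its actual value in the module is not shown above:

import numpy as np

F_RMS2AAD = (np.pi / 2) ** 0.5        # assumed value, see note above

clean = np.full((200, 200), 100.0)
img1 = clean + np.random.normal(0, 5, clean.shape)
img2 = clean + np.random.normal(0, 5, clean.shape)

signal = 0.5 * (img1 + img2)
d = img1 - img2
# 0.5**0.5 compensates the doubled variance of the difference image
noise = 0.5**0.5 * np.mean(np.abs(d)) * F_RMS2AAD
print(noise)                          # ~5, the injected sigma
print((signal / noise).mean())        # ~20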
radjkarl/imgProcessor
|
imgProcessor/utils/sortCorners.py
|
sortCorners
|
def sortCorners(corners):
'''
sort the corners of a given quadrilateral of the type
corners : [ [xi,yi],... ]
to an anti-clockwise order starting with the bottom left corner
or (if plotted as image where y increases to the bottom):
clockwise, starting top left
'''
corners = np.asarray(corners)
# bring edges in order:
corners2 = corners[ConvexHull(corners).vertices]
if len(corners2) == 3:
        # sometimes one point is missing from ConvexHull because it is
# within the hull triangle
# find the right position of set corner as the minimum perimeter
# built with that point as different indices
for c in corners:
if c not in corners2:
break
perimeter = []
for n in range(0, 4):
corners3 = np.insert(corners2, n, c, axis=0)
perimeter.append(
np.linalg.norm(
np.diff(
corners3,
axis=0),
axis=1).sum())
n = np.argmin(perimeter)
corners2 = np.insert(corners2, n, c, axis=0)
# find the edge with the right angle to the quad middle:
mn = corners2.mean(axis=0)
d = (corners2 - mn)
ascent = np.arctan2(d[:, 1], d[:, 0])
bl = np.abs(BL_ANGLE + ascent).argmin()
    # build an index list starting with bl:
i = list(range(bl, 4))
i.extend(list(range(0, bl)))
return corners2[i]
|
python
|
def sortCorners(corners):
'''
sort the corners of a given quadrilateral of the type
corners : [ [xi,yi],... ]
to an anti-clockwise order starting with the bottom left corner
or (if plotted as image where y increases to the bottom):
clockwise, starting top left
'''
corners = np.asarray(corners)
# bring edges in order:
corners2 = corners[ConvexHull(corners).vertices]
if len(corners2) == 3:
        # sometimes one point is missing from ConvexHull because it is
# within the hull triangle
# find the right position of set corner as the minimum perimeter
# built with that point as different indices
for c in corners:
if c not in corners2:
break
perimeter = []
for n in range(0, 4):
corners3 = np.insert(corners2, n, c, axis=0)
perimeter.append(
np.linalg.norm(
np.diff(
corners3,
axis=0),
axis=1).sum())
n = np.argmin(perimeter)
corners2 = np.insert(corners2, n, c, axis=0)
# find the edge with the right angle to the quad middle:
mn = corners2.mean(axis=0)
d = (corners2 - mn)
ascent = np.arctan2(d[:, 1], d[:, 0])
bl = np.abs(BL_ANGLE + ascent).argmin()
    # build an index list starting with bl:
i = list(range(bl, 4))
i.extend(list(range(0, bl)))
return corners2[i]
|
[
"def",
"sortCorners",
"(",
"corners",
")",
":",
"corners",
"=",
"np",
".",
"asarray",
"(",
"corners",
")",
"# bring edges in order:\r",
"corners2",
"=",
"corners",
"[",
"ConvexHull",
"(",
"corners",
")",
".",
"vertices",
"]",
"if",
"len",
"(",
"corners2",
")",
"==",
"3",
":",
"# sometimes ConvexHull one point is missing because it is\r",
"# within the hull triangle\r",
"# find the right position of set corner as the minimum perimeter\r",
"# built with that point as different indices\r",
"for",
"c",
"in",
"corners",
":",
"if",
"c",
"not",
"in",
"corners2",
":",
"break",
"perimeter",
"=",
"[",
"]",
"for",
"n",
"in",
"range",
"(",
"0",
",",
"4",
")",
":",
"corners3",
"=",
"np",
".",
"insert",
"(",
"corners2",
",",
"n",
",",
"c",
",",
"axis",
"=",
"0",
")",
"perimeter",
".",
"append",
"(",
"np",
".",
"linalg",
".",
"norm",
"(",
"np",
".",
"diff",
"(",
"corners3",
",",
"axis",
"=",
"0",
")",
",",
"axis",
"=",
"1",
")",
".",
"sum",
"(",
")",
")",
"n",
"=",
"np",
".",
"argmin",
"(",
"perimeter",
")",
"corners2",
"=",
"np",
".",
"insert",
"(",
"corners2",
",",
"n",
",",
"c",
",",
"axis",
"=",
"0",
")",
"# find the edge with the right angle to the quad middle:\r",
"mn",
"=",
"corners2",
".",
"mean",
"(",
"axis",
"=",
"0",
")",
"d",
"=",
"(",
"corners2",
"-",
"mn",
")",
"ascent",
"=",
"np",
".",
"arctan2",
"(",
"d",
"[",
":",
",",
"1",
"]",
",",
"d",
"[",
":",
",",
"0",
"]",
")",
"bl",
"=",
"np",
".",
"abs",
"(",
"BL_ANGLE",
"+",
"ascent",
")",
".",
"argmin",
"(",
")",
"# build a index list starting with bl:\r",
"i",
"=",
"list",
"(",
"range",
"(",
"bl",
",",
"4",
")",
")",
"i",
".",
"extend",
"(",
"list",
"(",
"range",
"(",
"0",
",",
"bl",
")",
")",
")",
"return",
"corners2",
"[",
"i",
"]"
] |
sort the corners of a given quadrilateral of the type
corners : [ [xi,yi],... ]
to an anti-clockwise order starting with the bottom left corner
or (if plotted as image where y increases to the bottom):
clockwise, starting top left
|
[
"sort",
"the",
"corners",
"of",
"a",
"given",
"quadrilateral",
"of",
"the",
"type",
"corners",
":",
"[",
"[",
"xi",
"yi",
"]",
"...",
"]",
"to",
"an",
"anti",
"-",
"clockwise",
"order",
"starting",
"with",
"the",
"bottom",
"left",
"corner",
"or",
"(",
"if",
"plotted",
"as",
"image",
"where",
"y",
"increases",
"to",
"the",
"bottom",
")",
":",
"clockwise",
"starting",
"top",
"left"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/utils/sortCorners.py#L8-L50
|
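A usage sketch with the corners of a unit square in shuffled order; the exact starting corner depends on the module constant BL_ANGLE, which is not shown above:

import numpy as np
from imgProcessor.utils.sortCorners import sortCorners

corners = np.array([[1, 0], [0, 0], [1, 1], [0, 1]])
print(sortCorners(corners))
# in image coordinates (y growing downwards) this should come out
# clockwise, e.g. [[0, 0], [1, 0], [1, 1], [0, 1]]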
radjkarl/imgProcessor
|
imgProcessor/render/closestDirectDistance.py
|
closestDirectDistance
|
def closestDirectDistance(arr, ksize=30, dtype=np.uint16):
'''
    return an array which contains the closest distance to the next positive
value given in arr within a given kernel size
'''
out = np.zeros_like(arr, dtype=dtype)
_calc(out, arr, ksize)
return out
|
python
|
def closestDirectDistance(arr, ksize=30, dtype=np.uint16):
'''
    return an array which contains the closest distance to the next positive
value given in arr within a given kernel size
'''
out = np.zeros_like(arr, dtype=dtype)
_calc(out, arr, ksize)
return out
|
[
"def",
"closestDirectDistance",
"(",
"arr",
",",
"ksize",
"=",
"30",
",",
"dtype",
"=",
"np",
".",
"uint16",
")",
":",
"out",
"=",
"np",
".",
"zeros_like",
"(",
"arr",
",",
"dtype",
"=",
"dtype",
")",
"_calc",
"(",
"out",
",",
"arr",
",",
"ksize",
")",
"return",
"out"
] |
return an array which contains the closest distance to the next positive
value given in arr within a given kernel size
|
[
"return",
"an",
"array",
"with",
"contains",
"the",
"closest",
"distance",
"to",
"the",
"next",
"positive",
"value",
"given",
"in",
"arr",
"within",
"a",
"given",
"kernel",
"size"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/render/closestDirectDistance.py#L6-L14
|
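For orientation: scipy's Euclidean distance transform computes the unrestricted distance to the nearest positive pixel, which is what closestDirectDistance approximates inside its ksize window:

import numpy as np
from scipy.ndimage import distance_transform_edt

arr = np.zeros((50, 50), dtype=bool)
arr[10, 10] = arr[40, 25] = True     # the 'positive' target pixels

# EDT measures distance to the nearest zero, hence the inversion
dist = distance_transform_edt(~arr)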
radjkarl/imgProcessor
|
imgProcessor/render/closestConnectedDistance.py
|
closestConnectedDistance
|
def closestConnectedDistance(target, walls=None,
max_len_border_line=500,
max_n_path=100,
concentrate_every_n_pixel=1):
'''
    returns an array which contains the closest distance from every pixel
    to the next position where target == 1
[walls] binary 2darray - e.g. walls in a labyrinth that have to be surrounded in order to get to the target
[target] binary 2darray - positions given by 1
[concentrate_every_n_pixel] often the distance of neighbour pixels is similar
to speed up calculation set this value to e.g. 3 to calculate only
    the distance for every 3rd pixel and interpolate in between
recommended are values up to 3-5
[max_len_border_line]
this function calculates distances travelled using region growth
e.g.
0123
1123
2223
3333
the last steps (e.g. for all steps 3 border_line=7) are stored in an array of limited
length defined in 'max_len_border_line'
[max_n_path]
how many paths are possible between every pixel and the target
only needed if fast==False
'''
c = concentrate_every_n_pixel
assert c >= 1
if walls is None:
walls = np.zeros_like(target, dtype=bool)
s = target.shape
dt = np.uint16
if max(target.shape) < 200:
dt = np.uint8
out = np.zeros((s[0] // c, s[1] // c), dtype=dt)
# temporary arrays:
growth = np.zeros_like(target, dtype=dt)
res = np.empty(shape=3, dtype=dt)
steps = np.empty(shape=(max_len_border_line, 2), dtype=dt)
new_steps = np.empty(shape=(max_len_border_line, 2), dtype=dt)
# run calculation:
_calc(growth, out, walls, target, steps, new_steps,
res, concentrate_every_n_pixel)
if c > 1:
# if concentrate_every_n_pixel > 1
# the resized output array
# will have wrong values close to the wall
        # therefore substitute all wall values (-1)
# with an average of their closest neighbours
interpolate2dStructuredIDW(out, out == 0)
out = cv2.resize(out, s[::-1])
out[walls] = 0
return out
|
python
|
def closestConnectedDistance(target, walls=None,
max_len_border_line=500,
max_n_path=100,
concentrate_every_n_pixel=1):
'''
    returns an array which contains the closest distance from every pixel
    to the next position where target == 1
[walls] binary 2darray - e.g. walls in a labyrinth that have to be surrounded in order to get to the target
[target] binary 2darray - positions given by 1
[concentrate_every_n_pixel] often the distance of neighbour pixels is similar
to speed up calculation set this value to e.g. 3 to calculate only
    the distance for every 3rd pixel and interpolate in between
recommended are values up to 3-5
[max_len_border_line]
this function calculates distances travelled using region growth
e.g.
0123
1123
2223
3333
the last steps (e.g. for all steps 3 border_line=7) are stored in an array of limited
length defined in 'max_len_border_line'
[max_n_path]
how many paths are possible between every pixel and the target
only needed if fast==False
'''
c = concentrate_every_n_pixel
assert c >= 1
if walls is None:
walls = np.zeros_like(target, dtype=bool)
s = target.shape
dt = np.uint16
if max(target.shape) < 200:
dt = np.uint8
out = np.zeros((s[0] // c, s[1] // c), dtype=dt)
# temporary arrays:
growth = np.zeros_like(target, dtype=dt)
res = np.empty(shape=3, dtype=dt)
steps = np.empty(shape=(max_len_border_line, 2), dtype=dt)
new_steps = np.empty(shape=(max_len_border_line, 2), dtype=dt)
# run calculation:
_calc(growth, out, walls, target, steps, new_steps,
res, concentrate_every_n_pixel)
if c > 1:
# if concentrate_every_n_pixel > 1
# the resized output array
# will have wrong values close to the wall
        # therefore substitute all wall values (-1)
# with an average of their closest neighbours
interpolate2dStructuredIDW(out, out == 0)
out = cv2.resize(out, s[::-1])
out[walls] = 0
return out
|
[
"def",
"closestConnectedDistance",
"(",
"target",
",",
"walls",
"=",
"None",
",",
"max_len_border_line",
"=",
"500",
",",
"max_n_path",
"=",
"100",
",",
"concentrate_every_n_pixel",
"=",
"1",
")",
":",
"c",
"=",
"concentrate_every_n_pixel",
"assert",
"c",
">=",
"1",
"if",
"walls",
"is",
"None",
":",
"walls",
"=",
"np",
".",
"zeros_like",
"(",
"target",
",",
"dtype",
"=",
"bool",
")",
"s",
"=",
"target",
".",
"shape",
"dt",
"=",
"np",
".",
"uint16",
"if",
"max",
"(",
"target",
".",
"shape",
")",
"<",
"200",
":",
"dt",
"=",
"np",
".",
"uint8",
"out",
"=",
"np",
".",
"zeros",
"(",
"(",
"s",
"[",
"0",
"]",
"//",
"c",
",",
"s",
"[",
"1",
"]",
"//",
"c",
")",
",",
"dtype",
"=",
"dt",
")",
"# temporary arrays:\r",
"growth",
"=",
"np",
".",
"zeros_like",
"(",
"target",
",",
"dtype",
"=",
"dt",
")",
"res",
"=",
"np",
".",
"empty",
"(",
"shape",
"=",
"3",
",",
"dtype",
"=",
"dt",
")",
"steps",
"=",
"np",
".",
"empty",
"(",
"shape",
"=",
"(",
"max_len_border_line",
",",
"2",
")",
",",
"dtype",
"=",
"dt",
")",
"new_steps",
"=",
"np",
".",
"empty",
"(",
"shape",
"=",
"(",
"max_len_border_line",
",",
"2",
")",
",",
"dtype",
"=",
"dt",
")",
"# run calculation:\r",
"_calc",
"(",
"growth",
",",
"out",
",",
"walls",
",",
"target",
",",
"steps",
",",
"new_steps",
",",
"res",
",",
"concentrate_every_n_pixel",
")",
"if",
"c",
">",
"1",
":",
"# if concentrate_every_n_pixel > 1\r",
"# the resized output array\r",
"# will have wrong values close to the wall\r",
"# therefore substitute all wall value (-1)\r",
"# with an average of their closest neighbours\r",
"interpolate2dStructuredIDW",
"(",
"out",
",",
"out",
"==",
"0",
")",
"out",
"=",
"cv2",
".",
"resize",
"(",
"out",
",",
"s",
"[",
":",
":",
"-",
"1",
"]",
")",
"out",
"[",
"walls",
"]",
"=",
"0",
"return",
"out"
] |
returns an array which contains the closest distance from every pixel
to the next position where target == 1
[walls] binary 2darray - e.g. walls in a labyrinth that have to be surrounded in order to get to the target
[target] binary 2darray - positions given by 1
[concentrate_every_n_pixel] often the distance of neighbour pixels is similar
to speed up calculation set this value to e.g. 3 to calculate only
the distance for every 3rd pixel and interpolate in between
recommended are values up to 3-5
[max_len_border_line]
this function calculates distances travelled using region growth
e.g.
0123
1123
2223
3333
the last steps (e.g. for all steps 3 border_line=7) are stored in an array of limited
length defined in 'max_len_border_line'
[max_n_path]
how many paths are possible between every pixel and the target
only needed if fast==False
|
[
"returns",
"an",
"array",
"with",
"contains",
"the",
"closest",
"distance",
"from",
"every",
"pixel",
"the",
"next",
"position",
"where",
"target",
"==",
"1",
"[",
"walls",
"]",
"binary",
"2darray",
"-",
"e",
".",
"g",
".",
"walls",
"in",
"a",
"labyrinth",
"that",
"have",
"to",
"be",
"surrounded",
"in",
"order",
"to",
"get",
"to",
"the",
"target",
"[",
"target",
"]",
"binary",
"2darray",
"-",
"positions",
"given",
"by",
"1",
"[",
"concentrate_every_n_pixel",
"]",
"often",
"the",
"distance",
"of",
"neighbour",
"pixels",
"is",
"similar",
"to",
"speed",
"up",
"calculation",
"set",
"this",
"value",
"to",
"e",
".",
"g",
".",
"3",
"to",
"calculate",
"only",
"the",
"distance",
"for",
"every",
"3",
".",
"pixel",
"and",
"interpolate",
"in",
"between",
"recommended",
"are",
"values",
"up",
"to",
"3",
"-",
"5",
"[",
"max_len_border_line",
"]",
"this",
"function",
"calculates",
"distances",
"travelled",
"using",
"region",
"growth",
"e",
".",
"g",
".",
"0123",
"1123",
"2223",
"3333",
"the",
"last",
"steps",
"(",
"e",
".",
"g",
".",
"for",
"all",
"steps",
"3",
"border_line",
"=",
"7",
")",
"are",
"stored",
"in",
"an",
"array",
"of",
"limited",
"length",
"defined",
"in",
"max_len_border_line",
"[",
"max_n_path",
"]",
"how",
"many",
"paths",
"are",
"possible",
"between",
"every",
"pixel",
"and",
"the",
"target",
"only",
"needed",
"if",
"fast",
"==",
"False"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/render/closestConnectedDistance.py#L14-L77
|
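A plain-Python reference for the region growth the docstring describes (multi-source 4-connected BFS); this is not the compiled implementation above, just the same idea in readable form:

import numpy as np
from collections import deque

def bfs_distance(target, walls):
    # distance (in steps) from every free pixel to the nearest target==1,
    # walking around walls; unreachable pixels and walls stay at -1
    s0, s1 = target.shape
    dist = np.full(target.shape, -1, dtype=int)
    q = deque(zip(*np.nonzero(target)))
    for i, j in q:
        dist[i, j] = 0
    while q:
        i, j = q.popleft()
        for ii, jj in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            pi, pj = i + ii, j + jj
            if (0 <= pi < s0 and 0 <= pj < s1
                    and dist[pi, pj] < 0 and not walls[pi, pj]):
                dist[pi, pj] = dist[i, j] + 1
                q.append((pi, pj))
    return dist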
radjkarl/imgProcessor
|
imgProcessor/render/closestConnectedDistance.py
|
_grow
|
def _grow(growth, walls, target, i, j, steps, new_steps, res):
'''
fills [res] with [distance to next position where target == 1,
x coord.,
y coord. of that position in target]
using region growth
i,j -> pixel position
growth -> a work array, needed to measure the distance
steps, new_steps -> current and last positions of the region growth steps
using this instead of looking for the right step position in [growth]
should speed up the process
'''
# clean array:
growth[:] = 0
if target[i, j]:
# pixel is in target
res[0] = 1
res[1] = i
res[2] = j
return
step = 1
s0, s1 = growth.shape
step_len = 1
new_step_ind = 0
steps[new_step_ind, 0] = i
steps[new_step_ind, 1] = j
growth[i, j] = 1
while True:
for n in range(step_len):
i, j = steps[n]
for ii, jj in DIRECT_NEIGHBOURS:
pi = i + ii
pj = j + jj
# if in image:
if 0 <= pi < s0 and 0 <= pj < s1:
                    # if growth array is empty and there are no walls:
# fill growth with current step
if growth[pi, pj] == 0 and not walls[pi, pj]:
growth[pi, pj] = step
if target[pi, pj]:
# found destination
res[0] = 1
res[1] = pi
res[2] = pj
return
new_steps[new_step_ind, 0] = pi
new_steps[new_step_ind, 1] = pj
new_step_ind += 1
if new_step_ind == 0:
# couldn't populate any more because growth is full
# and all possible steps are gone
res[0] = 0
return
step += 1
steps, new_steps = new_steps, steps
step_len = new_step_ind
new_step_ind = 0
|
python
|
def _grow(growth, walls, target, i, j, steps, new_steps, res):
'''
fills [res] with [distance to next position where target == 1,
x coord.,
y coord. of that position in target]
using region growth
i,j -> pixel position
growth -> a work array, needed to measure the distance
steps, new_steps -> current and last positions of the region growth steps
using this instead of looking for the right step position in [growth]
should speed up the process
'''
# clean array:
growth[:] = 0
if target[i, j]:
# pixel is in target
res[0] = 1
res[1] = i
res[2] = j
return
step = 1
s0, s1 = growth.shape
step_len = 1
new_step_ind = 0
steps[new_step_ind, 0] = i
steps[new_step_ind, 1] = j
growth[i, j] = 1
while True:
for n in range(step_len):
i, j = steps[n]
for ii, jj in DIRECT_NEIGHBOURS:
pi = i + ii
pj = j + jj
# if in image:
if 0 <= pi < s0 and 0 <= pj < s1:
# is growth array is empty and there are no walls:
# fill growth with current step
if growth[pi, pj] == 0 and not walls[pi, pj]:
growth[pi, pj] = step
if target[pi, pj]:
# found destination
res[0] = 1
res[1] = pi
res[2] = pj
return
new_steps[new_step_ind, 0] = pi
new_steps[new_step_ind, 1] = pj
new_step_ind += 1
if new_step_ind == 0:
# couldn't populate any more because growth is full
# and all possible steps are gone
res[0] = 0
return
step += 1
steps, new_steps = new_steps, steps
step_len = new_step_ind
new_step_ind = 0
|
[
"def",
"_grow",
"(",
"growth",
",",
"walls",
",",
"target",
",",
"i",
",",
"j",
",",
"steps",
",",
"new_steps",
",",
"res",
")",
":",
"# clean array:\r",
"growth",
"[",
":",
"]",
"=",
"0",
"if",
"target",
"[",
"i",
",",
"j",
"]",
":",
"# pixel is in target\r",
"res",
"[",
"0",
"]",
"=",
"1",
"res",
"[",
"1",
"]",
"=",
"i",
"res",
"[",
"2",
"]",
"=",
"j",
"return",
"step",
"=",
"1",
"s0",
",",
"s1",
"=",
"growth",
".",
"shape",
"step_len",
"=",
"1",
"new_step_ind",
"=",
"0",
"steps",
"[",
"new_step_ind",
",",
"0",
"]",
"=",
"i",
"steps",
"[",
"new_step_ind",
",",
"1",
"]",
"=",
"j",
"growth",
"[",
"i",
",",
"j",
"]",
"=",
"1",
"while",
"True",
":",
"for",
"n",
"in",
"range",
"(",
"step_len",
")",
":",
"i",
",",
"j",
"=",
"steps",
"[",
"n",
"]",
"for",
"ii",
",",
"jj",
"in",
"DIRECT_NEIGHBOURS",
":",
"pi",
"=",
"i",
"+",
"ii",
"pj",
"=",
"j",
"+",
"jj",
"# if in image:\r",
"if",
"0",
"<=",
"pi",
"<",
"s0",
"and",
"0",
"<=",
"pj",
"<",
"s1",
":",
"# is growth array is empty and there are no walls:\r",
"# fill growth with current step\r",
"if",
"growth",
"[",
"pi",
",",
"pj",
"]",
"==",
"0",
"and",
"not",
"walls",
"[",
"pi",
",",
"pj",
"]",
":",
"growth",
"[",
"pi",
",",
"pj",
"]",
"=",
"step",
"if",
"target",
"[",
"pi",
",",
"pj",
"]",
":",
"# found destination\r",
"res",
"[",
"0",
"]",
"=",
"1",
"res",
"[",
"1",
"]",
"=",
"pi",
"res",
"[",
"2",
"]",
"=",
"pj",
"return",
"new_steps",
"[",
"new_step_ind",
",",
"0",
"]",
"=",
"pi",
"new_steps",
"[",
"new_step_ind",
",",
"1",
"]",
"=",
"pj",
"new_step_ind",
"+=",
"1",
"if",
"new_step_ind",
"==",
"0",
":",
"# couldn't populate any more because growth is full\r",
"# and all possible steps are gone\r",
"res",
"[",
"0",
"]",
"=",
"0",
"return",
"step",
"+=",
"1",
"steps",
",",
"new_steps",
"=",
"new_steps",
",",
"steps",
"step_len",
"=",
"new_step_ind",
"new_step_ind",
"=",
"0"
] |
fills [res] with [distance to next position where target == 1,
x coord.,
y coord. of that position in target]
using region growth
i,j -> pixel position
growth -> a work array, needed to measure the distance
steps, new_steps -> current and last positions of the region growth steps
using this instead of looking for the right step position in [growth]
should speed up the process
|
[
"fills",
"[",
"res",
"]",
"with",
"[",
"distance",
"to",
"next",
"position",
"where",
"target",
"==",
"1",
"x",
"coord",
".",
"y",
"coord",
".",
"of",
"that",
"position",
"in",
"target",
"]",
"using",
"region",
"growth",
"i",
"j",
"-",
">",
"pixel",
"position",
"growth",
"-",
">",
"a",
"work",
"array",
"needed",
"to",
"measure",
"the",
"distance",
"steps",
"new_steps",
"-",
">",
"current",
"and",
"last",
"positions",
"of",
"the",
"region",
"growth",
"steps",
"using",
"this",
"instead",
"of",
"looking",
"for",
"the",
"right",
"step",
"position",
"in",
"[",
"growth",
"]",
"should",
"speed",
"up",
"the",
"process"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/render/closestConnectedDistance.py#L100-L166
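A hedged sketch of how _grow's work arrays might be prepared before a call; the buffer sizes are assumptions, and the module-level DIRECT_NEIGHBOURS constant has to be available for the function to run:

import numpy as np
from imgProcessor.render.closestConnectedDistance import _grow

s = (10, 10)
target = np.zeros(s, bool)
target[9, 9] = True
walls = np.zeros(s, bool)
growth = np.zeros(s, np.uint16)          # work array holding the step numbers
n = s[0] * s[1]
steps = np.zeros((n, 2), np.uint16)      # positions of the current growth front
new_steps = np.zeros((n, 2), np.uint16)  # positions of the next growth front
res = np.zeros(3, np.uint16)
_grow(growth, walls, target, 0, 0, steps, new_steps, res)
# res[0] is set truthy when the target was reached; res[1], res[2] hold its coordinates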
|
radjkarl/imgProcessor
|
imgProcessor/features/polylinesFromBinImage.py
|
polylinesFromBinImage
|
def polylinesFromBinImage(img, minimum_cluster_size=6,
remove_small_obj_size=3,
reconnect_size=3,
max_n_contours=None, max_len_contour=None,
copy=True):
'''
return a list of arrays of un-branching contours
img -> (boolean) array
optional:
---------
minimum_cluster_size -> minimum number of pixels connected together to build a contour
##search_kernel_size -> TODO
##min_search_kernel_moment -> TODO
numeric:
-------------
max_n_contours -> maximum number of possible contours in img
max_len_contour -> maximum contour length
'''
assert minimum_cluster_size > 1
assert reconnect_size % 2, 'ksize needs to be odd'
# assert search_kernel_size == 0 or search_kernel_size > 2 and search_kernel_size%2, 'kernel size needs to be odd'
# assume array size parameters, is not given:
if max_n_contours is None:
max_n_contours = max(img.shape)
if max_len_contour is None:
max_len_contour = sum(img.shape[:2])
# array containing coord. of all contours:
contours = np.zeros(shape=(max_n_contours, max_len_contour, 2),
dtype=np.uint16) # if not search_kernel_size else np.float32)
if img.dtype != np.bool:
img = img.astype(bool)
elif copy:
img = img.copy()
if remove_small_obj_size:
remove_small_objects(img, remove_small_obj_size,
connectivity=2, in_place=True)
if reconnect_size:
# remove gaps
maximum_filter(img, reconnect_size, output=img)
# reduce contour width to 1
img = skeletonize(img)
n_contours = _populateContoursArray(img, contours, minimum_cluster_size)
contours = contours[:n_contours]
l = []
for c in contours:
ind = np.zeros(shape=len(c), dtype=bool)
_getValidInd(c, ind)
# remove all empty spaces:
l.append(c[ind])
return l
|
python
|
def polylinesFromBinImage(img, minimum_cluster_size=6,
remove_small_obj_size=3,
reconnect_size=3,
max_n_contours=None, max_len_contour=None,
copy=True):
'''
return a list of arrays of un-branching contours
img -> (boolean) array
optional:
---------
minimum_cluster_size -> minimum number of pixels connected together to build a contour
##search_kernel_size -> TODO
##min_search_kernel_moment -> TODO
numeric:
-------------
max_n_contours -> maximum number of possible contours in img
max_len_contour -> maximum contour length
'''
assert minimum_cluster_size > 1
assert reconnect_size % 2, 'ksize needs to be odd'
# assert search_kernel_size == 0 or search_kernel_size > 2 and search_kernel_size%2, 'kernel size needs to be odd'
# assume array size parameters, is not given:
if max_n_contours is None:
max_n_contours = max(img.shape)
if max_len_contour is None:
max_len_contour = sum(img.shape[:2])
# array containing coord. of all contours:
contours = np.zeros(shape=(max_n_contours, max_len_contour, 2),
dtype=np.uint16) # if not search_kernel_size else np.float32)
if img.dtype != np.bool:
img = img.astype(bool)
elif copy:
img = img.copy()
if remove_small_obj_size:
remove_small_objects(img, remove_small_obj_size,
connectivity=2, in_place=True)
if reconnect_size:
# remove gaps
maximum_filter(img, reconnect_size, output=img)
# reduce contour width to 1
img = skeletonize(img)
n_contours = _populateContoursArray(img, contours, minimum_cluster_size)
contours = contours[:n_contours]
l = []
for c in contours:
ind = np.zeros(shape=len(c), dtype=bool)
_getValidInd(c, ind)
# remove all empty spaces:
l.append(c[ind])
return l
|
[
"def",
"polylinesFromBinImage",
"(",
"img",
",",
"minimum_cluster_size",
"=",
"6",
",",
"remove_small_obj_size",
"=",
"3",
",",
"reconnect_size",
"=",
"3",
",",
"max_n_contours",
"=",
"None",
",",
"max_len_contour",
"=",
"None",
",",
"copy",
"=",
"True",
")",
":",
"assert",
"minimum_cluster_size",
">",
"1",
"assert",
"reconnect_size",
"%",
"2",
",",
"'ksize needs to be odd'",
"# assert search_kernel_size == 0 or search_kernel_size > 2 and search_kernel_size%2, 'kernel size needs to be odd'\r",
"# assume array size parameters, is not given:\r",
"if",
"max_n_contours",
"is",
"None",
":",
"max_n_contours",
"=",
"max",
"(",
"img",
".",
"shape",
")",
"if",
"max_len_contour",
"is",
"None",
":",
"max_len_contour",
"=",
"sum",
"(",
"img",
".",
"shape",
"[",
":",
"2",
"]",
")",
"# array containing coord. of all contours:\r",
"contours",
"=",
"np",
".",
"zeros",
"(",
"shape",
"=",
"(",
"max_n_contours",
",",
"max_len_contour",
",",
"2",
")",
",",
"dtype",
"=",
"np",
".",
"uint16",
")",
"# if not search_kernel_size else np.float32)\r",
"if",
"img",
".",
"dtype",
"!=",
"np",
".",
"bool",
":",
"img",
"=",
"img",
".",
"astype",
"(",
"bool",
")",
"elif",
"copy",
":",
"img",
"=",
"img",
".",
"copy",
"(",
")",
"if",
"remove_small_obj_size",
":",
"remove_small_objects",
"(",
"img",
",",
"remove_small_obj_size",
",",
"connectivity",
"=",
"2",
",",
"in_place",
"=",
"True",
")",
"if",
"reconnect_size",
":",
"# remove gaps\r",
"maximum_filter",
"(",
"img",
",",
"reconnect_size",
",",
"output",
"=",
"img",
")",
"# reduce contour width to 1\r",
"img",
"=",
"skeletonize",
"(",
"img",
")",
"n_contours",
"=",
"_populateContoursArray",
"(",
"img",
",",
"contours",
",",
"minimum_cluster_size",
")",
"contours",
"=",
"contours",
"[",
":",
"n_contours",
"]",
"l",
"=",
"[",
"]",
"for",
"c",
"in",
"contours",
":",
"ind",
"=",
"np",
".",
"zeros",
"(",
"shape",
"=",
"len",
"(",
"c",
")",
",",
"dtype",
"=",
"bool",
")",
"_getValidInd",
"(",
"c",
",",
"ind",
")",
"# remove all empty spaces:\r",
"l",
".",
"append",
"(",
"c",
"[",
"ind",
"]",
")",
"return",
"l"
] |
return a list of arrays of un-branching contours
img -> (boolean) array
optional:
---------
minimum_cluster_size -> minimum number of pixels connected together to build a contour
##search_kernel_size -> TODO
##min_search_kernel_moment -> TODO
numeric:
-------------
max_n_contours -> maximum number of possible contours in img
max_len_contour -> maximum contour length
|
[
"return",
"a",
"list",
"of",
"arrays",
"of",
"un",
"-",
"branching",
"contours",
"img",
"-",
">",
"(",
"boolean",
")",
"array",
"optional",
":",
"---------",
"minimum_cluster_size",
"-",
">",
"minimum",
"number",
"of",
"pixels",
"connected",
"together",
"to",
"build",
"a",
"contour",
"##search_kernel_size",
"-",
">",
"TODO",
"##min_search_kernel_moment",
"-",
">",
"TODO",
"numeric",
":",
"-------------",
"max_n_contours",
"-",
">",
"maximum",
"number",
"of",
"possible",
"contours",
"in",
"img",
"max_len_contour",
"-",
">",
"maximum",
"contour",
"length"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/features/polylinesFromBinImage.py#L14-L73
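A minimal sketch of extracting polylines from a synthetic binary drawing (assumes the module's scikit-image/scipy dependencies; note the function compares against the long-deprecated np.bool, so older NumPy/scikit-image versions may be required):

import numpy as np
from imgProcessor.features.polylinesFromBinImage import polylinesFromBinImage

img = np.zeros((50, 50), dtype=bool)
img[10, 5:45] = True                     # one horizontal stroke
lines = polylinesFromBinImage(img)
for c in lines:
    print(len(c), c[0], c[-1])           # point count and end points per polyline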
|
radjkarl/imgProcessor
|
imgProcessor/utils/cdf.py
|
cdf
|
def cdf(arr, pos=None):
'''
Return the cumulative density function of a given array or
its intensity at a given position (0-1)
'''
r = (arr.min(), arr.max())
hist, bin_edges = np.histogram(arr, bins=2 * int(r[1] - r[0]), range=r)
hist = np.asfarray(hist) / hist.sum()
cdf = np.cumsum(hist)
if pos is None:
return cdf
i = np.argmax(cdf > pos)
return bin_edges[i]
|
python
|
def cdf(arr, pos=None):
'''
Return the cumulative density function of a given array or
its intensity at a given position (0-1)
'''
r = (arr.min(), arr.max())
hist, bin_edges = np.histogram(arr, bins=2 * int(r[1] - r[0]), range=r)
hist = np.asfarray(hist) / hist.sum()
cdf = np.cumsum(hist)
if pos is None:
return cdf
i = np.argmax(cdf > pos)
return bin_edges[i]
|
[
"def",
"cdf",
"(",
"arr",
",",
"pos",
"=",
"None",
")",
":",
"r",
"=",
"(",
"arr",
".",
"min",
"(",
")",
",",
"arr",
".",
"max",
"(",
")",
")",
"hist",
",",
"bin_edges",
"=",
"np",
".",
"histogram",
"(",
"arr",
",",
"bins",
"=",
"2",
"*",
"int",
"(",
"r",
"[",
"1",
"]",
"-",
"r",
"[",
"0",
"]",
")",
",",
"range",
"=",
"r",
")",
"hist",
"=",
"np",
".",
"asfarray",
"(",
"hist",
")",
"/",
"hist",
".",
"sum",
"(",
")",
"cdf",
"=",
"np",
".",
"cumsum",
"(",
"hist",
")",
"if",
"pos",
"is",
"None",
":",
"return",
"cdf",
"i",
"=",
"np",
".",
"argmax",
"(",
"cdf",
">",
"pos",
")",
"return",
"bin_edges",
"[",
"i",
"]"
] |
Return the cumulative distribution function (CDF) of a given array or
its intensity at a given position (0-1)
|
[
"Return",
"the",
"cumulative",
"density",
"function",
"of",
"a",
"given",
"array",
"or",
"its",
"intensity",
"at",
"a",
"given",
"position",
"(",
"0",
"-",
"1",
")"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/utils/cdf.py#L7-L20
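A short usage sketch:

import numpy as np
from imgProcessor.utils.cdf import cdf

arr = np.random.normal(100, 10, (200, 200))
full = cdf(arr)          # the whole CDF as a 1d array
median = cdf(arr, 0.5)   # intensity below which about half of the values fall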
|
radjkarl/imgProcessor
|
imgProcessor/array/subCell2D.py
|
subCell2DGenerator
|
def subCell2DGenerator(arr, shape, d01=None, p01=None):
'''Generator to access evenly sized sub-cells in a 2d array
Args:
shape (tuple): number of sub-cells in y,x e.g. (10,15)
d01 (tuple, optional): cell size in y and x
p01 (tuple, optional): position of top left edge
Returns:
int: 1st index
int: 2nd index
array: sub array
Example:
>>> a = np.array([[[0,1],[1,2]],[[2,3],[3,4]]])
>>> gen = subCell2DGenerator(a,(2,2))
>>> for i,j, sub in gen: print( i,j, sub )
0 0 [[[0 1]]]
0 1 [[[1 2]]]
1 0 [[[2 3]]]
1 1 [[[3 4]]]
'''
for i, j, s0, s1 in subCell2DSlices(arr, shape, d01, p01):
yield i, j, arr[s0, s1]
|
python
|
def subCell2DGenerator(arr, shape, d01=None, p01=None):
'''Generator to access evenly sized sub-cells in a 2d array
Args:
shape (tuple): number of sub-cells in y,x e.g. (10,15)
d01 (tuple, optional): cell size in y and x
p01 (tuple, optional): position of top left edge
Returns:
int: 1st index
int: 2nd index
array: sub array
Example:
>>> a = np.array([[[0,1],[1,2]],[[2,3],[3,4]]])
>>> gen = subCell2DGenerator(a,(2,2))
>>> for i,j, sub in gen: print( i,j, sub )
0 0 [[[0 1]]]
0 1 [[[1 2]]]
1 0 [[[2 3]]]
1 1 [[[3 4]]]
'''
for i, j, s0, s1 in subCell2DSlices(arr, shape, d01, p01):
yield i, j, arr[s0, s1]
|
[
"def",
"subCell2DGenerator",
"(",
"arr",
",",
"shape",
",",
"d01",
"=",
"None",
",",
"p01",
"=",
"None",
")",
":",
"for",
"i",
",",
"j",
",",
"s0",
",",
"s1",
"in",
"subCell2DSlices",
"(",
"arr",
",",
"shape",
",",
"d01",
",",
"p01",
")",
":",
"yield",
"i",
",",
"j",
",",
"arr",
"[",
"s0",
",",
"s1",
"]"
] |
Generator to access evenly sized sub-cells in a 2d array
Args:
shape (tuple): number of sub-cells in y,x e.g. (10,15)
d01 (tuple, optional): cell size in y and x
p01 (tuple, optional): position of top left edge
Returns:
int: 1st index
int: 2nd index
array: sub array
Example:
>>> a = np.array([[[0,1],[1,2]],[[2,3],[3,4]]])
>>> gen = subCell2DGenerator(a,(2,2))
>>> for i,j, sub in gen: print( i,j, sub )
0 0 [[[0 1]]]
0 1 [[[1 2]]]
1 0 [[[2 3]]]
1 1 [[[3 4]]]
|
[
"Generator",
"to",
"access",
"evenly",
"sized",
"sub",
"-",
"cells",
"in",
"a",
"2d",
"array",
"Args",
":",
"shape",
"(",
"tuple",
")",
":",
"number",
"of",
"sub",
"-",
"cells",
"in",
"y",
"x",
"e",
".",
"g",
".",
"(",
"10",
"15",
")",
"d01",
"(",
"tuple",
"optional",
")",
":",
"cell",
"size",
"in",
"y",
"and",
"x",
"p01",
"(",
"tuple",
"optional",
")",
":",
"position",
"of",
"top",
"left",
"edge",
"Returns",
":",
"int",
":",
"1st",
"index",
"int",
":",
"2nd",
"index",
"array",
":",
"sub",
"array",
"Example",
":",
">>>",
"a",
"=",
"np",
".",
"array",
"(",
"[[[",
"0",
"1",
"]",
"[",
"1",
"2",
"]]",
"[[",
"2",
"3",
"]",
"[",
"3",
"4",
"]]]",
")",
">>>",
"gen",
"=",
"subCell2DGenerator",
"(",
"a",
"(",
"2",
"2",
"))",
">>>",
"for",
"i",
"j",
"sub",
"in",
"gen",
":",
"print",
"(",
"i",
"j",
"sub",
")",
"0",
"0",
"[[[",
"0",
"1",
"]]]",
"0",
"1",
"[[[",
"1",
"2",
"]]]",
"1",
"0",
"[[[",
"2",
"3",
"]]]",
"1",
"1",
"[[[",
"3",
"4",
"]]]"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/array/subCell2D.py#L5-L29
|
radjkarl/imgProcessor
|
imgProcessor/array/subCell2D.py
|
subCell2DSlices
|
def subCell2DSlices(arr, shape, d01=None, p01=None):
'''Generator to access evenly sized sub-cells in a 2d array
Args:
shape (tuple): number of sub-cells in y,x e.g. (10,15)
d01 (tuple, optional): cell size in y and x
p01 (tuple, optional): position of top left edge
Returns:
int: 1st index
int: 2nd index
slice: first dimension
slice: 1st dimension
'''
if p01 is not None:
yinit, xinit = p01
else:
xinit, yinit = 0, 0
x, y = xinit, yinit
g0, g1 = shape
s0, s1 = arr.shape[:2]
if d01 is not None:
d0, d1 = d01
else:
d0, d1 = s0 / g0, s1 / g1
y1 = d0 + yinit
for i in range(g0):
for j in range(g1):
x1 = x + d1
yield (i, j, slice(max(0, _rint(y)),
max(0, _rint(y1))),
slice(max(0, _rint(x)),
max(0, _rint(x1))))
x = x1
y = y1
y1 = y + d0
x = xinit
|
python
|
def subCell2DSlices(arr, shape, d01=None, p01=None):
'''Generator to access evenly sized sub-cells in a 2d array
Args:
shape (tuple): number of sub-cells in y,x e.g. (10,15)
d01 (tuple, optional): cell size in y and x
p01 (tuple, optional): position of top left edge
Returns:
int: 1st index
int: 2nd index
slice: first dimension
slice: 1st dimension
'''
if p01 is not None:
yinit, xinit = p01
else:
xinit, yinit = 0, 0
x, y = xinit, yinit
g0, g1 = shape
s0, s1 = arr.shape[:2]
if d01 is not None:
d0, d1 = d01
else:
d0, d1 = s0 / g0, s1 / g1
y1 = d0 + yinit
for i in range(g0):
for j in range(g1):
x1 = x + d1
yield (i, j, slice(max(0, _rint(y)),
max(0, _rint(y1))),
slice(max(0, _rint(x)),
max(0, _rint(x1))))
x = x1
y = y1
y1 = y + d0
x = xinit
|
[
"def",
"subCell2DSlices",
"(",
"arr",
",",
"shape",
",",
"d01",
"=",
"None",
",",
"p01",
"=",
"None",
")",
":",
"if",
"p01",
"is",
"not",
"None",
":",
"yinit",
",",
"xinit",
"=",
"p01",
"else",
":",
"xinit",
",",
"yinit",
"=",
"0",
",",
"0",
"x",
",",
"y",
"=",
"xinit",
",",
"yinit",
"g0",
",",
"g1",
"=",
"shape",
"s0",
",",
"s1",
"=",
"arr",
".",
"shape",
"[",
":",
"2",
"]",
"if",
"d01",
"is",
"not",
"None",
":",
"d0",
",",
"d1",
"=",
"d01",
"else",
":",
"d0",
",",
"d1",
"=",
"s0",
"/",
"g0",
",",
"s1",
"/",
"g1",
"y1",
"=",
"d0",
"+",
"yinit",
"for",
"i",
"in",
"range",
"(",
"g0",
")",
":",
"for",
"j",
"in",
"range",
"(",
"g1",
")",
":",
"x1",
"=",
"x",
"+",
"d1",
"yield",
"(",
"i",
",",
"j",
",",
"slice",
"(",
"max",
"(",
"0",
",",
"_rint",
"(",
"y",
")",
")",
",",
"max",
"(",
"0",
",",
"_rint",
"(",
"y1",
")",
")",
")",
",",
"slice",
"(",
"max",
"(",
"0",
",",
"_rint",
"(",
"x",
")",
")",
",",
"max",
"(",
"0",
",",
"_rint",
"(",
"x1",
")",
")",
")",
")",
"x",
"=",
"x1",
"y",
"=",
"y1",
"y1",
"=",
"y",
"+",
"d0",
"x",
"=",
"xinit"
] |
Generator to access evenly sized sub-cells in a 2d array
Args:
shape (tuple): number of sub-cells in y,x e.g. (10,15)
d01 (tuple, optional): cell size in y and x
p01 (tuple, optional): position of top left edge
Returns:
int: 1st index
int: 2nd index
slice: first dimension
slice: 1st dimension
|
[
"Generator",
"to",
"access",
"evenly",
"sized",
"sub",
"-",
"cells",
"in",
"a",
"2d",
"array",
"Args",
":",
"shape",
"(",
"tuple",
")",
":",
"number",
"of",
"sub",
"-",
"cells",
"in",
"y",
"x",
"e",
".",
"g",
".",
"(",
"10",
"15",
")",
"d01",
"(",
"tuple",
"optional",
")",
":",
"cell",
"size",
"in",
"y",
"and",
"x",
"p01",
"(",
"tuple",
"optional",
")",
":",
"position",
"of",
"top",
"left",
"edge",
"Returns",
":",
"int",
":",
"1st",
"index",
"int",
":",
"2nd",
"index",
"slice",
":",
"first",
"dimension",
"slice",
":",
"1st",
"dimension"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/array/subCell2D.py#L32-L71
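A short sketch showing the yielded slices; a (2, 3) grid on a 6x6 array gives six cells of shape (3, 2):

import numpy as np
from imgProcessor.array.subCell2D import subCell2DSlices

arr = np.arange(36).reshape(6, 6)
for i, j, s0, s1 in subCell2DSlices(arr, (2, 3)):
    print(i, j, arr[s0, s1].shape)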
|
radjkarl/imgProcessor
|
imgProcessor/array/subCell2D.py
|
subCell2DCoords
|
def subCell2DCoords(*args, **kwargs):
'''Same as subCell2DSlices but returning coordinates
Example:
g = subCell2DCoords(arr, shape)
for x, y in g:
plt.plot(x, y)
'''
for _, _, s0, s1 in subCell2DSlices(*args, **kwargs):
yield ((s1.start, s1.start, s1.stop),
(s0.start, s0.stop, s0.stop))
|
python
|
def subCell2DCoords(*args, **kwargs):
'''Same as subCell2DSlices but returning coordinates
Example:
g = subCell2DCoords(arr, shape)
for x, y in g:
plt.plot(x, y)
'''
for _, _, s0, s1 in subCell2DSlices(*args, **kwargs):
yield ((s1.start, s1.start, s1.stop),
(s0.start, s0.stop, s0.stop))
|
[
"def",
"subCell2DCoords",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"_",
",",
"_",
",",
"s0",
",",
"s1",
"in",
"subCell2DSlices",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"yield",
"(",
"(",
"s1",
".",
"start",
",",
"s1",
".",
"start",
",",
"s1",
".",
"stop",
")",
",",
"(",
"s0",
".",
"start",
",",
"s0",
".",
"stop",
",",
"s0",
".",
"stop",
")",
")"
] |
Same as subCell2DSlices but returning coordinates
Example:
g = subCell2DCoords(arr, shape)
for x, y in g:
plt.plot(x, y)
|
[
"Same",
"as",
"subCell2DSlices",
"but",
"returning",
"coordinates",
"Example",
":",
"g",
"=",
"subCell2DCoords",
"(",
"arr",
"shape",
")",
"for",
"x",
"y",
"in",
"g",
":",
"plt",
".",
"plot",
"(",
"x",
"y",
")"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/array/subCell2D.py#L74-L84
|
radjkarl/imgProcessor
|
imgProcessor/array/subCell2D.py
|
subCell2DFnArray
|
def subCell2DFnArray(arr, fn, shape, dtype=None, **kwargs):
'''
Return array where every cell is the output of a given cell function
Args:
fn (function): ...to be executed on all sub-arrays
Returns:
array: value of every cell equals result of fn(sub-array)
Example:
mx = subCell2DFnArray(myArray, np.max, (10,6) )
- -> here mx is a 2d array containing all cell maxima
'''
sh = list(arr.shape)
sh[:2] = shape
out = np.empty(sh, dtype=dtype)
for i, j, c in subCell2DGenerator(arr, shape, **kwargs):
out[i, j] = fn(c)
return out
|
python
|
def subCell2DFnArray(arr, fn, shape, dtype=None, **kwargs):
'''
Return array where every cell is the output of a given cell function
Args:
fn (function): ...to be executed on all sub-arrays
Returns:
array: value of every cell equals result of fn(sub-array)
Example:
mx = subCell2DFnArray(myArray, np.max, (10,6) )
- -> here mx is a 2d array containing all cell maxima
'''
sh = list(arr.shape)
sh[:2] = shape
out = np.empty(sh, dtype=dtype)
for i, j, c in subCell2DGenerator(arr, shape, **kwargs):
out[i, j] = fn(c)
return out
|
[
"def",
"subCell2DFnArray",
"(",
"arr",
",",
"fn",
",",
"shape",
",",
"dtype",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"sh",
"=",
"list",
"(",
"arr",
".",
"shape",
")",
"sh",
"[",
":",
"2",
"]",
"=",
"shape",
"out",
"=",
"np",
".",
"empty",
"(",
"sh",
",",
"dtype",
"=",
"dtype",
")",
"for",
"i",
",",
"j",
",",
"c",
"in",
"subCell2DGenerator",
"(",
"arr",
",",
"shape",
",",
"*",
"*",
"kwargs",
")",
":",
"out",
"[",
"i",
",",
"j",
"]",
"=",
"fn",
"(",
"c",
")",
"return",
"out"
] |
Return array where every cell is the output of a given cell function
Args:
fn (function): ...to be executed on all sub-arrays
Returns:
array: value of every cell equals result of fn(sub-array)
Example:
mx = subCell2DFnArray(myArray, np.max, (10,6) )
- -> here mx is a 2d array containing all cell maxima
|
[
"Return",
"array",
"where",
"every",
"cell",
"is",
"the",
"output",
"of",
"a",
"given",
"cell",
"function",
"Args",
":",
"fn",
"(",
"function",
")",
":",
"...",
"to",
"be",
"executed",
"on",
"all",
"sub",
"-",
"arrays",
"Returns",
":",
"array",
":",
"value",
"of",
"every",
"cell",
"equals",
"result",
"of",
"fn",
"(",
"sub",
"-",
"array",
")",
"Example",
":",
"mx",
"=",
"subCell2DFnArray",
"(",
"myArray",
"np",
".",
"max",
"(",
"10",
"6",
")",
")",
"-",
"-",
">",
"here",
"mx",
"is",
"a",
"2d",
"array",
"containing",
"all",
"cell",
"maxima"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/array/subCell2D.py#L87-L107
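Making the docstring example concrete and runnable:

import numpy as np
from imgProcessor.array.subCell2D import subCell2DFnArray

arr = np.random.rand(100, 60)
mx = subCell2DFnArray(arr, np.max, (10, 6))
print(mx.shape)                          # (10, 6) - one maximum per sub-cell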
|
radjkarl/imgProcessor
|
imgProcessor/equations/defocusThroughDepth.py
|
defocusThroughDepth
|
def defocusThroughDepth(u, uf, f, fn, k=2.355):
'''
return the defocus (mm std) through DOF
u -> scene point (depth value)
uf -> in-focus position (the distance at which the scene point should be placed in order to be focused)
f -> focal length
k -> camera dependent constant (transferring blur circle to PSF), 2.335 would be FHWD of 2dgaussian
fn --> f-number (relative aperture)
equation (3) taken from http://linkinghub.elsevier.com/retrieve/pii/S0031320312004736
Pertuz et.al. "Analysis of focus measure operators for shape-from-focus"
all parameter should be in same physical unit [mm]
!! assumes spatial invariant blur
'''
# A = f/fn
return (k/fn) * (f**2*abs(u-uf)) / (u*(uf-f))
|
python
|
def defocusThroughDepth(u, uf, f, fn, k=2.355):
'''
return the defocus (mm std) through DOF
u -> scene point (depth value)
uf -> in-focus position (the distance at which the scene point should be placed in order to be focused)
f -> focal length
k -> camera dependent constant (transferring blur circle to PSF), 2.335 would be FHWD of 2dgaussian
fn --> f-number (relative aperture)
equation (3) taken from http://linkinghub.elsevier.com/retrieve/pii/S0031320312004736
Pertuz et.al. "Analysis of focus measure operators for shape-from-focus"
all parameter should be in same physical unit [mm]
!! assumes spatial invariant blur
'''
# A = f/fn
return (k/fn) * (f**2*abs(u-uf)) / (u*(uf-f))
|
[
"def",
"defocusThroughDepth",
"(",
"u",
",",
"uf",
",",
"f",
",",
"fn",
",",
"k",
"=",
"2.355",
")",
":",
"# A = f/fn \r",
"return",
"(",
"k",
"/",
"fn",
")",
"*",
"(",
"f",
"**",
"2",
"*",
"abs",
"(",
"u",
"-",
"uf",
")",
")",
"/",
"(",
"u",
"*",
"(",
"uf",
"-",
"f",
")",
")"
] |
return the defocus (mm std) through DOF
u -> scene point (depth value)
uf -> in-focus position (the distance at which the scene point should be placed in order to be focused)
f -> focal length
k -> camera dependent constant (transferring blur circle to PSF); 2.355 would be the FWHM of a 2D Gaussian
fn --> f-number (relative aperture)
equation (3) taken from http://linkinghub.elsevier.com/retrieve/pii/S0031320312004736
Pertuz et.al. "Analysis of focus measure operators for shape-from-focus"
all parameters should be in the same physical unit [mm]
!! assumes spatially invariant blur
|
[
"return",
"the",
"defocus",
"(",
"mm",
"std",
")",
"through",
"DOF",
"u",
"-",
">",
"scene",
"point",
"(",
"depth",
"value",
")",
"uf",
"-",
">",
"in",
"-",
"focus",
"position",
"(",
"the",
"distance",
"at",
"which",
"the",
"scene",
"point",
"should",
"be",
"placed",
"in",
"order",
"to",
"be",
"focused",
")",
"f",
"-",
">",
"focal",
"length",
"k",
"-",
">",
"camera",
"dependent",
"constant",
"(",
"transferring",
"blur",
"circle",
"to",
"PSF",
")",
"2",
".",
"335",
"would",
"be",
"FHWD",
"of",
"2dgaussian",
"fn",
"--",
">",
"f",
"-",
"number",
"(",
"relative",
"aperture",
")",
"equation",
"(",
"3",
")",
"taken",
"from",
"http",
":",
"//",
"linkinghub",
".",
"elsevier",
".",
"com",
"/",
"retrieve",
"/",
"pii",
"/",
"S0031320312004736",
"Pertuz",
"et",
".",
"al",
".",
"Analysis",
"of",
"focus",
"measure",
"operators",
"for",
"shape",
"-",
"from",
"-",
"focus",
"all",
"parameter",
"should",
"be",
"in",
"same",
"physical",
"unit",
"[",
"mm",
"]",
"!!",
"assumes",
"spatial",
"invariant",
"blur"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/equations/defocusThroughDepth.py#L4-L22
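A worked example of the equation (all lengths in mm): a 50 mm lens at f/2.8 focused at 1 m, with the scene point at 1.5 m:

from imgProcessor.equations.defocusThroughDepth import defocusThroughDepth

sigma = defocusThroughDepth(u=1500., uf=1000., f=50., fn=2.8)
# = (2.355/2.8) * (50**2 * abs(1500 - 1000)) / (1500 * (1000 - 50)) ≈ 0.74 mm std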
|
radjkarl/imgProcessor
|
imgProcessor/filters/_extendArrayForConvolution.py
|
extendArrayForConvolution
|
def extendArrayForConvolution(arr, kernelXY,
modex='reflect',
modey='reflect'):
'''
extends a given array right right border handling
for convolution
-->in opposite to skimage and skipy this function
allows to chose different mode = ('reflect', 'wrap')
in x and y direction
only supports 'warp' and 'reflect' at the moment
'''
(kx, ky) = kernelXY
kx//=2
ky//=2
#indexing 0:-0 leads to empty arrays and not the whole thing
#make it easy with assuming ksize=1 and removing extra size later:
nokx = kx == 0
noky = ky == 0
if nokx:
kx = 1
if noky:
ky = 1
s0,s1 = arr.shape
assert ky < s0
assert kx < s1
arr2 = np.zeros((s0+2*ky, s1+2*kx), dtype=arr.dtype)
if kx == 0:
kx = None
arr2[ky:-ky,kx:-kx]=arr
#original array:
t = arr[:ky] #TOP
rb = arr[-1:-ky-1:-1] #reverse bottom
rt = arr[ky-1::-1] #reverse top
rr = arr[:,-1:-kx-1:-1] #reverse right
l = arr[:,:kx] #left
# rtl = arr[ky-1::-1,kx-1::-1]
#filter array:
tm2 = arr2[:ky , kx:-kx] #TOP-MIDDLE
bm2 = arr2[-ky:, kx:-kx] #BOTTOM-MIDDLE
tl2 = arr2[:ky , :kx] #TOP-LEFT
bl2 = arr2[-ky:, :kx] #BOTTOM-LEFT
tr2 = arr2[:ky:, -kx:]#TOP-RIGHT
br2 = arr2[-ky:, -kx:]#TOP-RIGHT
#fill edges:
if modey == 'warp':
tm2[:] = t
bm2[:] = rb
tl2[:] = arr2[2*ky:ky:-1,:kx]
bl2[:] = arr2[-ky-1:-2*ky-1:-1,:kx]
#TODO: do other options!!!
elif modey == 'reflect':
tm2[:] = rt
bm2[:] = rb
if modex =='reflect':
tl2[:] = arr[ky-1::-1,kx-1::-1]
bl2[:] = arr[-1:-ky-1:-1,kx-1::-1]
tr2[:] = arr[:ky,-kx:][::-1,::-1]
br2[:] = arr[-ky:,-kx:][::-1,::-1]
else:#warp
tl2[:] = arr[ky-1::-1 , -kx:]
bl2[:] = arr[-1:-ky-1:-1 , -kx:]
tr2[:] = arr[ky-1::-1 , :kx]
br2[:] = arr[-1:-ky-1:-1 , :kx]
else:
raise Exception('modey not supported')
if modex == 'wrap':
arr2[ky:-ky,kx-1::-1] = rr
arr2[ky:-ky,-kx:] = l
elif modex == 'reflect':
arr2[ky:-ky,:kx] = l[:,::-1]
arr2[ky:-ky,-kx:] = rr
else:
raise Exception('modex not supported')
if nokx:
arr2 = arr2[:,1:-1]
if noky:
arr2 = arr2[1:-1]
return arr2
|
python
|
def extendArrayForConvolution(arr, kernelXY,
modex='reflect',
modey='reflect'):
'''
extends a given array right right border handling
for convolution
-->in opposite to skimage and skipy this function
allows to chose different mode = ('reflect', 'wrap')
in x and y direction
only supports 'warp' and 'reflect' at the moment
'''
(kx, ky) = kernelXY
kx//=2
ky//=2
#indexing 0:-0 leads to empty arrays and not the whole thing
#make it easy with assuming ksize=1 and removing extra size later:
nokx = kx == 0
noky = ky == 0
if nokx:
kx = 1
if noky:
ky = 1
s0,s1 = arr.shape
assert ky < s0
assert kx < s1
arr2 = np.zeros((s0+2*ky, s1+2*kx), dtype=arr.dtype)
if kx == 0:
kx = None
arr2[ky:-ky,kx:-kx]=arr
#original array:
t = arr[:ky] #TOP
rb = arr[-1:-ky-1:-1] #reverse bottom
rt = arr[ky-1::-1] #reverse top
rr = arr[:,-1:-kx-1:-1] #reverse right
l = arr[:,:kx] #left
# rtl = arr[ky-1::-1,kx-1::-1]
#filter array:
tm2 = arr2[:ky , kx:-kx] #TOP-MIDDLE
bm2 = arr2[-ky:, kx:-kx] #BOTTOM-MIDDLE
tl2 = arr2[:ky , :kx] #TOP-LEFT
bl2 = arr2[-ky:, :kx] #BOTTOM-LEFT
tr2 = arr2[:ky:, -kx:]#TOP-RIGHT
br2 = arr2[-ky:, -kx:]#TOP-RIGHT
#fill edges:
if modey == 'warp':
tm2[:] = t
bm2[:] = rb
tl2[:] = arr2[2*ky:ky:-1,:kx]
bl2[:] = arr2[-ky-1:-2*ky-1:-1,:kx]
#TODO: do other options!!!
elif modey == 'reflect':
tm2[:] = rt
bm2[:] = rb
if modex =='reflect':
tl2[:] = arr[ky-1::-1,kx-1::-1]
bl2[:] = arr[-1:-ky-1:-1,kx-1::-1]
tr2[:] = arr[:ky,-kx:][::-1,::-1]
br2[:] = arr[-ky:,-kx:][::-1,::-1]
else:#warp
tl2[:] = arr[ky-1::-1 , -kx:]
bl2[:] = arr[-1:-ky-1:-1 , -kx:]
tr2[:] = arr[ky-1::-1 , :kx]
br2[:] = arr[-1:-ky-1:-1 , :kx]
else:
raise Exception('modey not supported')
if modex == 'wrap':
arr2[ky:-ky,kx-1::-1] = rr
arr2[ky:-ky,-kx:] = l
elif modex == 'reflect':
arr2[ky:-ky,:kx] = l[:,::-1]
arr2[ky:-ky,-kx:] = rr
else:
raise Exception('modex not supported')
if nokx:
arr2 = arr2[:,1:-1]
if noky:
arr2 = arr2[1:-1]
return arr2
|
[
"def",
"extendArrayForConvolution",
"(",
"arr",
",",
"kernelXY",
",",
"modex",
"=",
"'reflect'",
",",
"modey",
"=",
"'reflect'",
")",
":",
"(",
"kx",
",",
"ky",
")",
"=",
"kernelXY",
"kx",
"//=",
"2",
"ky",
"//=",
"2",
"#indexing 0:-0 leads to empty arrays and not the whole thing\r",
"#make it easy with assuming ksize=1 and removing extra size later:\r",
"nokx",
"=",
"kx",
"==",
"0",
"noky",
"=",
"ky",
"==",
"0",
"if",
"nokx",
":",
"kx",
"=",
"1",
"if",
"noky",
":",
"ky",
"=",
"1",
"s0",
",",
"s1",
"=",
"arr",
".",
"shape",
"assert",
"ky",
"<",
"s0",
"assert",
"kx",
"<",
"s1",
"arr2",
"=",
"np",
".",
"zeros",
"(",
"(",
"s0",
"+",
"2",
"*",
"ky",
",",
"s1",
"+",
"2",
"*",
"kx",
")",
",",
"dtype",
"=",
"arr",
".",
"dtype",
")",
"if",
"kx",
"==",
"0",
":",
"kx",
"=",
"None",
"arr2",
"[",
"ky",
":",
"-",
"ky",
",",
"kx",
":",
"-",
"kx",
"]",
"=",
"arr",
"#original array:\r",
"t",
"=",
"arr",
"[",
":",
"ky",
"]",
"#TOP\r",
"rb",
"=",
"arr",
"[",
"-",
"1",
":",
"-",
"ky",
"-",
"1",
":",
"-",
"1",
"]",
"#reverse bottom\r",
"rt",
"=",
"arr",
"[",
"ky",
"-",
"1",
":",
":",
"-",
"1",
"]",
"#reverse top\r",
"rr",
"=",
"arr",
"[",
":",
",",
"-",
"1",
":",
"-",
"kx",
"-",
"1",
":",
"-",
"1",
"]",
"#reverse right\r",
"l",
"=",
"arr",
"[",
":",
",",
":",
"kx",
"]",
"#left\r",
"# rtl = arr[ky-1::-1,kx-1::-1]\r",
"#filter array:\r",
"tm2",
"=",
"arr2",
"[",
":",
"ky",
",",
"kx",
":",
"-",
"kx",
"]",
"#TOP-MIDDLE\r",
"bm2",
"=",
"arr2",
"[",
"-",
"ky",
":",
",",
"kx",
":",
"-",
"kx",
"]",
"#BOTTOM-MIDDLE\r",
"tl2",
"=",
"arr2",
"[",
":",
"ky",
",",
":",
"kx",
"]",
"#TOP-LEFT\r",
"bl2",
"=",
"arr2",
"[",
"-",
"ky",
":",
",",
":",
"kx",
"]",
"#BOTTOM-LEFT\r",
"tr2",
"=",
"arr2",
"[",
":",
"ky",
":",
",",
"-",
"kx",
":",
"]",
"#TOP-RIGHT\r",
"br2",
"=",
"arr2",
"[",
"-",
"ky",
":",
",",
"-",
"kx",
":",
"]",
"#TOP-RIGHT\r",
"#fill edges:\r",
"if",
"modey",
"==",
"'warp'",
":",
"tm2",
"[",
":",
"]",
"=",
"t",
"bm2",
"[",
":",
"]",
"=",
"rb",
"tl2",
"[",
":",
"]",
"=",
"arr2",
"[",
"2",
"*",
"ky",
":",
"ky",
":",
"-",
"1",
",",
":",
"kx",
"]",
"bl2",
"[",
":",
"]",
"=",
"arr2",
"[",
"-",
"ky",
"-",
"1",
":",
"-",
"2",
"*",
"ky",
"-",
"1",
":",
"-",
"1",
",",
":",
"kx",
"]",
"#TODO: do other options!!! \r",
"elif",
"modey",
"==",
"'reflect'",
":",
"tm2",
"[",
":",
"]",
"=",
"rt",
"bm2",
"[",
":",
"]",
"=",
"rb",
"if",
"modex",
"==",
"'reflect'",
":",
"tl2",
"[",
":",
"]",
"=",
"arr",
"[",
"ky",
"-",
"1",
":",
":",
"-",
"1",
",",
"kx",
"-",
"1",
":",
":",
"-",
"1",
"]",
"bl2",
"[",
":",
"]",
"=",
"arr",
"[",
"-",
"1",
":",
"-",
"ky",
"-",
"1",
":",
"-",
"1",
",",
"kx",
"-",
"1",
":",
":",
"-",
"1",
"]",
"tr2",
"[",
":",
"]",
"=",
"arr",
"[",
":",
"ky",
",",
"-",
"kx",
":",
"]",
"[",
":",
":",
"-",
"1",
",",
":",
":",
"-",
"1",
"]",
"br2",
"[",
":",
"]",
"=",
"arr",
"[",
"-",
"ky",
":",
",",
"-",
"kx",
":",
"]",
"[",
":",
":",
"-",
"1",
",",
":",
":",
"-",
"1",
"]",
"else",
":",
"#warp\r",
"tl2",
"[",
":",
"]",
"=",
"arr",
"[",
"ky",
"-",
"1",
":",
":",
"-",
"1",
",",
"-",
"kx",
":",
"]",
"bl2",
"[",
":",
"]",
"=",
"arr",
"[",
"-",
"1",
":",
"-",
"ky",
"-",
"1",
":",
"-",
"1",
",",
"-",
"kx",
":",
"]",
"tr2",
"[",
":",
"]",
"=",
"arr",
"[",
"ky",
"-",
"1",
":",
":",
"-",
"1",
",",
":",
"kx",
"]",
"br2",
"[",
":",
"]",
"=",
"arr",
"[",
"-",
"1",
":",
"-",
"ky",
"-",
"1",
":",
"-",
"1",
",",
":",
"kx",
"]",
"else",
":",
"raise",
"Exception",
"(",
"'modey not supported'",
")",
"if",
"modex",
"==",
"'wrap'",
":",
"arr2",
"[",
"ky",
":",
"-",
"ky",
",",
"kx",
"-",
"1",
":",
":",
"-",
"1",
"]",
"=",
"rr",
"arr2",
"[",
"ky",
":",
"-",
"ky",
",",
"-",
"kx",
":",
"]",
"=",
"l",
"elif",
"modex",
"==",
"'reflect'",
":",
"arr2",
"[",
"ky",
":",
"-",
"ky",
",",
":",
"kx",
"]",
"=",
"l",
"[",
":",
",",
":",
":",
"-",
"1",
"]",
"arr2",
"[",
"ky",
":",
"-",
"ky",
",",
"-",
"kx",
":",
"]",
"=",
"rr",
"else",
":",
"raise",
"Exception",
"(",
"'modex not supported'",
")",
"if",
"nokx",
":",
"arr2",
"=",
"arr2",
"[",
":",
",",
"1",
":",
"-",
"1",
"]",
"if",
"noky",
":",
"arr2",
"=",
"arr2",
"[",
"1",
":",
"-",
"1",
"]",
"return",
"arr2"
] |
extends a given array with the right border handling
for convolution
--> in contrast to skimage and scipy, this function
allows choosing different modes ('reflect', 'wrap')
in x and y direction
only 'reflect' and 'wrap' are supported at the moment
(note: the code compares modey against the misspelling 'warp', but modex against 'wrap')
|
[
"extends",
"a",
"given",
"array",
"right",
"right",
"border",
"handling",
"for",
"convolution",
"--",
">",
"in",
"opposite",
"to",
"skimage",
"and",
"skipy",
"this",
"function",
"allows",
"to",
"chose",
"different",
"mode",
"=",
"(",
"reflect",
"wrap",
")",
"in",
"x",
"and",
"y",
"direction",
"only",
"supports",
"warp",
"and",
"reflect",
"at",
"the",
"moment"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/filters/_extendArrayForConvolution.py#L5-L97
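A small sketch padding a 4x4 array by half a 3x3 kernel with a different border mode per axis (caution: as written, modey must be spelled 'warp' to get wrap-around in y, while modex expects 'wrap'):

import numpy as np
from imgProcessor.filters._extendArrayForConvolution import extendArrayForConvolution

a = np.arange(16).reshape(4, 4)
ext = extendArrayForConvolution(a, (3, 3), modex='wrap', modey='reflect')
print(ext.shape)                         # (6, 6): one extra row/column on each side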
|
radjkarl/imgProcessor
|
imgProcessor/camera/LensDistortion.py
|
LensDistortion.calibrate
|
def calibrate(self, board_size=(8, 6), method='Chessboard', images=[],
max_images=100, sensorSize_mm=None,
detect_sensible=True):
'''
sensorSize_mm - (width, height) [mm] Physical size of the sensor
'''
self._coeffs = {}
self.opts = {'foundPattern': [], # whether pattern could be found for image
'size': board_size,
'imgs': [], # list of either npArrsays or img paths
# list or 2d coords. of found pattern features (e.g.
# chessboard corners)
'imgPoints': []
}
self._detect_sensible = detect_sensible
self.method = {'Chessboard': self._findChessboard,
'Symmetric circles': self._findSymmetricCircles,
'Asymmetric circles': self._findAsymmetricCircles,
'Manual': None
# TODO: 'Image grid':FindGridInImage
}[method]
self.max_images = max_images
self.findCount = 0
self.apertureSize = sensorSize_mm
self.objp = self._mkObjPoints(board_size)
if method == 'Asymmetric circles':
# this pattern have its points (every 2. row) displaced, so:
i = self.objp[:, 1] % 2 == 1
self.objp[:, 0] *= 2
self.objp[i, 0] += 1
# Arrays to store object points and image points from all the images.
self.objpoints = [] # 3d point in real world space
# self.imgpoints = [] # 2d points in image plane.
self.mapx, self.mapy = None, None
# from matplotlib import pyplot as plt
for n, i in enumerate(images):
print('working on image %s' % n)
if self.addImg(i):
print('OK')
|
python
|
def calibrate(self, board_size=(8, 6), method='Chessboard', images=[],
max_images=100, sensorSize_mm=None,
detect_sensible=True):
'''
sensorSize_mm - (width, height) [mm] Physical size of the sensor
'''
self._coeffs = {}
self.opts = {'foundPattern': [], # whether pattern could be found for image
'size': board_size,
'imgs': [], # list of either npArrsays or img paths
# list or 2d coords. of found pattern features (e.g.
# chessboard corners)
'imgPoints': []
}
self._detect_sensible = detect_sensible
self.method = {'Chessboard': self._findChessboard,
'Symmetric circles': self._findSymmetricCircles,
'Asymmetric circles': self._findAsymmetricCircles,
'Manual': None
# TODO: 'Image grid':FindGridInImage
}[method]
self.max_images = max_images
self.findCount = 0
self.apertureSize = sensorSize_mm
self.objp = self._mkObjPoints(board_size)
if method == 'Asymmetric circles':
# this pattern have its points (every 2. row) displaced, so:
i = self.objp[:, 1] % 2 == 1
self.objp[:, 0] *= 2
self.objp[i, 0] += 1
# Arrays to store object points and image points from all the images.
self.objpoints = [] # 3d point in real world space
# self.imgpoints = [] # 2d points in image plane.
self.mapx, self.mapy = None, None
# from matplotlib import pyplot as plt
for n, i in enumerate(images):
print('working on image %s' % n)
if self.addImg(i):
print('OK')
|
[
"def",
"calibrate",
"(",
"self",
",",
"board_size",
"=",
"(",
"8",
",",
"6",
")",
",",
"method",
"=",
"'Chessboard'",
",",
"images",
"=",
"[",
"]",
",",
"max_images",
"=",
"100",
",",
"sensorSize_mm",
"=",
"None",
",",
"detect_sensible",
"=",
"True",
")",
":",
"self",
".",
"_coeffs",
"=",
"{",
"}",
"self",
".",
"opts",
"=",
"{",
"'foundPattern'",
":",
"[",
"]",
",",
"# whether pattern could be found for image\r",
"'size'",
":",
"board_size",
",",
"'imgs'",
":",
"[",
"]",
",",
"# list of either npArrsays or img paths\r",
"# list or 2d coords. of found pattern features (e.g.\r",
"# chessboard corners)\r",
"'imgPoints'",
":",
"[",
"]",
"}",
"self",
".",
"_detect_sensible",
"=",
"detect_sensible",
"self",
".",
"method",
"=",
"{",
"'Chessboard'",
":",
"self",
".",
"_findChessboard",
",",
"'Symmetric circles'",
":",
"self",
".",
"_findSymmetricCircles",
",",
"'Asymmetric circles'",
":",
"self",
".",
"_findAsymmetricCircles",
",",
"'Manual'",
":",
"None",
"# TODO: 'Image grid':FindGridInImage\r",
"}",
"[",
"method",
"]",
"self",
".",
"max_images",
"=",
"max_images",
"self",
".",
"findCount",
"=",
"0",
"self",
".",
"apertureSize",
"=",
"sensorSize_mm",
"self",
".",
"objp",
"=",
"self",
".",
"_mkObjPoints",
"(",
"board_size",
")",
"if",
"method",
"==",
"'Asymmetric circles'",
":",
"# this pattern have its points (every 2. row) displaced, so:\r",
"i",
"=",
"self",
".",
"objp",
"[",
":",
",",
"1",
"]",
"%",
"2",
"==",
"1",
"self",
".",
"objp",
"[",
":",
",",
"0",
"]",
"*=",
"2",
"self",
".",
"objp",
"[",
"i",
",",
"0",
"]",
"+=",
"1",
"# Arrays to store object points and image points from all the images.\r",
"self",
".",
"objpoints",
"=",
"[",
"]",
"# 3d point in real world space\r",
"# self.imgpoints = [] # 2d points in image plane.\r",
"self",
".",
"mapx",
",",
"self",
".",
"mapy",
"=",
"None",
",",
"None",
"# from matplotlib import pyplot as plt\r",
"for",
"n",
",",
"i",
"in",
"enumerate",
"(",
"images",
")",
":",
"print",
"(",
"'working on image %s'",
"%",
"n",
")",
"if",
"self",
".",
"addImg",
"(",
"i",
")",
":",
"print",
"(",
"'OK'",
")"
] |
sensorSize_mm - (width, height) [mm] Physical size of the sensor
|
[
"sensorSize_mm",
"-",
"(",
"width",
"height",
")",
"[",
"mm",
"]",
"Physical",
"size",
"of",
"the",
"sensor"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/LensDistortion.py#L35-L80
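A hedged usage sketch (the no-argument constructor is an assumption and the image file names are placeholders):

from imgProcessor.camera.LensDistortion import LensDistortion

ld = LensDistortion()
ld.calibrate(board_size=(8, 6), method='Chessboard',
             images=['cal01.jpg', 'cal02.jpg', 'cal03.jpg'])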
|
radjkarl/imgProcessor
|
imgProcessor/camera/LensDistortion.py
|
LensDistortion.addPoints
|
def addPoints(self, points, board_size=None):
'''
add corner points directly instead of extracting them from
image
points = ( (0,1), (...),... ) [x,y]
'''
self.opts['foundPattern'].append(True)
self.findCount += 1
if board_size is not None:
self.objpoints.append(self._mkObjPoints(board_size))
else:
self.objpoints.append(self.objp)
s0 = points.shape[0]
self.opts['imgPoints'].append(np.asarray(points).reshape(
s0, 1, 2).astype(np.float32))
|
python
|
def addPoints(self, points, board_size=None):
'''
add corner points directly instead of extracting them from
image
points = ( (0,1), (...),... ) [x,y]
'''
self.opts['foundPattern'].append(True)
self.findCount += 1
if board_size is not None:
self.objpoints.append(self._mkObjPoints(board_size))
else:
self.objpoints.append(self.objp)
s0 = points.shape[0]
self.opts['imgPoints'].append(np.asarray(points).reshape(
s0, 1, 2).astype(np.float32))
|
[
"def",
"addPoints",
"(",
"self",
",",
"points",
",",
"board_size",
"=",
"None",
")",
":",
"self",
".",
"opts",
"[",
"'foundPattern'",
"]",
".",
"append",
"(",
"True",
")",
"self",
".",
"findCount",
"+=",
"1",
"if",
"board_size",
"is",
"not",
"None",
":",
"self",
".",
"objpoints",
".",
"append",
"(",
"self",
".",
"_mkObjPoints",
"(",
"board_size",
")",
")",
"else",
":",
"self",
".",
"objpoints",
".",
"append",
"(",
"self",
".",
"objp",
")",
"s0",
"=",
"points",
".",
"shape",
"[",
"0",
"]",
"self",
".",
"opts",
"[",
"'imgPoints'",
"]",
".",
"append",
"(",
"np",
".",
"asarray",
"(",
"points",
")",
".",
"reshape",
"(",
"s0",
",",
"1",
",",
"2",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
")"
] |
add corner points directly instead of extracting them from
an image
points = ( (0,1), (...),... ) [x,y]
|
[
"add",
"corner",
"points",
"directly",
"instead",
"of",
"extracting",
"them",
"from",
"image",
"points",
"=",
"(",
"(",
"0",
"1",
")",
"(",
"...",
")",
"...",
")",
"[",
"x",
"y",
"]"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/LensDistortion.py#L91-L106
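Feeding externally detected corners instead of an image; the 2x2 grid here is purely illustrative, and ld is the instance from the calibrate sketch above:

import numpy as np

corners = np.array([[0., 0.], [1., 0.], [0., 1.], [1., 1.]], dtype=np.float32)
ld.addPoints(corners, board_size=(2, 2))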
|
radjkarl/imgProcessor
|
imgProcessor/camera/LensDistortion.py
|
LensDistortion.setImgShape
|
def setImgShape(self, shape):
'''
image shape must be known for calculating camera matrix
if method==Manual and addPoints is used instead of addImg
this method must be called before .coeffs are obtained
'''
self.img = type('Dummy', (object,), {})
# if imgProcessor.ARRAYS_ORDER_IS_XY:
# self.img.shape = shape[::-1]
# else:
self.img.shape = shape
|
python
|
def setImgShape(self, shape):
'''
image shape must be known for calculating camera matrix
if method==Manual and addPoints is used instead of addImg
this method must be called before .coeffs are obtained
'''
self.img = type('Dummy', (object,), {})
# if imgProcessor.ARRAYS_ORDER_IS_XY:
# self.img.shape = shape[::-1]
# else:
self.img.shape = shape
|
[
"def",
"setImgShape",
"(",
"self",
",",
"shape",
")",
":",
"self",
".",
"img",
"=",
"type",
"(",
"'Dummy'",
",",
"(",
"object",
",",
")",
",",
"{",
"}",
")",
"# if imgProcessor.ARRAYS_ORDER_IS_XY:\r",
"# self.img.shape = shape[::-1]\r",
"# else:\r",
"self",
".",
"img",
".",
"shape",
"=",
"shape"
] |
image shape must be known for calculating the camera matrix;
if method=='Manual' and addPoints is used instead of addImg,
this method must be called before .coeffs are obtained
|
[
"image",
"shape",
"must",
"be",
"known",
"for",
"calculating",
"camera",
"matrix",
"if",
"method",
"==",
"Manual",
"and",
"addPoints",
"is",
"used",
"instead",
"of",
"addImg",
"this",
"method",
"must",
"be",
"called",
"before",
".",
"coeffs",
"are",
"obtained"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/LensDistortion.py#L108-L118
|
radjkarl/imgProcessor
|
imgProcessor/camera/LensDistortion.py
|
LensDistortion.addImgStream
|
def addImgStream(self, img):
'''
add images using a continous stream
- stop when max number of images is reached
'''
if self.findCount > self.max_images:
raise EnoughImages('have enough images')
return self.addImg(img)
|
python
|
def addImgStream(self, img):
'''
add images using a continous stream
- stop when max number of images is reached
'''
if self.findCount > self.max_images:
raise EnoughImages('have enough images')
return self.addImg(img)
|
[
"def",
"addImgStream",
"(",
"self",
",",
"img",
")",
":",
"if",
"self",
".",
"findCount",
">",
"self",
".",
"max_images",
":",
"raise",
"EnoughImages",
"(",
"'have enough images'",
")",
"return",
"self",
".",
"addImg",
"(",
"img",
")"
] |
add images using a continuous stream
- stop when max number of images is reached
|
[
"add",
"images",
"using",
"a",
"continous",
"stream",
"-",
"stop",
"when",
"max",
"number",
"of",
"images",
"is",
"reached"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/LensDistortion.py#L120-L127
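A sketch of feeding frames until enough are collected; camera_frames is a hypothetical frame generator, and EnoughImages is the exception class the method raises (assumed importable from the same module):

try:
    for frame in camera_frames():
        ld.addImgStream(frame)
except EnoughImages:
    pass                                   # max_images reached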
|
radjkarl/imgProcessor
|
imgProcessor/camera/LensDistortion.py
|
LensDistortion.addImg
|
def addImg(self, img):
'''
add one chessboard image for detection lens distortion
'''
# self.opts['imgs'].append(img)
self.img = imread(img, 'gray', 'uint8')
didFindCorners, corners = self.method()
self.opts['foundPattern'].append(didFindCorners)
if didFindCorners:
self.findCount += 1
self.objpoints.append(self.objp)
self.opts['imgPoints'].append(corners)
return didFindCorners
|
python
|
def addImg(self, img):
'''
add one chessboard image for detection lens distortion
'''
# self.opts['imgs'].append(img)
self.img = imread(img, 'gray', 'uint8')
didFindCorners, corners = self.method()
self.opts['foundPattern'].append(didFindCorners)
if didFindCorners:
self.findCount += 1
self.objpoints.append(self.objp)
self.opts['imgPoints'].append(corners)
return didFindCorners
|
[
"def",
"addImg",
"(",
"self",
",",
"img",
")",
":",
"# self.opts['imgs'].append(img)\r",
"self",
".",
"img",
"=",
"imread",
"(",
"img",
",",
"'gray'",
",",
"'uint8'",
")",
"didFindCorners",
",",
"corners",
"=",
"self",
".",
"method",
"(",
")",
"self",
".",
"opts",
"[",
"'foundPattern'",
"]",
".",
"append",
"(",
"didFindCorners",
")",
"if",
"didFindCorners",
":",
"self",
".",
"findCount",
"+=",
"1",
"self",
".",
"objpoints",
".",
"append",
"(",
"self",
".",
"objp",
")",
"self",
".",
"opts",
"[",
"'imgPoints'",
"]",
".",
"append",
"(",
"corners",
")",
"return",
"didFindCorners"
] |
add one chessboard image for detecting lens distortion
|
[
"add",
"one",
"chessboard",
"image",
"for",
"detection",
"lens",
"distortion"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/LensDistortion.py#L129-L144
|
radjkarl/imgProcessor
|
imgProcessor/camera/LensDistortion.py
|
LensDistortion.getCoeffStr
|
def getCoeffStr(self):
'''
get the distortion coeffs in a formated string
'''
txt = ''
for key, val in self.coeffs.items():
txt += '%s = %s\n' % (key, val)
return txt
|
python
|
def getCoeffStr(self):
'''
get the distortion coeffs in a formated string
'''
txt = ''
for key, val in self.coeffs.items():
txt += '%s = %s\n' % (key, val)
return txt
|
[
"def",
"getCoeffStr",
"(",
"self",
")",
":",
"txt",
"=",
"''",
"for",
"key",
",",
"val",
"in",
"self",
".",
"coeffs",
".",
"items",
"(",
")",
":",
"txt",
"+=",
"'%s = %s\\n'",
"%",
"(",
"key",
",",
"val",
")",
"return",
"txt"
] |
get the distortion coeffs in a formatted string
|
[
"get",
"the",
"distortion",
"coeffs",
"in",
"a",
"formated",
"string"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/LensDistortion.py#L177-L184
|
radjkarl/imgProcessor
|
imgProcessor/camera/LensDistortion.py
|
LensDistortion.drawChessboard
|
def drawChessboard(self, img=None):
'''
draw a grid fitting to the last added image
on this one or an extra image
img == None
==False -> draw chessbord on empty image
==img
'''
assert self.findCount > 0, 'cannot draw chessboard if nothing found'
if img is None:
img = self.img
elif isinstance(img, bool) and not img:
img = np.zeros(shape=(self.img.shape), dtype=self.img.dtype)
else:
img = imread(img, dtype='uint8')
gray = False
if img.ndim == 2:
gray = True
# need a color 8 bit image
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
# Draw and display the corners
cv2.drawChessboardCorners(img, self.opts['size'],
self.opts['imgPoints'][-1],
self.opts['foundPattern'][-1])
if gray:
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
return img
|
python
|
def drawChessboard(self, img=None):
'''
draw a grid fitting to the last added image
on this one or an extra image
img == None
==False -> draw chessbord on empty image
==img
'''
assert self.findCount > 0, 'cannot draw chessboard if nothing found'
if img is None:
img = self.img
elif isinstance(img, bool) and not img:
img = np.zeros(shape=(self.img.shape), dtype=self.img.dtype)
else:
img = imread(img, dtype='uint8')
gray = False
if img.ndim == 2:
gray = True
# need a color 8 bit image
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
# Draw and display the corners
cv2.drawChessboardCorners(img, self.opts['size'],
self.opts['imgPoints'][-1],
self.opts['foundPattern'][-1])
if gray:
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
return img
|
[
"def",
"drawChessboard",
"(",
"self",
",",
"img",
"=",
"None",
")",
":",
"assert",
"self",
".",
"findCount",
">",
"0",
",",
"'cannot draw chessboard if nothing found'",
"if",
"img",
"is",
"None",
":",
"img",
"=",
"self",
".",
"img",
"elif",
"isinstance",
"(",
"img",
",",
"bool",
")",
"and",
"not",
"img",
":",
"img",
"=",
"np",
".",
"zeros",
"(",
"shape",
"=",
"(",
"self",
".",
"img",
".",
"shape",
")",
",",
"dtype",
"=",
"self",
".",
"img",
".",
"dtype",
")",
"else",
":",
"img",
"=",
"imread",
"(",
"img",
",",
"dtype",
"=",
"'uint8'",
")",
"gray",
"=",
"False",
"if",
"img",
".",
"ndim",
"==",
"2",
":",
"gray",
"=",
"True",
"# need a color 8 bit image\r",
"img",
"=",
"cv2",
".",
"cvtColor",
"(",
"img",
",",
"cv2",
".",
"COLOR_GRAY2BGR",
")",
"# Draw and display the corners\r",
"cv2",
".",
"drawChessboardCorners",
"(",
"img",
",",
"self",
".",
"opts",
"[",
"'size'",
"]",
",",
"self",
".",
"opts",
"[",
"'imgPoints'",
"]",
"[",
"-",
"1",
"]",
",",
"self",
".",
"opts",
"[",
"'foundPattern'",
"]",
"[",
"-",
"1",
"]",
")",
"if",
"gray",
":",
"img",
"=",
"cv2",
".",
"cvtColor",
"(",
"img",
",",
"cv2",
".",
"COLOR_BGR2GRAY",
")",
"return",
"img"
] |
draw a grid fitting the last added image,
either on that image or on an extra one
img == None  -> draw on the last added image
    == False -> draw chessboard on an empty image
    == img   -> draw on the given image
|
[
"draw",
"a",
"grid",
"fitting",
"to",
"the",
"last",
"added",
"image",
"on",
"this",
"one",
"or",
"an",
"extra",
"image",
"img",
"==",
"None",
"==",
"False",
"-",
">",
"draw",
"chessbord",
"on",
"empty",
"image",
"==",
"img"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/LensDistortion.py#L186-L212
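Two ways to call it after a successful pattern detection:

overlay = ld.drawChessboard()              # draw on the last added image
blank = ld.drawChessboard(img=False)       # draw on an empty image of the same shape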
|
radjkarl/imgProcessor
|
imgProcessor/camera/LensDistortion.py
|
LensDistortion.writeToFile
|
def writeToFile(self, filename, saveOpts=False):
'''
write the distortion coeffs to file
saveOpts --> Whether so save calibration options (and not just results)
'''
try:
if not filename.endswith('.%s' % self.ftype):
filename += '.%s' % self.ftype
s = {'coeffs': self.coeffs}
if saveOpts:
s['opts'] = self.opts
# else:
# s['opts':{}]
np.savez(filename, **s)
return filename
except AttributeError:
raise Exception(
'need to calibrate camera before calibration can be saved to file')
|
python
|
def writeToFile(self, filename, saveOpts=False):
'''
write the distortion coeffs to file
saveOpts --> Whether so save calibration options (and not just results)
'''
try:
if not filename.endswith('.%s' % self.ftype):
filename += '.%s' % self.ftype
s = {'coeffs': self.coeffs}
if saveOpts:
s['opts'] = self.opts
# else:
# s['opts':{}]
np.savez(filename, **s)
return filename
except AttributeError:
raise Exception(
'need to calibrate camera before calibration can be saved to file')
|
[
"def",
"writeToFile",
"(",
"self",
",",
"filename",
",",
"saveOpts",
"=",
"False",
")",
":",
"try",
":",
"if",
"not",
"filename",
".",
"endswith",
"(",
"'.%s'",
"%",
"self",
".",
"ftype",
")",
":",
"filename",
"+=",
"'.%s'",
"%",
"self",
".",
"ftype",
"s",
"=",
"{",
"'coeffs'",
":",
"self",
".",
"coeffs",
"}",
"if",
"saveOpts",
":",
"s",
"[",
"'opts'",
"]",
"=",
"self",
".",
"opts",
"# else:\r",
"# s['opts':{}]\r",
"np",
".",
"savez",
"(",
"filename",
",",
"*",
"*",
"s",
")",
"return",
"filename",
"except",
"AttributeError",
":",
"raise",
"Exception",
"(",
"'need to calibrate camera before calibration can be saved to file'",
")"
] |
write the distortion coeffs to file
saveOpts --> Whether to save calibration options (and not just results)
|
[
"write",
"the",
"distortion",
"coeffs",
"to",
"file",
"saveOpts",
"--",
">",
"Whether",
"so",
"save",
"calibration",
"options",
"(",
"and",
"not",
"just",
"results",
")"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/LensDistortion.py#L258-L275
|
radjkarl/imgProcessor
|
imgProcessor/camera/LensDistortion.py
|
LensDistortion.readFromFile
|
def readFromFile(self, filename):
'''
read the distortion coeffs from file
'''
s = dict(np.load(filename))
try:
self.coeffs = s['coeffs'][()]
except KeyError:
#LEGENCY - remove
self.coeffs = s
try:
self.opts = s['opts'][()]
except KeyError:
pass
return self.coeffs
|
python
|
def readFromFile(self, filename):
'''
read the distortion coeffs from file
'''
s = dict(np.load(filename))
try:
self.coeffs = s['coeffs'][()]
except KeyError:
#LEGENCY - remove
self.coeffs = s
try:
self.opts = s['opts'][()]
except KeyError:
pass
return self.coeffs
|
[
"def",
"readFromFile",
"(",
"self",
",",
"filename",
")",
":",
"s",
"=",
"dict",
"(",
"np",
".",
"load",
"(",
"filename",
")",
")",
"try",
":",
"self",
".",
"coeffs",
"=",
"s",
"[",
"'coeffs'",
"]",
"[",
"(",
")",
"]",
"except",
"KeyError",
":",
"#LEGENCY - remove\r",
"self",
".",
"coeffs",
"=",
"s",
"try",
":",
"self",
".",
"opts",
"=",
"s",
"[",
"'opts'",
"]",
"[",
"(",
")",
"]",
"except",
"KeyError",
":",
"pass",
"return",
"self",
".",
"coeffs"
] |
read the distortion coeffs from file
|
[
"read",
"the",
"distortion",
"coeffs",
"from",
"file"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/LensDistortion.py#L277-L291
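A save/load roundtrip combining writeToFile and readFromFile; the file extension comes from the instance's ftype attribute (presumably 'npz', given np.savez):

path = ld.writeToFile('my_camera', saveOpts=True)
ld2 = LensDistortion()                     # assumed no-argument constructor
coeffs = ld2.readFromFile(path)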
|
radjkarl/imgProcessor
|
imgProcessor/camera/LensDistortion.py
|
LensDistortion.undistortPoints
|
def undistortPoints(self, points, keepSize=False):
'''
points --> list of (x,y) coordinates
'''
s = self.img.shape
cam = self.coeffs['cameraMatrix']
d = self.coeffs['distortionCoeffs']
pts = np.asarray(points, dtype=np.float32)
if pts.ndim == 2:
pts = np.expand_dims(pts, axis=0)
(newCameraMatrix, roi) = cv2.getOptimalNewCameraMatrix(cam,
d, s[::-1], 1,
s[::-1])
if not keepSize:
xx, yy = roi[:2]
pts[0, 0] -= xx
pts[0, 1] -= yy
return cv2.undistortPoints(pts,
cam, d, P=newCameraMatrix)
|
python
|
def undistortPoints(self, points, keepSize=False):
'''
points --> list of (x,y) coordinates
'''
s = self.img.shape
cam = self.coeffs['cameraMatrix']
d = self.coeffs['distortionCoeffs']
pts = np.asarray(points, dtype=np.float32)
if pts.ndim == 2:
pts = np.expand_dims(pts, axis=0)
(newCameraMatrix, roi) = cv2.getOptimalNewCameraMatrix(cam,
d, s[::-1], 1,
s[::-1])
if not keepSize:
xx, yy = roi[:2]
pts[0, 0] -= xx
pts[0, 1] -= yy
return cv2.undistortPoints(pts,
cam, d, P=newCameraMatrix)
|
[
"def",
"undistortPoints",
"(",
"self",
",",
"points",
",",
"keepSize",
"=",
"False",
")",
":",
"s",
"=",
"self",
".",
"img",
".",
"shape",
"cam",
"=",
"self",
".",
"coeffs",
"[",
"'cameraMatrix'",
"]",
"d",
"=",
"self",
".",
"coeffs",
"[",
"'distortionCoeffs'",
"]",
"pts",
"=",
"np",
".",
"asarray",
"(",
"points",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"if",
"pts",
".",
"ndim",
"==",
"2",
":",
"pts",
"=",
"np",
".",
"expand_dims",
"(",
"pts",
",",
"axis",
"=",
"0",
")",
"(",
"newCameraMatrix",
",",
"roi",
")",
"=",
"cv2",
".",
"getOptimalNewCameraMatrix",
"(",
"cam",
",",
"d",
",",
"s",
"[",
":",
":",
"-",
"1",
"]",
",",
"1",
",",
"s",
"[",
":",
":",
"-",
"1",
"]",
")",
"if",
"not",
"keepSize",
":",
"xx",
",",
"yy",
"=",
"roi",
"[",
":",
"2",
"]",
"pts",
"[",
"0",
",",
"0",
"]",
"-=",
"xx",
"pts",
"[",
"0",
",",
"1",
"]",
"-=",
"yy",
"return",
"cv2",
".",
"undistortPoints",
"(",
"pts",
",",
"cam",
",",
"d",
",",
"P",
"=",
"newCameraMatrix",
")"
] |
points --> list of (x,y) coordinates
|
[
"points",
"--",
">",
"list",
"of",
"(",
"x",
"y",
")",
"coordinates"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/LensDistortion.py#L293-L314
|
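A usage sketch for undistortPoints; here ld stands for a calibrated LensDistortion instance whose self.img was set by a prior correct() call (the method reads self.img.shape), and the coordinates are illustrative.

import numpy as np

pts = [(120.5, 88.0), (640.0, 360.0)]   # (x, y) pixel coordinates
und = ld.undistortPoints(pts)           # float32, one undistorted (x, y) per input
print(np.asarray(und).reshape(-1, 2))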
radjkarl/imgProcessor
|
imgProcessor/camera/LensDistortion.py
|
LensDistortion.correct
|
def correct(self, image, keepSize=False, borderValue=0):
'''
remove lens distortion from given image
'''
image = imread(image)
(h, w) = image.shape[:2]
mapx, mapy = self.getUndistortRectifyMap(w, h)
self.img = cv2.remap(image, mapx, mapy, cv2.INTER_LINEAR,
borderMode=cv2.BORDER_CONSTANT,
borderValue=borderValue
)
if not keepSize:
xx, yy, ww, hh = self.roi
self.img = self.img[yy: yy + hh, xx: xx + ww]
return self.img
|
python
|
def correct(self, image, keepSize=False, borderValue=0):
'''
remove lens distortion from given image
'''
image = imread(image)
(h, w) = image.shape[:2]
mapx, mapy = self.getUndistortRectifyMap(w, h)
self.img = cv2.remap(image, mapx, mapy, cv2.INTER_LINEAR,
borderMode=cv2.BORDER_CONSTANT,
borderValue=borderValue
)
if not keepSize:
xx, yy, ww, hh = self.roi
self.img = self.img[yy: yy + hh, xx: xx + ww]
return self.img
|
[
"def",
"correct",
"(",
"self",
",",
"image",
",",
"keepSize",
"=",
"False",
",",
"borderValue",
"=",
"0",
")",
":",
"image",
"=",
"imread",
"(",
"image",
")",
"(",
"h",
",",
"w",
")",
"=",
"image",
".",
"shape",
"[",
":",
"2",
"]",
"mapx",
",",
"mapy",
"=",
"self",
".",
"getUndistortRectifyMap",
"(",
"w",
",",
"h",
")",
"self",
".",
"img",
"=",
"cv2",
".",
"remap",
"(",
"image",
",",
"mapx",
",",
"mapy",
",",
"cv2",
".",
"INTER_LINEAR",
",",
"borderMode",
"=",
"cv2",
".",
"BORDER_CONSTANT",
",",
"borderValue",
"=",
"borderValue",
")",
"if",
"not",
"keepSize",
":",
"xx",
",",
"yy",
",",
"ww",
",",
"hh",
"=",
"self",
".",
"roi",
"self",
".",
"img",
"=",
"self",
".",
"img",
"[",
"yy",
":",
"yy",
"+",
"hh",
",",
"xx",
":",
"xx",
"+",
"ww",
"]",
"return",
"self",
".",
"img"
] |
remove lens distortion from given image
|
[
"remove",
"lens",
"distortion",
"from",
"given",
"image"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/LensDistortion.py#L316-L330
|
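A minimal sketch of correct(); the file name is illustrative and ld is the calibrated instance assumed in the sketches above.

cropped = ld.correct('distorted.jpg')              # cropped to the valid ROI
full = ld.correct('distorted.jpg', keepSize=True)  # original size, constant border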
radjkarl/imgProcessor
|
imgProcessor/camera/LensDistortion.py
|
LensDistortion.distortImage
|
def distortImage(self, image):
'''
opposite of 'correct'
'''
image = imread(image)
(imgHeight, imgWidth) = image.shape[:2]
mapx, mapy = self.getDistortRectifyMap(imgWidth, imgHeight)
return cv2.remap(image, mapx, mapy, cv2.INTER_LINEAR,
borderValue=(0, 0, 0))
|
python
|
def distortImage(self, image):
'''
opposite of 'correct'
'''
image = imread(image)
(imgHeight, imgWidth) = image.shape[:2]
mapx, mapy = self.getDistortRectifyMap(imgWidth, imgHeight)
return cv2.remap(image, mapx, mapy, cv2.INTER_LINEAR,
borderValue=(0, 0, 0))
|
[
"def",
"distortImage",
"(",
"self",
",",
"image",
")",
":",
"image",
"=",
"imread",
"(",
"image",
")",
"(",
"imgHeight",
",",
"imgWidth",
")",
"=",
"image",
".",
"shape",
"[",
":",
"2",
"]",
"mapx",
",",
"mapy",
"=",
"self",
".",
"getDistortRectifyMap",
"(",
"imgWidth",
",",
"imgHeight",
")",
"return",
"cv2",
".",
"remap",
"(",
"image",
",",
"mapx",
",",
"mapy",
",",
"cv2",
".",
"INTER_LINEAR",
",",
"borderValue",
"=",
"(",
"0",
",",
"0",
",",
"0",
")",
")"
] |
opposite of 'correct'
|
[
"opposite",
"of",
"correct"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/LensDistortion.py#L332-L340
|
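distortImage applies the inverse mapping of correct(); a round-trip sketch (because of interpolation the result only approximates the original).

corrected = ld.correct('distorted.jpg', keepSize=True)
redistorted = ld.distortImage(corrected)  # close to, but not exactly, the input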
radjkarl/imgProcessor
|
imgProcessor/camera/LensDistortion.py
|
LensDistortion.getCameraParams
|
def getCameraParams(self):
'''
value positions based on
http://docs.opencv.org/modules/imgproc/doc/geometric_transformations.html#cv.InitUndistortRectifyMap
'''
c = self.coeffs['cameraMatrix']
fx = c[0][0]
fy = c[1][1]
cx = c[0][2]
cy = c[1][2]
k1, k2, p1, p2, k3 = tuple(self.coeffs['distortionCoeffs'].tolist()[0])
return fx, fy, cx, cy, k1, k2, k3, p1, p2
|
python
|
def getCameraParams(self):
'''
value positions based on
http://docs.opencv.org/modules/imgproc/doc/geometric_transformations.html#cv.InitUndistortRectifyMap
'''
c = self.coeffs['cameraMatrix']
fx = c[0][0]
fy = c[1][1]
cx = c[0][2]
cy = c[1][2]
k1, k2, p1, p2, k3 = tuple(self.coeffs['distortionCoeffs'].tolist()[0])
return fx, fy, cx, cy, k1, k2, k3, p1, p2
|
[
"def",
"getCameraParams",
"(",
"self",
")",
":",
"c",
"=",
"self",
".",
"coeffs",
"[",
"'cameraMatrix'",
"]",
"fx",
"=",
"c",
"[",
"0",
"]",
"[",
"0",
"]",
"fy",
"=",
"c",
"[",
"1",
"]",
"[",
"1",
"]",
"cx",
"=",
"c",
"[",
"0",
"]",
"[",
"2",
"]",
"cy",
"=",
"c",
"[",
"1",
"]",
"[",
"2",
"]",
"k1",
",",
"k2",
",",
"p1",
",",
"p2",
",",
"k3",
"=",
"tuple",
"(",
"self",
".",
"coeffs",
"[",
"'distortionCoeffs'",
"]",
".",
"tolist",
"(",
")",
"[",
"0",
"]",
")",
"return",
"fx",
",",
"fy",
",",
"cx",
",",
"cy",
",",
"k1",
",",
"k2",
",",
"k3",
",",
"p1",
",",
"p2"
] |
value positions based on
http://docs.opencv.org/modules/imgproc/doc/geometric_transformations.html#cv.InitUndistortRectifyMap
|
[
"value",
"positions",
"based",
"on",
"http",
":",
"//",
"docs",
".",
"opencv",
".",
"org",
"/",
"modules",
"/",
"imgproc",
"/",
"doc",
"/",
"geometric_transformations",
".",
"html#cv",
".",
"InitUndistortRectifyMap"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/LensDistortion.py#L360-L371
|
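The index positions above follow OpenCV's 3x3 pinhole camera matrix and its 1x5 distortion row; a sketch with synthetic values showing where each parameter sits.

import numpy as np

fx, fy, cx, cy = 1200.0, 1180.0, 640.0, 360.0  # illustrative values
cameraMatrix = np.array([[fx, 0., cx],
                         [0., fy, cy],
                         [0., 0., 1.]])
distortionCoeffs = np.array([[0.1, -0.05, 1e-3, 1e-3, 0.01]])  # [k1, k2, p1, p2, k3]
k1, k2, p1, p2, k3 = distortionCoeffs.tolist()[0]              # mirrors the method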
radjkarl/imgProcessor
|
imgProcessor/camera/LensDistortion.py
|
LensDistortion.standardUncertainties
|
def standardUncertainties(self, sharpness=0.5):
'''
sharpness -> image sharpness // std of Gaussian PSF [px]
returns a list of standard uncertainties for the x and y component:
(1x,2x), (1y, 2y), (intensity:None)
1. px-size-changes(due to deflection)
2. reprojection error
'''
height, width = self.coeffs['shape']
fx, fy = self.getDeflection(width, height)
# is RMSE of imgPoint-projectedPoints
r = self.coeffs['reprojectionError']
t = (sharpness**2 + r**2)**0.5
return fx * t, fy * t
|
python
|
def standardUncertainties(self, sharpness=0.5):
'''
sharpness -> image sharpness // std of Gaussian PSF [px]
returns a list of standard uncertainties for the x and y component:
(1x,2x), (1y, 2y), (intensity:None)
1. px-size-changes(due to deflection)
2. reprojection error
'''
height, width = self.coeffs['shape']
fx, fy = self.getDeflection(width, height)
# is RMSE of imgPoint-projectedPoints
r = self.coeffs['reprojectionError']
t = (sharpness**2 + r**2)**0.5
return fx * t, fy * t
|
[
"def",
"standardUncertainties",
"(",
"self",
",",
"sharpness",
"=",
"0.5",
")",
":",
"height",
",",
"width",
"=",
"self",
".",
"coeffs",
"[",
"'shape'",
"]",
"fx",
",",
"fy",
"=",
"self",
".",
"getDeflection",
"(",
"width",
",",
"height",
")",
"# is RMSE of imgPoint-projectedPoints\r",
"r",
"=",
"self",
".",
"coeffs",
"[",
"'reprojectionError'",
"]",
"t",
"=",
"(",
"sharpness",
"**",
"2",
"+",
"r",
"**",
"2",
")",
"**",
"0.5",
"return",
"fx",
"*",
"t",
",",
"fy",
"*",
"t"
] |
sharpness -> image sharpness // std of Gaussian PSF [px]
returns a list of standard uncertainties for the x and y component:
(1x,2x), (1y, 2y), (intensity:None)
1. px-size-changes(due to deflection)
2. reprojection error
|
[
"sharpness",
"-",
">",
"image",
"sharpness",
"//",
"std",
"of",
"Gaussian",
"PSF",
"[",
"px",
"]",
"returns",
"a",
"list",
"of",
"standard",
"uncertainties",
"for",
"the",
"x",
"and",
"y",
"component",
":",
"(",
"1x",
"2x",
")",
"(",
"1y",
"2y",
")",
"(",
"intensity",
":",
"None",
")",
"1",
".",
"px",
"-",
"size",
"-",
"changes",
"(",
"due",
"to",
"deflection",
")",
"2",
".",
"reprojection",
"error"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/LensDistortion.py#L404-L418
|
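standardUncertainties combines the PSF width and the reprojection RMSE in quadrature before scaling by the per-axis deflection factors; a numeric sketch of that step with illustrative values.

sharpness = 0.5                   # std of Gaussian PSF [px]
r = 1.2                           # reprojection RMSE [px]
t = (sharpness**2 + r**2) ** 0.5  # quadrature sum of independent errors
print(round(t, 2))                # 1.3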
radjkarl/imgProcessor
|
imgProcessor/filters/edgesFromBoolImg.py
|
edgesFromBoolImg
|
def edgesFromBoolImg(arr, dtype=None):
'''
takes a binary image (usually a mask)
and returns the edges of the object inside
'''
out = np.zeros_like(arr, dtype=dtype)
_calc(arr, out)
_calc(arr.T, out.T)
return out
|
python
|
def edgesFromBoolImg(arr, dtype=None):
'''
takes a binary image (usually a mask)
and returns the edges of the object inside
'''
out = np.zeros_like(arr, dtype=dtype)
_calc(arr, out)
_calc(arr.T, out.T)
return out
|
[
"def",
"edgesFromBoolImg",
"(",
"arr",
",",
"dtype",
"=",
"None",
")",
":",
"out",
"=",
"np",
".",
"zeros_like",
"(",
"arr",
",",
"dtype",
"=",
"dtype",
")",
"_calc",
"(",
"arr",
",",
"out",
")",
"_calc",
"(",
"arr",
".",
"T",
",",
"out",
".",
"T",
")",
"return",
"out"
] |
takes a binary image (usually a mask)
and returns the edges of the object inside
|
[
"takes",
"a",
"binary",
"image",
"(",
"usually",
"a",
"mask",
")",
"and",
"returns",
"the",
"edges",
"of",
"the",
"object",
"inside"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/filters/edgesFromBoolImg.py#L5-L13
|
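A toy sketch of edgesFromBoolImg on a small mask, assuming the module-internal _calc helper accepts boolean input; the import path is inferred from the file path field.

import numpy as np
from imgProcessor.filters.edgesFromBoolImg import edgesFromBoolImg

mask = np.zeros((7, 7), dtype=bool)
mask[2:5, 2:5] = True           # a solid 3x3 square
edges = edgesFromBoolImg(mask)  # only the square's outline should remain set
print(edges.astype(int))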
radjkarl/imgProcessor
|
imgProcessor/features/PatternRecognition.py
|
draw_matches
|
def draw_matches(img1, kp1, img2, kp2, matches, color=None, thickness=2, r=15):
"""Draws lines between matching keypoints of two images.
Keypoints not in a matching pair are not drawn.
Places the images side by side in a new image and draws circles
around each keypoint, with line segments connecting matching pairs.
You can tweak the r, thickness, and figsize values as needed.
Args:
img1: An openCV image ndarray in a grayscale or color format.
kp1: A list of cv2.KeyPoint objects for img1.
img2: An openCV image ndarray of the same format and with the same
element type as img1.
kp2: A list of cv2.KeyPoint objects for img2.
matches: A list of DMatch objects whose trainIdx attribute refers to
img1 keypoints and whose queryIdx attribute refers to img2 keypoints.
color: The color of the circles and connecting lines drawn on the images.
A 3-tuple for color images, a scalar for grayscale images. If None, these
values are randomly generated.
"""
# We're drawing them side by side. Get dimensions accordingly.
# Handle both color and grayscale images.
if len(img1.shape) == 3:
new_shape = (max(img1.shape[0], img2.shape[0]), img1.shape[
1] + img2.shape[1], img1.shape[2])
elif len(img1.shape) == 2:
new_shape = (
max(img1.shape[0], img2.shape[0]), img1.shape[1] + img2.shape[1])
new_img = np.zeros(new_shape, type(img1.flat[0]))
# Place images onto the new image.
new_img[0:img1.shape[0], 0:img1.shape[1]] = img1
new_img[0:img2.shape[0], img1.shape[1]
:img1.shape[1] + img2.shape[1]] = img2
# Draw lines between matches. Make sure to offset kp coords in second
# image appropriately.
if color:
c = color
for m in matches:
# Generate random color for RGB/BGR and grayscale images as needed.
if not color:
c = np.random.randint(0, 256, 3) if len(
img1.shape) == 3 else np.random.randint(0, 256)
# So the keypoint locs are stored as a tuple of floats. cv2.line(), like most other things,
# wants locs as a tuple of ints.
end1 = tuple(np.round(kp1[m.trainIdx].pt).astype(int))
end2 = tuple(np.round(kp2[m.queryIdx].pt).astype(
int) + np.array([img1.shape[1], 0]))
cv2.line(new_img, end1, end2, c, thickness)
cv2.circle(new_img, end1, r, c, thickness)
cv2.circle(new_img, end2, r, c, thickness)
return new_img
|
python
|
def draw_matches(img1, kp1, img2, kp2, matches, color=None, thickness=2, r=15):
"""Draws lines between matching keypoints of two images.
Keypoints not in a matching pair are not drawn.
Places the images side by side in a new image and draws circles
around each keypoint, with line segments connecting matching pairs.
You can tweak the r, thickness, and figsize values as needed.
Args:
img1: An openCV image ndarray in a grayscale or color format.
kp1: A list of cv2.KeyPoint objects for img1.
img2: An openCV image ndarray of the same format and with the same
element type as img1.
kp2: A list of cv2.KeyPoint objects for img2.
matches: A list of DMatch objects whose trainIdx attribute refers to
img1 keypoints and whose queryIdx attribute refers to img2 keypoints.
color: The color of the circles and connecting lines drawn on the images.
A 3-tuple for color images, a scalar for grayscale images. If None, these
values are randomly generated.
"""
# We're drawing them side by side. Get dimensions accordingly.
# Handle both color and grayscale images.
if len(img1.shape) == 3:
new_shape = (max(img1.shape[0], img2.shape[0]), img1.shape[
1] + img2.shape[1], img1.shape[2])
elif len(img1.shape) == 2:
new_shape = (
max(img1.shape[0], img2.shape[0]), img1.shape[1] + img2.shape[1])
new_img = np.zeros(new_shape, type(img1.flat[0]))
# Place images onto the new image.
new_img[0:img1.shape[0], 0:img1.shape[1]] = img1
new_img[0:img2.shape[0], img1.shape[1]
:img1.shape[1] + img2.shape[1]] = img2
# Draw lines between matches. Make sure to offset kp coords in second
# image appropriately.
if color:
c = color
for m in matches:
# Generate random color for RGB/BGR and grayscale images as needed.
if not color:
c = np.random.randint(0, 256, 3) if len(
img1.shape) == 3 else np.random.randint(0, 256)
# So the keypoint locs are stored as a tuple of floats. cv2.line(), like most other things,
# wants locs as a tuple of ints.
end1 = tuple(np.round(kp1[m.trainIdx].pt).astype(int))
end2 = tuple(np.round(kp2[m.queryIdx].pt).astype(
int) + np.array([img1.shape[1], 0]))
cv2.line(new_img, end1, end2, c, thickness)
cv2.circle(new_img, end1, r, c, thickness)
cv2.circle(new_img, end2, r, c, thickness)
return new_img
|
[
"def",
"draw_matches",
"(",
"img1",
",",
"kp1",
",",
"img2",
",",
"kp2",
",",
"matches",
",",
"color",
"=",
"None",
",",
"thickness",
"=",
"2",
",",
"r",
"=",
"15",
")",
":",
"# We're drawing them side by side. Get dimensions accordingly.\r",
"# Handle both color and grayscale images.\r",
"if",
"len",
"(",
"img1",
".",
"shape",
")",
"==",
"3",
":",
"new_shape",
"=",
"(",
"max",
"(",
"img1",
".",
"shape",
"[",
"0",
"]",
",",
"img2",
".",
"shape",
"[",
"0",
"]",
")",
",",
"img1",
".",
"shape",
"[",
"1",
"]",
"+",
"img2",
".",
"shape",
"[",
"1",
"]",
",",
"img1",
".",
"shape",
"[",
"2",
"]",
")",
"elif",
"len",
"(",
"img1",
".",
"shape",
")",
"==",
"2",
":",
"new_shape",
"=",
"(",
"max",
"(",
"img1",
".",
"shape",
"[",
"0",
"]",
",",
"img2",
".",
"shape",
"[",
"0",
"]",
")",
",",
"img1",
".",
"shape",
"[",
"1",
"]",
"+",
"img2",
".",
"shape",
"[",
"1",
"]",
")",
"new_img",
"=",
"np",
".",
"zeros",
"(",
"new_shape",
",",
"type",
"(",
"img1",
".",
"flat",
"[",
"0",
"]",
")",
")",
"# Place images onto the new image.\r",
"new_img",
"[",
"0",
":",
"img1",
".",
"shape",
"[",
"0",
"]",
",",
"0",
":",
"img1",
".",
"shape",
"[",
"1",
"]",
"]",
"=",
"img1",
"new_img",
"[",
"0",
":",
"img2",
".",
"shape",
"[",
"0",
"]",
",",
"img1",
".",
"shape",
"[",
"1",
"]",
":",
"img1",
".",
"shape",
"[",
"1",
"]",
"+",
"img2",
".",
"shape",
"[",
"1",
"]",
"]",
"=",
"img2",
"# Draw lines between matches. Make sure to offset kp coords in second\r",
"# image appropriately.\r",
"if",
"color",
":",
"c",
"=",
"color",
"for",
"m",
"in",
"matches",
":",
"# Generate random color for RGB/BGR and grayscale images as needed.\r",
"if",
"not",
"color",
":",
"c",
"=",
"np",
".",
"random",
".",
"randint",
"(",
"0",
",",
"256",
",",
"3",
")",
"if",
"len",
"(",
"img1",
".",
"shape",
")",
"==",
"3",
"else",
"np",
".",
"random",
".",
"randint",
"(",
"0",
",",
"256",
")",
"# So the keypoint locs are stored as a tuple of floats. cv2.line(), like most other things,\r",
"# wants locs as a tuple of ints.\r",
"end1",
"=",
"tuple",
"(",
"np",
".",
"round",
"(",
"kp1",
"[",
"m",
".",
"trainIdx",
"]",
".",
"pt",
")",
".",
"astype",
"(",
"int",
")",
")",
"end2",
"=",
"tuple",
"(",
"np",
".",
"round",
"(",
"kp2",
"[",
"m",
".",
"queryIdx",
"]",
".",
"pt",
")",
".",
"astype",
"(",
"int",
")",
"+",
"np",
".",
"array",
"(",
"[",
"img1",
".",
"shape",
"[",
"1",
"]",
",",
"0",
"]",
")",
")",
"cv2",
".",
"line",
"(",
"new_img",
",",
"end1",
",",
"end2",
",",
"c",
",",
"thickness",
")",
"cv2",
".",
"circle",
"(",
"new_img",
",",
"end1",
",",
"r",
",",
"c",
",",
"thickness",
")",
"cv2",
".",
"circle",
"(",
"new_img",
",",
"end2",
",",
"r",
",",
"c",
",",
"thickness",
")",
"return",
"new_img"
] |
Draws lines between matching keypoints of two images.
Keypoints not in a matching pair are not drawn.
Places the images side by side in a new image and draws circles
around each keypoint, with line segments connecting matching pairs.
You can tweak the r, thickness, and figsize values as needed.
Args:
img1: An openCV image ndarray in a grayscale or color format.
kp1: A list of cv2.KeyPoint objects for img1.
img2: An openCV image ndarray of the same format and with the same
element type as img1.
kp2: A list of cv2.KeyPoint objects for img2.
matches: A list of DMatch objects whose trainIdx attribute refers to
img1 keypoints and whose queryIdx attribute refers to img2 keypoints.
color: The color of the circles and connecting lines drawn on the images.
A 3-tuple for color images, a scalar for grayscale images. If None, these
values are randomly generated.
|
[
"Draws",
"lines",
"between",
"matching",
"keypoints",
"of",
"two",
"images",
".",
"Keypoints",
"not",
"in",
"a",
"matching",
"pair",
"are",
"not",
"drawn",
".",
"Places",
"the",
"images",
"side",
"by",
"side",
"in",
"a",
"new",
"image",
"and",
"draws",
"circles",
"around",
"each",
"keypoint",
"with",
"line",
"segments",
"connecting",
"matching",
"pairs",
".",
"You",
"can",
"tweak",
"the",
"r",
"thickness",
"and",
"figsize",
"values",
"as",
"needed",
".",
"Args",
":",
"img1",
":",
"An",
"openCV",
"image",
"ndarray",
"in",
"a",
"grayscale",
"or",
"color",
"format",
".",
"kp1",
":",
"A",
"list",
"of",
"cv2",
".",
"KeyPoint",
"objects",
"for",
"img1",
".",
"img2",
":",
"An",
"openCV",
"image",
"ndarray",
"of",
"the",
"same",
"format",
"and",
"with",
"the",
"same",
"element",
"type",
"as",
"img1",
".",
"kp2",
":",
"A",
"list",
"of",
"cv2",
".",
"KeyPoint",
"objects",
"for",
"img2",
".",
"matches",
":",
"A",
"list",
"of",
"DMatch",
"objects",
"whose",
"trainIdx",
"attribute",
"refers",
"to",
"img1",
"keypoints",
"and",
"whose",
"queryIdx",
"attribute",
"refers",
"to",
"img2",
"keypoints",
".",
"color",
":",
"The",
"color",
"of",
"the",
"circles",
"and",
"connecting",
"lines",
"drawn",
"on",
"the",
"images",
".",
"A",
"3",
"-",
"tuple",
"for",
"color",
"images",
"a",
"scalar",
"for",
"grayscale",
"images",
".",
"If",
"None",
"these",
"values",
"are",
"randomly",
"generated",
"."
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/features/PatternRecognition.py#L203-L252
|
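A usage sketch for draw_matches with OpenCV's ORB detector. Passing the second image's descriptors first to the matcher gives the index direction the docstring expects (trainIdx -> img1, queryIdx -> img2); file names are illustrative.

import cv2
from imgProcessor.features.PatternRecognition import draw_matches

img1 = cv2.imread('base.png', cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread('moved.png', cv2.IMREAD_GRAYSCALE)
orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = bf.match(des2, des1)  # queryIdx -> kp2, trainIdx -> kp1
vis = draw_matches(img1, kp1, img2, kp2, matches[:30])
cv2.imwrite('matches.png', vis)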
radjkarl/imgProcessor
|
imgProcessor/features/PatternRecognition.py
|
PatternRecognition._scaleTo8bit
|
def _scaleTo8bit(self, img):
'''
        The pattern comparator needs images to be 8 bit
-> find the range of the signal and scale the image
'''
r = scaleSignalCutParams(img, 0.02) # , nSigma=3)
self.signal_ranges.append(r)
return toUIntArray(img, dtype=np.uint8, range=r)
|
python
|
def _scaleTo8bit(self, img):
'''
        The pattern comparator needs images to be 8 bit
-> find the range of the signal and scale the image
'''
r = scaleSignalCutParams(img, 0.02) # , nSigma=3)
self.signal_ranges.append(r)
return toUIntArray(img, dtype=np.uint8, range=r)
|
[
"def",
"_scaleTo8bit",
"(",
"self",
",",
"img",
")",
":",
"r",
"=",
"scaleSignalCutParams",
"(",
"img",
",",
"0.02",
")",
"# , nSigma=3)\r",
"self",
".",
"signal_ranges",
".",
"append",
"(",
"r",
")",
"return",
"toUIntArray",
"(",
"img",
",",
"dtype",
"=",
"np",
".",
"uint8",
",",
"range",
"=",
"r",
")"
] |
The pattern comparator needs images to be 8 bit
-> find the range of the signal and scale the image
|
[
"The",
"pattern",
"comparator",
"needs",
"images",
"to",
"be",
"8",
"bit",
"-",
">",
"find",
"the",
"range",
"of",
"the",
"signal",
"and",
"scale",
"the",
"image"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/features/PatternRecognition.py#L89-L96
|
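_scaleTo8bit leans on two package helpers (scaleSignalCutParams, toUIntArray); a standalone NumPy sketch of the same idea, with a percentile clip standing in for the 2% signal cut.

import numpy as np

img = (np.random.rand(64, 64) * 4000).astype(np.float32)  # synthetic >8 bit signal
lo, hi = np.percentile(img, (2, 98))                      # cut ~2% outliers per end
img8 = np.clip((img - lo) / (hi - lo) * 255, 0, 255).astype(np.uint8)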
radjkarl/imgProcessor
|
imgProcessor/features/PatternRecognition.py
|
PatternRecognition.findHomography
|
def findHomography(self, img, drawMatches=False):
'''
Find homography of the image through pattern
comparison with the base image
'''
print("\t Finding points...")
# Find points in the next frame
img = self._prepareImage(img)
features, descs = self.detector.detectAndCompute(img, None)
######################
        # TODO: CURRENTLY BROKEN IN OPENCV3.1 - WAITING FOR NEW RELEASE 3.2
# matches = self.matcher.knnMatch(descs,#.astype(np.float32),
# self.base_descs,
# k=3)
# print("\t Match Count: ", len(matches))
# matches_subset = self._filterMatches(matches)
        # a working alternative (for now):
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches_subset = bf.match(descs, self.base_descs)
######################
# matches = bf.knnMatch(descs,self.base_descs, k=2)
# # Apply ratio test
# matches_subset = []
# medDist = np.median([m.distance for m in matches])
# matches_subset = [m for m in matches if m.distance < medDist]
# for m in matches:
# print(m.distance)
# for m,n in matches:
# if m.distance < 0.75*n.distance:
# matches_subset.append([m])
if not len(matches_subset):
raise Exception('no matches found')
print("\t Filtered Match Count: ", len(matches_subset))
distance = sum([m.distance for m in matches_subset])
print("\t Distance from Key Image: ", distance)
averagePointDistance = distance / (len(matches_subset))
print("\t Average Distance: ", averagePointDistance)
kp1 = []
kp2 = []
for match in matches_subset:
kp1.append(self.base_features[match.trainIdx])
kp2.append(features[match.queryIdx])
# /self._fH #scale with _fH, if image was resized
p1 = np.array([k.pt for k in kp1])
p2 = np.array([k.pt for k in kp2]) # /self._fH
H, status = cv2.findHomography(p1, p2,
cv2.RANSAC, # method
5.0 # max reprojection error (1...10)
)
if status is None:
raise Exception('no homography found')
else:
inliers = np.sum(status)
print('%d / %d inliers/matched' % (inliers, len(status)))
inlierRatio = inliers / len(status)
if self.minInlierRatio > inlierRatio or inliers < self.minInliers:
raise Exception('bad fit!')
# scale with _fH, if image was resized
# see
# http://answers.opencv.org/question/26173/the-relationship-between-homography-matrix-and-scaling-images/
s = np.eye(3, 3)
s[0, 0] = 1 / self._fH
s[1, 1] = 1 / self._fH
H = s.dot(H).dot(np.linalg.inv(s))
if drawMatches:
# s0,s1 = img.shape
# out = np.empty(shape=(s0,s1,3), dtype=np.uint8)
img = draw_matches(self.base8bit, self.base_features, img, features,
matches_subset[:20], # None,#out,
# flags=2
thickness=5
)
return (H, inliers, inlierRatio, averagePointDistance,
img, features,
descs, len(matches_subset))
|
python
|
def findHomography(self, img, drawMatches=False):
'''
Find homography of the image through pattern
comparison with the base image
'''
print("\t Finding points...")
# Find points in the next frame
img = self._prepareImage(img)
features, descs = self.detector.detectAndCompute(img, None)
######################
        # TODO: CURRENTLY BROKEN IN OPENCV3.1 - WAITING FOR NEW RELEASE 3.2
# matches = self.matcher.knnMatch(descs,#.astype(np.float32),
# self.base_descs,
# k=3)
# print("\t Match Count: ", len(matches))
# matches_subset = self._filterMatches(matches)
        # a working alternative (for now):
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches_subset = bf.match(descs, self.base_descs)
######################
# matches = bf.knnMatch(descs,self.base_descs, k=2)
# # Apply ratio test
# matches_subset = []
# medDist = np.median([m.distance for m in matches])
# matches_subset = [m for m in matches if m.distance < medDist]
# for m in matches:
# print(m.distance)
# for m,n in matches:
# if m.distance < 0.75*n.distance:
# matches_subset.append([m])
if not len(matches_subset):
raise Exception('no matches found')
print("\t Filtered Match Count: ", len(matches_subset))
distance = sum([m.distance for m in matches_subset])
print("\t Distance from Key Image: ", distance)
averagePointDistance = distance / (len(matches_subset))
print("\t Average Distance: ", averagePointDistance)
kp1 = []
kp2 = []
for match in matches_subset:
kp1.append(self.base_features[match.trainIdx])
kp2.append(features[match.queryIdx])
# /self._fH #scale with _fH, if image was resized
p1 = np.array([k.pt for k in kp1])
p2 = np.array([k.pt for k in kp2]) # /self._fH
H, status = cv2.findHomography(p1, p2,
cv2.RANSAC, # method
5.0 # max reprojection error (1...10)
)
if status is None:
raise Exception('no homography found')
else:
inliers = np.sum(status)
print('%d / %d inliers/matched' % (inliers, len(status)))
inlierRatio = inliers / len(status)
if self.minInlierRatio > inlierRatio or inliers < self.minInliers:
raise Exception('bad fit!')
# scale with _fH, if image was resized
# see
# http://answers.opencv.org/question/26173/the-relationship-between-homography-matrix-and-scaling-images/
s = np.eye(3, 3)
s[0, 0] = 1 / self._fH
s[1, 1] = 1 / self._fH
H = s.dot(H).dot(np.linalg.inv(s))
if drawMatches:
# s0,s1 = img.shape
# out = np.empty(shape=(s0,s1,3), dtype=np.uint8)
img = draw_matches(self.base8bit, self.base_features, img, features,
matches_subset[:20], # None,#out,
# flags=2
thickness=5
)
return (H, inliers, inlierRatio, averagePointDistance,
img, features,
descs, len(matches_subset))
|
[
"def",
"findHomography",
"(",
"self",
",",
"img",
",",
"drawMatches",
"=",
"False",
")",
":",
"print",
"(",
"\"\\t Finding points...\"",
")",
"# Find points in the next frame\r",
"img",
"=",
"self",
".",
"_prepareImage",
"(",
"img",
")",
"features",
",",
"descs",
"=",
"self",
".",
"detector",
".",
"detectAndCompute",
"(",
"img",
",",
"None",
")",
"######################\r",
"# TODO: CURRENTLY BROKEN IN OPENCV3.1 - WAITNG FOR NEW RELEASE 3.2\r",
"# matches = self.matcher.knnMatch(descs,#.astype(np.float32),\r",
"# self.base_descs,\r",
"# k=3)\r",
"# print(\"\\t Match Count: \", len(matches))\r",
"# matches_subset = self._filterMatches(matches)\r",
"# its working alternative (for now):\r",
"bf",
"=",
"cv2",
".",
"BFMatcher",
"(",
"cv2",
".",
"NORM_HAMMING",
",",
"crossCheck",
"=",
"True",
")",
"matches_subset",
"=",
"bf",
".",
"match",
"(",
"descs",
",",
"self",
".",
"base_descs",
")",
"######################\r",
"# matches = bf.knnMatch(descs,self.base_descs, k=2)\r",
"# # Apply ratio test\r",
"# matches_subset = []\r",
"# medDist = np.median([m.distance for m in matches])\r",
"# matches_subset = [m for m in matches if m.distance < medDist]\r",
"# for m in matches:\r",
"# print(m.distance)\r",
"# for m,n in matches:\r",
"# if m.distance < 0.75*n.distance:\r",
"# matches_subset.append([m])\r",
"if",
"not",
"len",
"(",
"matches_subset",
")",
":",
"raise",
"Exception",
"(",
"'no matches found'",
")",
"print",
"(",
"\"\\t Filtered Match Count: \"",
",",
"len",
"(",
"matches_subset",
")",
")",
"distance",
"=",
"sum",
"(",
"[",
"m",
".",
"distance",
"for",
"m",
"in",
"matches_subset",
"]",
")",
"print",
"(",
"\"\\t Distance from Key Image: \"",
",",
"distance",
")",
"averagePointDistance",
"=",
"distance",
"/",
"(",
"len",
"(",
"matches_subset",
")",
")",
"print",
"(",
"\"\\t Average Distance: \"",
",",
"averagePointDistance",
")",
"kp1",
"=",
"[",
"]",
"kp2",
"=",
"[",
"]",
"for",
"match",
"in",
"matches_subset",
":",
"kp1",
".",
"append",
"(",
"self",
".",
"base_features",
"[",
"match",
".",
"trainIdx",
"]",
")",
"kp2",
".",
"append",
"(",
"features",
"[",
"match",
".",
"queryIdx",
"]",
")",
"# /self._fH #scale with _fH, if image was resized\r",
"p1",
"=",
"np",
".",
"array",
"(",
"[",
"k",
".",
"pt",
"for",
"k",
"in",
"kp1",
"]",
")",
"p2",
"=",
"np",
".",
"array",
"(",
"[",
"k",
".",
"pt",
"for",
"k",
"in",
"kp2",
"]",
")",
"# /self._fH\r",
"H",
",",
"status",
"=",
"cv2",
".",
"findHomography",
"(",
"p1",
",",
"p2",
",",
"cv2",
".",
"RANSAC",
",",
"# method\r",
"5.0",
"# max reprojection error (1...10)\r",
")",
"if",
"status",
"is",
"None",
":",
"raise",
"Exception",
"(",
"'no homography found'",
")",
"else",
":",
"inliers",
"=",
"np",
".",
"sum",
"(",
"status",
")",
"print",
"(",
"'%d / %d inliers/matched'",
"%",
"(",
"inliers",
",",
"len",
"(",
"status",
")",
")",
")",
"inlierRatio",
"=",
"inliers",
"/",
"len",
"(",
"status",
")",
"if",
"self",
".",
"minInlierRatio",
">",
"inlierRatio",
"or",
"inliers",
"<",
"self",
".",
"minInliers",
":",
"raise",
"Exception",
"(",
"'bad fit!'",
")",
"# scale with _fH, if image was resized\r",
"# see\r",
"# http://answers.opencv.org/question/26173/the-relationship-between-homography-matrix-and-scaling-images/\r",
"s",
"=",
"np",
".",
"eye",
"(",
"3",
",",
"3",
")",
"s",
"[",
"0",
",",
"0",
"]",
"=",
"1",
"/",
"self",
".",
"_fH",
"s",
"[",
"1",
",",
"1",
"]",
"=",
"1",
"/",
"self",
".",
"_fH",
"H",
"=",
"s",
".",
"dot",
"(",
"H",
")",
".",
"dot",
"(",
"np",
".",
"linalg",
".",
"inv",
"(",
"s",
")",
")",
"if",
"drawMatches",
":",
"# s0,s1 = img.shape\r",
"# out = np.empty(shape=(s0,s1,3), dtype=np.uint8)\r",
"img",
"=",
"draw_matches",
"(",
"self",
".",
"base8bit",
",",
"self",
".",
"base_features",
",",
"img",
",",
"features",
",",
"matches_subset",
"[",
":",
"20",
"]",
",",
"# None,#out,\r",
"# flags=2\r",
"thickness",
"=",
"5",
")",
"return",
"(",
"H",
",",
"inliers",
",",
"inlierRatio",
",",
"averagePointDistance",
",",
"img",
",",
"features",
",",
"descs",
",",
"len",
"(",
"matches_subset",
")",
")"
] |
Find homography of the image through pattern
comparison with the base image
|
[
"Find",
"homography",
"of",
"the",
"image",
"through",
"pattern",
"comparison",
"with",
"the",
"base",
"image"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/features/PatternRecognition.py#L108-L196
|
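A usage sketch for findHomography; that the base image is passed at construction is an assumption inferred from the attributes used above (base8bit, base_features, base_descs), and file names are illustrative.

import cv2
from imgProcessor.features.PatternRecognition import PatternRecognition

base = cv2.imread('base.png', cv2.IMREAD_GRAYSCALE)
frame = cv2.imread('frame.png', cv2.IMREAD_GRAYSCALE)
pr = PatternRecognition(base)  # assumption: base image at construction
(H, inliers, inlierRatio, avgDist,
 vis, features, descs, nMatches) = pr.findHomography(frame)
print(H)  # 3x3 homography mapping base -> frame coordinates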
radjkarl/imgProcessor
|
imgProcessor/generate/patterns.py
|
patCircles
|
def patCircles(s0):
'''make circle array'''
arr = np.zeros((s0,s0), dtype=np.uint8)
col = 255
for rad in np.linspace(s0,s0/7.,10):
cv2.circle(arr, (0,0), int(round(rad)), color=col,
thickness=-1, lineType=cv2.LINE_AA )
if col:
col = 0
else:
col = 255
return arr.astype(float)
|
python
|
def patCircles(s0):
'''make circle array'''
arr = np.zeros((s0,s0), dtype=np.uint8)
col = 255
for rad in np.linspace(s0,s0/7.,10):
cv2.circle(arr, (0,0), int(round(rad)), color=col,
thickness=-1, lineType=cv2.LINE_AA )
if col:
col = 0
else:
col = 255
return arr.astype(float)
|
[
"def",
"patCircles",
"(",
"s0",
")",
":",
"arr",
"=",
"np",
".",
"zeros",
"(",
"(",
"s0",
",",
"s0",
")",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"col",
"=",
"255",
"for",
"rad",
"in",
"np",
".",
"linspace",
"(",
"s0",
",",
"s0",
"/",
"7.",
",",
"10",
")",
":",
"cv2",
".",
"circle",
"(",
"arr",
",",
"(",
"0",
",",
"0",
")",
",",
"int",
"(",
"round",
"(",
"rad",
")",
")",
",",
"color",
"=",
"col",
",",
"thickness",
"=",
"-",
"1",
",",
"lineType",
"=",
"cv2",
".",
"LINE_AA",
")",
"if",
"col",
":",
"col",
"=",
"0",
"else",
":",
"col",
"=",
"255",
"return",
"arr",
".",
"astype",
"(",
"float",
")"
] |
make circle array
|
[
"make",
"circle",
"array"
] |
train
|
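Both pattern generators return s0 x s0 float arrays with values 0/255; a quick sketch that renders the circle pattern (file name illustrative).

import cv2
from imgProcessor.generate.patterns import patCircles

arr = patCircles(512)  # alternating rings anchored at the top-left corner
cv2.imwrite('patCircles.png', arr.astype('uint8'))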
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/generate/patterns.py#L5-L18
|
radjkarl/imgProcessor
|
imgProcessor/generate/patterns.py
|
patCrossLines
|
def patCrossLines(s0):
'''make line pattern'''
arr = np.zeros((s0,s0), dtype=np.uint8)
col = 255
t = int(s0/100.)
for pos in np.logspace(0.01,1,10):
pos = int(round((pos-0.5)*s0/10.))
cv2.line(arr, (0,pos), (s0,pos), color=col,
thickness=t, lineType=cv2.LINE_AA )
cv2.line(arr, (pos,0), (pos,s0), color=col,
thickness=t, lineType=cv2.LINE_AA )
return arr.astype(float)
|
python
|
def patCrossLines(s0):
'''make line pattern'''
arr = np.zeros((s0,s0), dtype=np.uint8)
col = 255
t = int(s0/100.)
for pos in np.logspace(0.01,1,10):
pos = int(round((pos-0.5)*s0/10.))
cv2.line(arr, (0,pos), (s0,pos), color=col,
thickness=t, lineType=cv2.LINE_AA )
cv2.line(arr, (pos,0), (pos,s0), color=col,
thickness=t, lineType=cv2.LINE_AA )
return arr.astype(float)
|
[
"def",
"patCrossLines",
"(",
"s0",
")",
":",
"arr",
"=",
"np",
".",
"zeros",
"(",
"(",
"s0",
",",
"s0",
")",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"col",
"=",
"255",
"t",
"=",
"int",
"(",
"s0",
"/",
"100.",
")",
"for",
"pos",
"in",
"np",
".",
"logspace",
"(",
"0.01",
",",
"1",
",",
"10",
")",
":",
"pos",
"=",
"int",
"(",
"round",
"(",
"(",
"pos",
"-",
"0.5",
")",
"*",
"s0",
"/",
"10.",
")",
")",
"cv2",
".",
"line",
"(",
"arr",
",",
"(",
"0",
",",
"pos",
")",
",",
"(",
"s0",
",",
"pos",
")",
",",
"color",
"=",
"col",
",",
"thickness",
"=",
"t",
",",
"lineType",
"=",
"cv2",
".",
"LINE_AA",
")",
"cv2",
".",
"line",
"(",
"arr",
",",
"(",
"pos",
",",
"0",
")",
",",
"(",
"pos",
",",
"s0",
")",
",",
"color",
"=",
"col",
",",
"thickness",
"=",
"t",
",",
"lineType",
"=",
"cv2",
".",
"LINE_AA",
")",
"return",
"arr",
".",
"astype",
"(",
"float",
")"
] |
make line pattern
|
[
"make",
"line",
"pattern"
] |
train
|
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/generate/patterns.py#L21-L33
|
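The matching sketch for patCrossLines; since the positions come from np.logspace, the grid densifies toward the top-left corner.

from imgProcessor.generate.patterns import patCrossLines

grid = patCrossLines(512)  # cross-line grid, float values 0/255
print(grid.shape)          # (512, 512)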