code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
def dot_n(x, y):
    '''Return the N stacked dot products of two tensors.

    x - N x I x K (or I x K, broadcast over all N)
    y - N x K x J (or K x J, broadcast over all N)

    Returns N x I x J dot products (or I x J if both inputs are 2-d).

    Example:
    x = np.array([[[1,2], [3,4], [5,6]], [[7,8], [9,10], [11,12]]])
    y = np.array([[[1,2,3], [4,5,6]], [[7,8,9], [10,11,12]]])
    dot_n(x, y)
    array([[[  9,  12,  15],
            [ 19,  26,  33],
            [ 29,  40,  51]],
           [[129, 144, 159],
            [163, 182, 201],
            [197, 220, 243]]])
    '''
    if x.ndim == 2 and y.ndim == 2:
        return np.dot(x, y)
    # Preserve the original contract checks before delegating to matmul.
    if x.ndim == 3 and y.ndim == 3:
        assert x.shape[0] == y.shape[0]
    assert x.shape[-1] == y.shape[-2]
    # np.matmul implements exactly this stacked/broadcast product and avoids
    # the original's O(N*I*J*K) np.mgrid index-grid memory blow-up.
    return np.matmul(x, y)
given two tensors N x I x K and N x K x J return N dot products If either x or y is 2-dimensional, broadcast it over all N. Dot products are size N x I x J. Example: x = np.array([[[1,2], [3,4], [5,6]],[[7,8], [9,10],[11,12]]]) y = np.array([[[1,2,3], [4,5,6]],[[7,8,9],[10,11,12]]]) print dot_n(x,y) array([[[ 9, 12, 15], [ 19, 26, 33], [ 29, 40, 51]], [[129, 144, 159], [163, 182, 201], [197, 220, 243]]])
2.476165
1.555972
1.591394
def permutations(x):
    '''Yield all permutations of the listlike x.

    Permutations come out in the lexical order of their indices, e.g.:

    >>> for p in permutations([1, 2, 3, 4]):
    ...     print(p)
    [1, 2, 3, 4]
    [1, 2, 4, 3]
    [1, 3, 2, 4]
    ...
    [4, 3, 2, 1]
    '''
    #
    # The algorithm is attributed to Narayana Pandit from his
    # Ganita Kaumundi (1356). See
    # http://en.wikipedia.org/wiki/Permutation#Systematic_generation_of_all_permutations
    #
    # 1. Find the largest index k such that a[k] < a[k + 1].
    #    If no such index exists, the permutation is the last permutation.
    # 2. Find the largest index l such that a[k] < a[l].
    # 3. Swap a[k] with a[l].
    # 4. Reverse the sequence from a[k + 1] to the final element a[n].
    #
    yield list(x)  # don't forget to do the first one
    x = np.array(x)
    a = np.arange(len(x))
    while True:
        # 1 - find largest k with a[k] < a[k+1], or stop
        ak_lt_ak_next = np.argwhere(a[:-1] < a[1:])
        if len(ak_lt_ak_next) == 0:
            # PEP 479: raising StopIteration inside a generator is a
            # RuntimeError on Python 3.7+; a plain return ends iteration.
            return
        k = ak_lt_ak_next[-1, 0]
        # 2 - find largest l with a[k] < a[l]
        l = np.argwhere(a[k] < a)[-1, 0]
        # 3 - swap
        a[k], a[l] = (a[l], a[k])
        # 4 - reverse a[k+1:]
        if k < len(x) - 1:
            a[k+1:] = a[:k:-1].copy()
        yield x[a].tolist()
Given a listlike, x, return all permutations of x Returns the permutations of x in the lexical order of their indices: e.g. >>> x = [ 1, 2, 3, 4 ] >>> for p in permutations(x): >>> print p [ 1, 2, 3, 4 ] [ 1, 2, 4, 3 ] [ 1, 3, 2, 4 ] [ 1, 3, 4, 2 ] [ 1, 4, 2, 3 ] [ 1, 4, 3, 2 ] [ 2, 1, 3, 4 ] ... [ 4, 3, 2, 1 ]
2.889035
2.390663
1.208466
def circular_hough(img, radius, nangles=None, mask=None):
    '''Circular Hough transform of an image.

    img - image to be transformed
    radius - radius of circle
    nangles - number of sample angles; e.g. nangles = 4 accumulates at
              0, 90, 180 and 270 degrees. Defaults to roughly the
              circumference, rounded down to a multiple of 4 so the
              sampling is bilaterally stable.
    mask - optional mask restricting which pixels accumulate

    Returns the accumulators for the transform x + r cos t, y + r sin t.
    '''
    accumulator = np.zeros(img.shape)
    counts = np.zeros(img.shape)
    if nangles is None:
        nangles = int(np.pi * radius + 3.5) & (~3)
    for step in range(nangles):
        angle = 2 * np.pi * float(step) / float(nangles)
        dx = int(np.round(radius * np.cos(angle)))
        dy = int(np.round(radius * np.sin(angle)))
        # Clip the shifted window so both src and dest stay in bounds.
        x0, x1 = max(0, -dx), min(img.shape[1] - dx, img.shape[1])
        y0, y1 = max(0, -dy), min(img.shape[0] - dy, img.shape[0])
        dest = (slice(y0, y1), slice(x0, x1))
        src = (slice(y0 + dy, y1 + dy), slice(x0 + dx, x1 + dx))
        if mask is None:
            accumulator[dest] += img[src]
            counts[dest] += 1
        else:
            hit = mask[src]
            accumulator[dest][hit] += img[src][hit]
            counts[dest][hit] += 1
    touched = counts > 0
    accumulator[touched] /= counts[touched]
    return accumulator
Circular Hough transform of an image img - image to be transformed. radius - radius of circle nangles - # of angles to measure, e.g. nangles = 4 means accumulate at 0, 90, 180 and 270 degrees. Return the Hough transform of the image which is the accumulators for the transform x + r cos t, y + r sin t.
2.873579
2.026479
1.418015
def predicted_state_vec(self):
    '''The predicted state vector for the next time point (Welch eqn 1.9).'''
    if not self.has_cached_predicted_state_vec:
        state_col = self.state_vec[:, :, np.newaxis]
        self.p_state_vec = dot_n(self.translation_matrix, state_col)[:, :, 0]
    return self.p_state_vec
The predicted state vector for the next time point From Welch eqn 1.9
7.76088
4.500947
1.724277
def predicted_obs_vec(self):
    '''The predicted observation vector for the next step in the filter.'''
    if not self.has_cached_obs_vec:
        predicted_col = self.predicted_state_vec[:, :, np.newaxis]
        self.obs_vec = dot_n(self.observation_matrix, predicted_col)[:, :, 0]
    return self.obs_vec
The predicted observation vector The observation vector for the next step in the filter.
6.084919
4.355196
1.397163
def map_frames(self, old_indices):
    '''Rewrite the feature indexes based on the next frame's identities.

    old_indices - for each feature in the new frame, the index of the
                  old feature
    '''
    n_new = len(old_indices)
    n_old = len(self.state_vec)
    if n_new > 0:
        self.state_vec = self.state_vec[old_indices]
        self.state_cov = self.state_cov[old_indices]
        self.noise_var = self.noise_var[old_indices]
        if self.has_cached_obs_vec:
            self.obs_vec = self.obs_vec[old_indices]
        if self.has_cached_predicted_state_vec:
            self.p_state_vec = self.p_state_vec[old_indices]
        if len(self.state_noise_idx) > 0:
            # Renumber the state-noise indices via a reverse lookup table
            # (-1 = old feature has no match) and drop unmatched entries.
            reverse_indices = -np.ones(n_old, int)
            reverse_indices[old_indices] = np.arange(n_new)
            self.state_noise_idx = reverse_indices[self.state_noise_idx]
            keep = self.state_noise_idx != -1
            self.state_noise = self.state_noise[keep, :]
            self.state_noise_idx = self.state_noise_idx[keep]
Rewrite the feature indexes based on the next frame's identities old_indices - for each feature in the new frame, the index of the old feature
4.140119
3.347994
1.236597
def add_features(self, kept_indices, new_indices,
                 new_state_vec, new_state_cov, new_noise_var):
    '''Add new features to the state.

    kept_indices - the mapping from all indices in the state to new
                   indices in the new version
    new_indices - the indices of the new features in the new version
    new_state_vec - the state vectors for the new indices
    new_state_cov - the covariance matrices for the new indices
    new_noise_var - the noise variances for the new indices
    '''
    assert len(kept_indices) == len(self.state_vec)
    assert len(new_indices) == len(new_state_vec)
    assert len(new_indices) == len(new_state_cov)
    assert len(new_indices) == len(new_noise_var)
    # Invalidate the cached vectors; they are stale once features change.
    if self.has_cached_obs_vec:
        del self.obs_vec
    if self.has_cached_predicted_state_vec:
        # Bug fix: the cached predicted state lives in p_state_vec (see
        # predicted_state_vec); the original deleted predicted_obs_vec,
        # which is not an instance attribute and raised AttributeError.
        del self.p_state_vec
    nfeatures = len(kept_indices) + len(new_indices)
    next_state_vec = np.zeros((nfeatures, self.state_len))
    next_state_cov = np.zeros((nfeatures, self.state_len, self.state_len))
    next_noise_var = np.zeros((nfeatures, self.state_len))
    if len(kept_indices) > 0:
        next_state_vec[kept_indices] = self.state_vec
        next_state_cov[kept_indices] = self.state_cov
        next_noise_var[kept_indices] = self.noise_var
        if len(self.state_noise_idx) > 0:
            self.state_noise_idx = kept_indices[self.state_noise_idx]
    if len(new_indices) > 0:
        next_state_vec[new_indices] = new_state_vec
        next_state_cov[new_indices] = new_state_cov
        next_noise_var[new_indices] = new_noise_var
    self.state_vec = next_state_vec
    self.state_cov = next_state_cov
    self.noise_var = next_noise_var
Add new features to the state kept_indices - the mapping from all indices in the state to new indices in the new version new_indices - the indices of the new features in the new version new_state_vec - the state vectors for the new indices new_state_cov - the covariance matrices for the new indices new_noise_var - the noise variances for the new indices
1.816962
1.529093
1.188261
def deep_copy(self):
    '''Return a deep copy of the state.'''
    clone = KalmanState(self.observation_matrix, self.translation_matrix)
    for attr in ("state_vec", "state_cov", "noise_var",
                 "state_noise", "state_noise_idx"):
        setattr(clone, attr, getattr(self, attr).copy())
    return clone
Return a deep copy of the state
2.947849
2.820705
1.045075
def prcntiles(x, percents):
    '''Equivalent to matlab prctile(x,p); uses linear interpolation.

    x - array-like of sample values (flattened before use)
    percents - sequence of percentiles to extract

    Returns a len(percents) x 1 array of interpolated values.
    '''
    data = np.sort(np.array(x).flatten())
    n = len(data)
    # MATLAB convention: the i-th sorted sample sits at percentile
    # 100 * (i + 0.5) / n.
    refs = 100.0 * (np.arange(n) + 0.5) / n
    # np.interp performs the same piecewise-linear lookup as the original
    # Python loops, clamping to the end samples outside [refs[0], refs[-1]].
    rpcts = np.interp(percents, refs, data)
    return np.array([rpcts]).transpose()
Equivalent to matlab prctile(x,p), uses linear interpolation.
3.443912
3.023849
1.138917
def automode(data):
    '''Guess the polarity of objects in an image.

    Returns 1 for dark objects on a bright background, -1 for bright
    objects on a dark background, and 0 when there are both dark and
    bright objects on a gray background (or no objects at all).
    '''
    pct = prcntiles(np.array(data), [1, 20, 80, 99])
    upper = pct[3] - pct[2]
    mid = pct[2] - pct[1]
    lower = pct[1] - pct[0]
    # A wide tail relative to the middle of the histogram signals objects.
    has_upper = upper > mid
    has_lower = lower > mid
    if has_upper and has_lower:
        return 0   # both upper and lower objects
    if has_upper:
        return -1  # only upper objects
    if has_lower:
        return 1   # only lower objects
    return 0       # no objects at all
Tries to guess if the image contains dark objects on a bright background (1) or if the image contains bright objects on a dark background (-1), or if it contains both dark and bright objects on a gray background (0).
3.221039
2.433055
1.323866
def spline_factors(u):
    '''Uniform cubic B-spline blending factors at the points of u (np.array).

    Returns a 4 x len(u) array: one row per basis function.
    '''
    u2 = u ** 2
    u3 = u ** 3
    b0 = (1. - u) ** 3
    b1 = 4 - (6. * u2) + (3. * u3)
    b2 = 1. + (3. * u) + (3. * u2) - (3. * u3)
    b3 = u3
    return np.array([b0, b1, b2, b3]) * (1. / 6)
u is np.array
6.555353
6.408518
1.022913
def pick(picklist, val):
    '''Index of the first value in picklist that is larger than val.

    If none is larger, the index equals len(picklist). picklist must be
    sorted ascending.
    '''
    assert np.all(np.sort(picklist) == picklist), "pick list is not ordered correctly"
    picklist = np.asarray(picklist)
    val = np.array(val)
    # Counting the picklist entries that are <= each value gives exactly
    # the index of the first larger entry (broadcast replaces the original
    # np.mgrid construction).
    return np.sum(picklist[:, np.newaxis] <= val[np.newaxis, :], 0)
Index to first value in picklist that is larger than val. If none is larger, index=len(picklist).
5.611813
4.546666
1.23427
def confine(x, low, high):
    '''Confine x to [low,high]. Values outside are set to low/high.

    See also restrict.
    '''
    # np.clip performs the same copy-then-saturate in one vectorized call.
    return np.clip(x, low, high)
Confine x to [low,high]. Values outside are set to low/high. See also restrict.
5.294755
2.962229
1.787423
def gauss(x, m_y, sigma):
    '''Return the Gaussian with mean m_y and std. dev. sigma at the points of x.'''
    x = np.asarray(x, dtype=float)
    sigma = float(sigma)
    # Vectorized (1/(sigma*sqrt(2*pi))) * exp(-(x-m)^2/(2*sigma^2)); the
    # original built this element-by-element with Python list comprehensions.
    norm = 1.0 / (sigma * np.sqrt(2 * np.pi))
    return norm * np.exp(-((x - m_y) ** 2) / (2.0 * sigma ** 2))
returns the gaussian with mean m_y and std. dev. sigma, calculated at the points of x.
4.373015
3.301459
1.324571
def d2gauss(x, m_y, sigma):
    '''Return the second derivative of the Gaussian with mean m_y and
    standard deviation sigma, calculated at the points of x.'''
    x = np.asarray(x, dtype=float)
    sigma = float(sigma)
    g = np.exp(-((x - m_y) ** 2) / (2.0 * sigma ** 2)) / (sigma * np.sqrt(2 * np.pi))
    # g''(x) = g(x) * ((x - m)^2 / sigma^4 - 1 / sigma^2); vectorized in
    # place of the original per-element list comprehension.
    return g * ((x - m_y) ** 2 / sigma ** 4 - 1.0 / sigma ** 2)
returns the second derivative of the gaussian with mean m_y, and standard deviation sigma, calculated at the points of x.
6.351831
3.676574
1.72765
def spline_matrix2d(x, y, px, py, mask=None):
    '''2-d spline design matrix as the Kronecker product of the 1-d matrices.

    For boundary constraints, the first two and last two spline pieces are
    constrained to be part of the same cubic curve.
    '''
    V = np.kron(spline_matrix(x, px), spline_matrix(y, py))
    if mask is not None:
        # Keep only the columns that correspond to masked-in pixels.
        indices = np.nonzero(mask.T.flatten())
        if len(indices) > 1:
            indices = np.nonzero(mask.T.flatten())[1][0]
        V = V.T[indices].T
        V = V.reshape((V.shape[0], V.shape[1]))
    return V
For boundary constraints, the first two and last two spline pieces are constrained to be part of the same cubic curve.
5.992653
3.535177
1.695149
def splinefit2d(x, y, z, px, py, mask=None):
    '''Least-squares fit of the spline (px, py, pz) to the surface (x, y, z).

    If mask is given, only masked points are used for the regression.
    '''
    if mask is None:
        V = np.array(spline_matrix2d(x, y, px, py))
        rhs = np.array(z.T.flatten())
        pz = np.linalg.lstsq(V.T, rhs.T)[0].T
    else:
        V = np.array(spline_matrix2d(x, y, px, py, mask))
        idx = np.nonzero(np.array(mask).T.flatten())
        if len(idx[0]) == 0:
            # The mask can collapse to all zeros; fall back to a flat surface.
            pz = np.zeros((len(py), len(px)))
        else:
            rhs = np.array(z.T.flatten()[idx[0]])
            pz = np.linalg.lstsq(V.T, rhs.T)[0].T
    return pz.reshape((len(py), len(px))).transpose()
Make a least squares fit of the spline (px,py,pz) to the surface (x,y,z). If mask is given, only masked points are used for the regression.
3.679736
2.821247
1.304294
def bg_compensate(img, sigma, splinepoints, scale):
    '''Reads file, subtracts background. Returns [compensated image, background].'''
    from PIL import Image
    import pylab
    from matplotlib.image import pil_to_array
    from centrosome.filter import canny
    import matplotlib

    img = Image.open(img)
    if img.mode == 'I;16':
        # 16-bit image
        # deal with the endianness explicitly... I'm not sure
        # why PIL doesn't get this right.
        # np.fromstring / Image.tostring were removed from their libraries;
        # frombuffer / tobytes are the supported equivalents.
        imgdata = np.frombuffer(img.tobytes(), np.uint8)
        imgdata.shape = (int(imgdata.shape[0] / 2), 2)
        imgdata = imgdata.astype(np.uint16)
        hi, lo = (0, 1) if img.tag.prefix == 'MM' else (1, 0)
        imgdata = imgdata[:, hi] * 256 + imgdata[:, lo]
        img_size = list(img.size)
        img_size.reverse()
        new_img = imgdata.reshape(img_size)
        # The magic # for maximum sample value is 281
        if 281 in img.tag:
            img = new_img.astype(np.float32) / img.tag[281][0]
        elif np.max(new_img) < 4096:
            img = new_img.astype(np.float32) / 4095.
        else:
            img = new_img.astype(np.float32) / 65535.
    else:
        img = pil_to_array(img)

    pylab.subplot(1, 3, 1).imshow(img, cmap=matplotlib.cm.Greys_r)
    pylab.show()

    if len(img.shape) > 2:
        raise ValueError('Image must be grayscale')

    ## Create mask that will fix problem when image has black areas outside of well
    edges = canny(img, np.ones(img.shape, bool), 2, .1, .3)
    ci = np.cumsum(edges, 0)
    cj = np.cumsum(edges, 1)
    i, j = np.mgrid[0:img.shape[0], 0:img.shape[1]]
    mask = ci > 0
    mask = mask & (cj > 0)
    mask[1:, :] &= (ci[0:-1, :] < ci[-1, j[0:-1, :]])
    mask[:, 1:] &= (cj[:, 0:-1] < cj[i[:, 0:-1], -1])

    import time
    # time.clock was removed in Python 3.8; perf_counter is the replacement.
    t0 = time.perf_counter()
    bg = backgr(img, mask, MODE_AUTO, sigma, splinepoints=splinepoints, scale=scale)
    print("Executed in %f sec" % (time.perf_counter() - t0))
    bg[~mask] = img[~mask]

    pylab.subplot(1, 3, 2).imshow(img - bg, cmap=matplotlib.cm.Greys_r)
    pylab.subplot(1, 3, 3).imshow(bg, cmap=matplotlib.cm.Greys_r)
    pylab.show()
Reads file, subtracts background. Returns [compensated image, background].
3.578878
3.412389
1.04879
def mode(a):
    '''Compute the mode of an array.

    a - an array

    Returns a vector of the most frequent value(s) -- more than one if
    there is a tie.
    '''
    a = np.asanyarray(a)
    if a.size == 0:
        return np.zeros(0, a.dtype)
    flat = np.sort(a.flatten())
    # Start offset of each run of equal values, plus an end sentinel.
    starts = np.hstack([[0], np.where(flat[:-1] != flat[1:])[0] + 1, [flat.size]])
    run_lengths = starts[1:] - starts[:-1]
    winners = starts[:-1][run_lengths == run_lengths.max()]
    return flat[winners]
Compute the mode of an array a: an array returns a vector of values which are the most frequent (more than one if there is a tie).
3.412216
2.288921
1.490753
def otsu(data, min_threshold=None, max_threshold=None, bins=256):
    '''Compute a threshold using Otsu's method.

    data - an array of intensity values between zero and one
    min_threshold - only consider thresholds above this minimum value
    max_threshold - only consider thresholds below this maximum value
    bins - we bin the data into this many equally-spaced bins, then pick
           the bin index that optimizes the metric
    '''
    assert min_threshold is None or max_threshold is None or min_threshold < max_threshold

    def constrain(threshold):
        # Clamp the chosen threshold into [min_threshold, max_threshold].
        if min_threshold is not None and threshold < min_threshold:
            threshold = min_threshold
        if max_threshold is not None and threshold > max_threshold:
            threshold = max_threshold
        return threshold

    data = np.atleast_1d(data)
    data = data[~np.isnan(data)]
    if len(data) == 0:
        if min_threshold is not None:
            return min_threshold
        if max_threshold is not None:
            return max_threshold
        return 0
    if len(data) == 1:
        return constrain(data[0])
    if bins > len(data):
        bins = len(data)
    data.sort()
    var = running_variance(data)
    rvar = np.flipud(running_variance(np.flipud(data)))
    step = len(data) // bins
    thresholds = data[1:len(data):step]
    # Weighted within-class variance below and above each candidate split.
    score_low = var[0:len(data) - 1:step] * np.arange(0, len(data) - 1, step)
    score_high = rvar[1:len(data):step] * (len(data) - np.arange(1, len(data), step))
    scores = score_low + score_high
    if len(scores) == 0:
        return constrain(thresholds[0])
    index = np.argwhere(scores == scores.min()).flatten()
    if len(index) == 0:
        return constrain(thresholds[0])
    # Average the thresholds to either side of the chosen value to get an
    # intermediate in cases where there is a steep step between the
    # background and foreground.
    index = index[0]
    index_low = index - 1 if index > 0 else 0
    index_high = index + 1 if index < len(thresholds) - 1 else len(thresholds) - 1
    return constrain((thresholds[index_low] + thresholds[index_high]) / 2)
Compute a threshold using Otsu's method data - an array of intensity values between zero and one min_threshold - only consider thresholds above this minimum value max_threshold - only consider thresholds below this maximum value bins - we bin the data into this many equally-spaced bins, then pick the bin index that optimizes the metric
2.513831
2.535352
0.991512
def entropy(data, bins=256):
    '''Compute a threshold using Ray's entropy measurement.

    data - an array of intensity values between zero and one
    bins - we bin the data into this many equally-spaced bins, then pick
           the bin index that optimizes the metric
    '''
    data = np.atleast_1d(data)
    data = data[~np.isnan(data)]
    if len(data) == 0:
        return 0
    if len(data) == 1:
        return data[0]
    if bins > len(data):
        bins = len(data)
    data.sort()
    # The 1/512 offset regularizes the variance so the log stays defined
    # across flat runs of data.
    var = running_variance(data) + 1.0 / 512.0
    rvar = np.flipud(running_variance(np.flipud(data))) + 1.0 / 512.0
    step = len(data) // bins
    thresholds = data[1:len(data):step]
    w = np.arange(0, len(data) - 1, step)
    score_low = w * np.log(var[0:len(data) - 1:step] * w * np.sqrt(2 * np.pi * np.exp(1)))
    score_low[np.isnan(score_low)] = 0
    w = len(data) - np.arange(1, len(data), step)
    score_high = w * np.log(rvar[1:len(data):step] * w * np.sqrt(2 * np.pi * np.exp(1)))
    score_high[np.isnan(score_high)] = 0
    scores = score_low + score_high
    index = np.argwhere(scores == scores.min()).flatten()
    if len(index) == 0:
        return thresholds[0]
    # Average the thresholds to either side of the chosen value to get an
    # intermediate in cases where there is a steep step between the
    # background and foreground.
    index = index[0]
    index_low = index - 1 if index > 0 else 0
    index_high = index + 1 if index < len(thresholds) - 1 else len(thresholds) - 1
    return (thresholds[index_low] + thresholds[index_high]) / 2
Compute a threshold using Ray's entropy measurement data - an array of intensity values between zero and one bins - we bin the data into this many equally-spaced bins, then pick the bin index that optimizes the metric
2.920562
2.921957
0.999523
def otsu3(data, min_threshold=None, max_threshold=None, bins=128):
    '''Compute a threshold using a 3-category Otsu-like method.

    data - an array of intensity values between zero and one
    min_threshold - only consider thresholds above this minimum value
    max_threshold - only consider thresholds below this maximum value
    bins - we bin the data into this many equally-spaced bins, then pick
           the bin index that optimizes the metric

    We find the maximum weighted variance, breaking the histogram into
    three pieces. Returns the lower and upper thresholds.
    '''
    assert min_threshold is None or max_threshold is None or min_threshold < max_threshold
    #
    # Compute the running variance and reverse running variance.
    #
    data = np.atleast_1d(data)
    data = data[~ np.isnan(data)]
    data.sort()
    if len(data) == 0:
        return 0
    var = running_variance(data)
    rvar = np.flipud(running_variance(np.flipud(data)))
    if bins > len(data):
        bins = len(data)
    bin_len = int(len(data) // bins)
    thresholds = data[0:len(data):bin_len]
    score_low = (var[0:len(data):bin_len] *
                 np.arange(0, len(data), bin_len))
    score_high = (rvar[0:len(data):bin_len] *
                  (len(data) - np.arange(0, len(data), bin_len)))
    #
    # Compute the middles
    #
    cs = data.cumsum()
    cs2 = (data ** 2).cumsum()
    i, j = np.mgrid[0:score_low.shape[0], 0:score_high.shape[0]] * bin_len
    diff = (j - i).astype(float)
    w = diff
    mean = (cs[j] - cs[i]) / diff
    mean2 = (cs2[j] - cs2[i]) / diff
    score_middle = w * (mean2 - mean ** 2)
    # np.Inf was removed in NumPy 2.0; np.inf is the supported spelling.
    score_middle[i >= j] = np.inf
    score = (score_low[i * bins // len(data)] + score_middle +
             score_high[j * bins // len(data)])
    best_score = np.min(score)
    best_i_j = np.argwhere(score == best_score)
    return (thresholds[best_i_j[0, 0]], thresholds[best_i_j[0, 1]])
Compute a threshold using a 3-category Otsu-like method data - an array of intensity values between zero and one min_threshold - only consider thresholds above this minimum value max_threshold - only consider thresholds below this maximum value bins - we bin the data into this many equally-spaced bins, then pick the bin index that optimizes the metric We find the maximum weighted variance, breaking the histogram into three pieces. Returns the lower and upper thresholds
2.81703
3.010319
0.935791
def entropy_score(var, bins, w=None, decimate=True):
    '''Compute entropy scores, given a variance and # of bins.

    var - variance of each candidate partition
    bins - number of bins to decimate down to
    w - optional weights; defaults to the fractional position of each bin
    decimate - if True, subsample var down to the number of bins
    '''
    if w is None:
        n = len(var)
        w = np.arange(0, n, n // bins) / float(n)
    if decimate:
        n = len(var)
        var = var[0:n:n // bins]
    score = w * np.log(var * w * np.sqrt(2 * np.pi * np.exp(1)))
    # np.Inf was removed in NumPy 2.0; np.inf is the supported spelling.
    score[np.isnan(score)] = np.inf
    return score
Compute entropy scores, given a variance and # of bins
3.863257
3.410533
1.132743
def running_variance(x):
    '''Given a vector x, compute the variance of x[0:i] for every i.

    Uses the Welford-style recurrence (thank you
    http://www.johndcook.com/standard_deviation.html):
    S[i] = S[i-1] + (x[i] - mean[i-1]) * (x[i] - mean[i])
    var(i) = S[i] / (i - 1)
    '''
    n = len(x)
    # Running mean of x[0:i].
    means = x.cumsum() / np.arange(1, n + 1)
    # (x[i] - mean[i-1]) and (x[i] - mean[i]) for i = 1...
    dev_prev = x[1:] - means[:-1]
    dev_curr = x[1:] - means[1:]
    s = (dev_prev * dev_curr).cumsum()
    # Prepend 0 so the result lines up with x (x[0] alone has no variance).
    return np.hstack(([0], s / np.arange(1, n)))
Given a vector x, compute the variance for x[0:i] Thank you http://www.johndcook.com/standard_deviation.html S[i] = S[i-1]+(x[i]-mean[i-1])*(x[i]-mean[i]) var(i) = S[i] / (i-1)
3.265172
2.309819
1.413605
def outline(labels):
    '''Return a matrix of the outlines of the labeled objects.

    A pixel keeps its label if it is on the image border or has at least
    one 8-connected neighbor with a different value; interior pixels of
    each object become zero.
    '''
    different = numpy.zeros(labels.shape, bool)
    # Compare every 8-connected neighbor pair once, marking both members
    # of each differing pair: left/right, up/down, and both diagonals.
    row_lo, row_hi = slice(None, -1), slice(1, None)
    col_lo, col_hi = slice(None, -1), slice(1, None)
    full = slice(None)
    for a_sl, b_sl in (((row_hi, full), (row_lo, full)),
                       ((full, col_hi), (full, col_lo)),
                       ((row_hi, col_hi), (row_lo, col_lo)),
                       ((row_hi, col_lo), (row_lo, col_hi))):
        changed = labels[a_sl] != labels[b_sl]
        different[a_sl][changed] = True
        different[b_sl][changed] = True
    #
    # Labels on edges need outlines
    #
    different[0, :] = True
    different[:, 0] = True
    different[-1, :] = True
    different[:, -1] = True
    output = numpy.zeros(labels.shape, labels.dtype)
    output[different] = labels[different]
    return output
Given a label matrix, return a matrix of the outlines of the labeled objects If a pixel is not zero and has at least one neighbor with a different value, then it is part of the outline.
2.039551
1.984559
1.02771
def euclidean_dist(point1, point2):
    '''Compute the Euclidean distance between two points.

    Parameters
    ----------
    point1, point2 : 2-tuples of float
        The input points.

    Returns
    -------
    d : float
        The distance between the input points.

    Examples
    --------
    >>> point1 = (1.0, 2.0)
    >>> point2 = (4.0, 6.0)  # (3., 4.) away, simplest Pythagorean triangle
    >>> euclidean_dist(point1, point2)
    5.0
    '''
    (x1, y1) = point1
    (x2, y2) = point2
    # math.hypot is the numerically robust stdlib form of sqrt(dx^2 + dy^2).
    return math.hypot(x1 - x2, y1 - y2)
Compute the Euclidean distance between two points. Parameters ---------- point1, point2 : 2-tuples of float The input points. Returns ------- d : float The distance between the input points. Examples -------- >>> point1 = (1.0, 2.0) >>> point2 = (4.0, 6.0) # (3., 4.) away, simplest Pythagorean triangle >>> euclidean_dist(point1, point2) 5.0
1.662145
2.290898
0.725543
def from_labels(labels):
    '''Create a list of cell features from a label image (pixel values 1..n).

    @return: list of cell features in the same order as labels
    '''
    labels = labels.astype(int)
    label_ids = list(range(1, numpy.max(labels) + 1))
    areas = scipy.ndimage.measurements.sum(labels != 0, labels, label_ids)
    # Keep only labels that actually occur in the image.
    present = [(i, a) for i, a in enumerate(areas, 1) if a > 0]
    existing_labels = [i for i, _ in present]
    existing_areas = [a for _, a in present]
    existing_centers = scipy.ndimage.measurements.center_of_mass(
        labels != 0, labels, existing_labels)
    return [CellFeatures(c, a, i, labels.shape)
            for i, c, a in zip(existing_labels, existing_centers, existing_areas)
            if a != 0]
Creates list of cell features based on label image (1-oo pixel values) @return: list of cell features in the same order as labels
3.14085
3.104128
1.01183
def from_detections_assignment(detections_1, detections_2, assignments):
    '''Create traces out of the given assignment and cell data.

    detections_1, detections_2 - detections in consecutive frames
    assignments - mapping from frame-1 indices to frame-2 indices;
                  indices >= len(detections) encode lost/new cells
    '''
    traces = []
    # dict.items() replaces six.iteritems and drops the six dependency;
    # behavior is identical on Python 3.
    for d1n, d2n in assignments.items():
        # check if the match is between existing cells
        if d1n < len(detections_1) and d2n < len(detections_2):
            traces.append(Trace(detections_1[d1n], detections_2[d2n]))
    return traces
Creates traces out of given assignment and cell data.
3.512713
2.996561
1.172248
def run_tracking(self, label_image_1, label_image_2):
    '''Track cells between the two input label images.

    @returns: injective function from old objects to new objects (pairs
              of [old, new]); numbers are compatible with labels.
    '''
    self.scale = self.parameters_tracking["avgCellDiameter"] / 35.0
    detections_1 = self.derive_detections(label_image_1)
    detections_2 = self.derive_detections(label_image_2)
    # Seed the tracking from per-cell features and positions alone.
    traces = self.find_initials_traces(detections_1, detections_2)
    # Iteratively refine using each cell's neighbourhood.
    for _ in range(int(self.parameters_tracking["iterations"])):
        traces = self.improve_traces(detections_1, detections_2, traces)
    # Filter traces down to (old, new) number pairs.
    return [(t.previous_cell.number, t.current_cell.number) for t in traces]
Tracks cells between input label images. @returns: injective function from old objects to new objects (pairs of [old, new]). Number are compatible with labels.
5.147165
5.183643
0.992963
def is_cell_big(self, cell_detection):
    '''Check if the cell is considered big.

    @param CellFeature cell_detection: detection to test
    @return: True when its area exceeds the scaled big-size threshold
    '''
    threshold = self.parameters_tracking["big_size"] * self.scale * self.scale
    return cell_detection.area > threshold
Check if the cell is considered big. @param CellFeature cell_detection: @return:
13.602003
19.190386
0.708793
def find_closest_neighbours(cell, all_cells, k, max_dist):
    '''Find k closest neighbours of the given cell.

    :param CellFeatures cell: cell of interest
    :param all_cells: cells to consider as neighbours
    :param int k: number of neighbours to be returned
    :param int max_dist: maximal distance in pixels to consider neighbours
    :return: up to k closest neighbours within max_dist
    '''
    candidates = [(cell.distance(other), other)
                  for other in all_cells if other != cell]
    candidates.sort()
    return [other for dist, other in candidates[:k] if dist <= max_dist]
Find k closest neighbours of the given cell. :param CellFeatures cell: cell of interest :param all_cells: cell to consider as neighbours :param int k: number of neighbours to be returned :param int max_dist: maximal distance in pixels to consider neighbours :return: k closest neighbours
2.635945
3.217633
0.819219
def calculate_basic_cost(self, d1, d2):
    '''Calculate the assignment cost between two cells.

    The cost is the scaled centre distance plus a weighted relative
    area change.
    '''
    spatial = euclidean_dist(d1.center, d2.center) / self.scale
    shape_penalty = 1 - min(d1.area, d2.area) / max(d1.area, d2.area)
    return spatial + self.parameters_cost_initial["area_weight"] * shape_penalty
Calculates assignment cost between two cells.
5.125245
4.989683
1.027168
def calculate_localised_cost(self, d1, d2, neighbours, motions):
    '''Assignment cost between two cells, taking the movement of the
    first cell's neighbours into account.

    :param CellFeatures d1: detection in first frame
    :param CellFeatures d2: detection in second frame
    '''
    moving_nbrs = [n for n in neighbours[d1] if n in motions]
    my_motion = (d1.center[0] - d2.center[0], d1.center[1] - d2.center[1])
    if not moving_nbrs:
        # A neighbour is absent from motions when it has no trace (the
        # cell is considered to vanish); fall back to plain distance.
        distance = euclidean_dist(d1.center, d2.center) / self.scale
    else:
        distance = min(euclidean_dist(my_motion, motions[n])
                       for n in moving_nbrs) / self.scale
    area_change = 1 - min(d1.area, d2.area) / max(d1.area, d2.area)
    return distance + self.parameters_cost_iteration["area_weight"] * area_change
Calculates assignment cost between two cells taking into account the movement of cells neighbours. :param CellFeatures d1: detection in first frame :param CellFeatures d2: detection in second frame
3.934789
3.72746
1.055622
def calculate_costs(self, detections_1, detections_2, calculate_match_cost, params):
    '''Build the assignment cost matrix between detections and 'empty' slots.

    The matrix is (n+m)x(n+m): matching a frame-1 cell to an empty slot
    means the cell is lost; matching an empty slot to a frame-2 cell
    means the cell is new. The smaller the cost the better.

    @param detections_1: cell list of size n in previous frame
    @param detections_2: cell list of size m in current frame
    @return: cost matrix (n+m)x(n+m) extended by cost of matching cells
             with emptiness
    '''
    global invalid_match
    n1, n2 = len(detections_1), len(detections_2)
    size_sum = n1 + n2
    cost_matrix = numpy.zeros((size_sum, size_sum))
    # Lost cells: each frame-1 cell may match exactly one empty slot.
    cost_matrix[0:n1, n2:size_sum] = params["default_empty_cost"] + (
        1 - numpy.eye(n1, n1)) * invalid_match
    # New cells: each frame-2 cell may match exactly one empty slot.
    cost_matrix[n1:size_sum, 0:n2] = params["default_empty_cost"] + (
        1 - numpy.eye(n2, n2)) * invalid_match
    # Reliable (and, optionally, big) detections are more expensive to drop.
    for row in range(n1):
        d = detections_1[row]
        if d.is_reliable() and (not params["check_if_big"] or self.is_cell_big(d)):
            cost_matrix[row, n2:size_sum] *= params["default_empty_reliable_cost_mult"]
    for col in range(n2):
        d = detections_2[col]
        if d.is_reliable() and (not params["check_if_big"] or self.is_cell_big(d)):
            cost_matrix[n1:size_sum, col] *= params["default_empty_reliable_cost_mult"]

    # calculate cost of matching cells
    def cost_if_not_too_far(detection_1, detection_2):
        # Pairs farther apart than max_distance can never match.
        if detection_1.distance(detection_2) <= self.parameters_tracking["max_distance"]:
            return calculate_match_cost(detection_1, detection_2)
        return invalid_match

    cost_matrix[0:n1, 0:n2] = [[cost_if_not_too_far(d1, d2)
                                for d2 in detections_2]
                               for d1 in detections_1]
    return cost_matrix
Calculates assignment costs between detections and 'empty' spaces. The smaller cost the better. @param detections_1: cell list of size n in previous frame @param detections_2: cell list of size m in current frame @return: cost matrix (n+m)x(n+m) extended by cost of matching cells with emptiness
2.40426
2.355284
1.020794
def solve_assignement(self, costs):
    '''Solve the assignment problem for the given square cost matrix.

    @param costs: square cost matrix
    @return: assignment function
    @rtype: int->int
    '''
    if costs is None or len(costs) == 0:
        return {}
    n = costs.shape[0]
    # Only feed finite (valid) entries to the sparse LAPJV solver.
    valid_pairs = [(i, j) for i in range(n) for j in range(n)
                   if costs[i, j] < invalid_match]
    rows, cols = zip(*valid_pairs)
    pair_costs = [costs[i, j] for i, j in valid_pairs]
    assignment = lapjv.lapjv(rows, cols, pair_costs)
    return {row: col for row, col in enumerate(assignment[0])}
Solves assignment problem using Hungarian implementation by Brian M. Clapper. @param costs: square cost matrix @return: assignment function @rtype: int->int
3.70434
3.639249
1.017886
def smooth_with_noise(image, bits):
    '''Smooth the image with a per-pixel random multiplier.

    image - the image to perturb
    bits - the noise is this many bits below the pixel value

    The noise is random with normal distribution, so individual pixels
    get either multiplied or divided by a normally distributed # of bits.
    '''
    rng = np.random.RandomState()
    rng.seed(0)
    r = rng.normal(size=image.shape)
    delta = pow(2.0, -bits)
    # Clip away zeros so the logs below are finite.
    clipped = np.clip(image, delta, 1)
    result = np.exp2(np.log2(clipped + delta) * r + (1 - r) * np.log2(clipped))
    result[result > 1] = 1
    result[result < 0] = 0
    return result
Smooth the image with a per-pixel random multiplier image - the image to perturb bits - the noise is this many bits below the pixel value The noise is random with normal distribution, so the individual pixels get either multiplied or divided by a normally distributed # of bits
3.931896
3.664364
1.073009
def smooth_with_function_and_mask(image, function, mask):
    '''Smooth an image with a linear function, ignoring masked pixels.

    image - image to smooth
    function - a function that takes an image and returns a smoothed image
    mask - mask with 1's for significant pixels, 0 for masked pixels

    Applying the function to the mask yields the fraction of each output
    pixel that came from significant input ("bleed-over"). We apply the
    function to the zero-filled masked image and divide by that fraction
    to recover the effect of smoothing from just the significant pixels.
    '''
    bleed_over = function(mask.astype(float))
    masked_image = np.zeros(image.shape, image.dtype)
    masked_image[mask] = image[mask]
    smoothed = function(masked_image)
    return smoothed / (bleed_over + np.finfo(float).eps)
Smooth an image with a linear function, ignoring the contribution of masked pixels image - image to smooth function - a function that takes an image and returns a smoothed image mask - mask with 1's for significant pixels, 0 for masked pixels This function calculates the fractional contribution of masked pixels by applying the function to the mask (which gets you the fraction of the pixel data that's due to significant points). We then mask the image and apply the function. The resulting values will be lower by the bleed-over fraction, so you can recalibrate by dividing by the function on the mask to recover the effect of smoothing from just the significant pixels.
3.059757
2.965567
1.031761
def circular_gaussian_kernel(sd, radius):
    """Create a 2-d Gaussian convolution kernel on a circular support.

    sd - standard deviation of the gaussian in pixels
    radius - the kernel covers the circle bounded by this radius;
             pixels outside the circle are zero
    """
    yy, xx = np.mgrid[-radius:radius + 1, -radius:radius + 1].astype(float) / radius
    inside = yy ** 2 + xx ** 2 <= 1
    # Rescale the normalized coordinates into units of sd.
    yy = yy * radius / sd
    xx = xx * radius / sd
    side = 2 * radius + 1
    kernel = np.zeros((side, side))
    kernel[inside] = np.e ** (-(yy[inside] ** 2 + xx[inside] ** 2) / (2 * sd ** 2))
    # Normalize so the kernel has no net effect on a uniform image.
    return kernel / np.sum(kernel)
def circular_gaussian_kernel(sd,radius)
Create a 2-d Gaussian convolution kernel sd - standard deviation of the gaussian in pixels radius - build a circular kernel that convolves all points in the circle bounded by this radius
3.257015
3.388579
0.961174
def fit_polynomial(pixel_data, mask, clip=True):
    """Return an "image" which is a polynomial fit to the pixel data

    Fit the image to the polynomial Ax**2+By**2+Cxy+Dx+Ey+F

    pixel_data - a two-dimensional numpy array to be fitted
    mask - a mask of pixels whose intensities should be considered in the
           least squares fit
    clip - if True, clip the output array so that pixels less than zero
           in the fitted image are zero and pixels that are greater than
           one are one.
    """
    # Only positive, unmasked pixels participate in the fit.
    usable = np.logical_and(mask, pixel_data > 0)
    if not np.any(usable):
        return pixel_data
    ii, jj = np.mgrid[0:pixel_data.shape[0], 0:pixel_data.shape[1]]
    # Basis terms in the order (x, y, x^2, y^2, xy, 1).
    basis = [ii, jj, ii * ii, jj * jj, ii * jj, np.ones(pixel_data.shape)]
    design = np.array([term[usable] for term in basis])
    coeffs = scipy.linalg.lstsq(design.transpose(), pixel_data[usable])[0]
    fitted = np.sum([c * term for c, term in zip(coeffs, basis)], 0)
    if clip:
        fitted = np.clip(fitted, 0, 1)
    return fitted
def fit_polynomial(pixel_data, mask, clip=True)
Return an "image" which is a polynomial fit to the pixel data Fit the image to the polynomial Ax**2+By**2+Cxy+Dx+Ey+F pixel_data - a two-dimensional numpy array to be fitted mask - a mask of pixels whose intensities should be considered in the least squares fit clip - if True, clip the output array so that pixels less than zero in the fitted image are zero and pixels that are greater than one are one.
3.122214
1.761472
1.772502
# Compute the corrected global threshold first; the adaptive/per-object
# variants are derived from it and then constrained around it.
global_threshold = get_global_threshold(
    threshold_method, image, mask, **kwargs)
global_threshold *= threshold_correction_factor
if not threshold_range_min is None:
    global_threshold = max(global_threshold, threshold_range_min)
if not threshold_range_max is None:
    global_threshold = min(global_threshold, threshold_range_max)
# Dispatch on the modifier: a scalar for TM_GLOBAL, a per-pixel matrix
# otherwise.
if threshold_modifier == TM_GLOBAL:
    local_threshold=global_threshold
elif threshold_modifier == TM_ADAPTIVE:
    local_threshold = get_adaptive_threshold(
        threshold_method, image, global_threshold, mask,
        adaptive_window_size = adaptive_window_size, **kwargs)
    local_threshold = local_threshold * threshold_correction_factor
elif threshold_modifier == TM_PER_OBJECT:
    local_threshold = get_per_object_threshold(
        threshold_method, image, global_threshold, mask, labels,
        threshold_range_min, threshold_range_max,**kwargs)
    local_threshold = local_threshold * threshold_correction_factor
else:
    raise NotImplementedError("%s thresholding is not implemented"%(threshold_modifier))
if isinstance(local_threshold, np.ndarray):
    #
    # Constrain thresholds to within .7 to 1.5 of the global threshold.
    #
    # NOTE(review): this max/min assumes threshold_range_min/max are not
    # None on the matrix path — confirm callers always pass both when
    # using adaptive/per-object modifiers.
    threshold_range_min = max(threshold_range_min, global_threshold * .7)
    threshold_range_max = min(threshold_range_max, global_threshold * 1.5)
    if not threshold_range_min is None:
        local_threshold[local_threshold < threshold_range_min] = \
            threshold_range_min
    if not threshold_range_max is None:
        local_threshold[local_threshold > threshold_range_max] = \
            threshold_range_max
    # Background pixels (label 0) get the maximal threshold so they are
    # never classified as foreground.
    if (threshold_modifier == TM_PER_OBJECT) and (labels is not None):
        local_threshold[labels == 0] = 1.0
else:
    if not threshold_range_min is None:
        local_threshold = max(local_threshold, threshold_range_min)
    if not threshold_range_max is None:
        local_threshold = min(local_threshold, threshold_range_max)
return local_threshold, global_threshold
def get_threshold(threshold_method, threshold_modifier, image, mask=None, labels = None, threshold_range_min = None, threshold_range_max = None, threshold_correction_factor = 1.0, adaptive_window_size = 10, **kwargs)
Compute a threshold for an image threshold_method - one of the TM_ methods above threshold_modifier - TM_GLOBAL to calculate one threshold over entire image TM_ADAPTIVE to calculate a per-pixel threshold TM_PER_OBJECT to calculate a different threshold for each object image - a NxM numpy array of the image data Returns a tuple of local_threshold and global_threshold where: * global_threshold is the single number calculated using the threshold method over the whole image * local_threshold is the global_threshold for global methods. For adaptive and per-object thresholding, local_threshold is a matrix of threshold values representing the threshold to be applied at each pixel of the image. Different methods have optional and required parameters: Required: TM_PER_OBJECT: labels - a labels matrix that defines the extents of the individual objects to be thresholded separately. Optional: All: mask - a mask of the significant pixels in the image threshold_range_min, threshold_range_max - constrain the threshold values to be examined to values between these limits threshold_correction_factor - the calculated threshold is multiplied by this number to get the final threshold TM_MOG (mixture of Gaussians): object_fraction - fraction of image expected to be occupied by objects (pixels that are above the threshold) TM_OTSU - We have algorithms derived from Otsu. There is a three-class version of Otsu in addition to the two class. There is also an entropy measure in addition to the weighted variance. two_class_otsu - assume that the distribution represents two intensity classes if true, three if false. use_weighted_variance - use Otsu's weighted variance if true, an entropy measure if false assign_middle_to_foreground - assign pixels in the middle class in a three-class Otsu to the foreground if true or the background if false.
1.83552
1.793157
1.023624
# A fully masked image yields the maximal threshold (nothing passes).
if mask is not None and not np.any(mask):
    return 1
# Dispatch to the implementation for the requested method.
if threshold_method == TM_OTSU:
    fn = get_otsu_threshold
elif threshold_method == TM_MOG:
    fn = get_mog_threshold
elif threshold_method == TM_BACKGROUND:
    fn = get_background_threshold
elif threshold_method == TM_ROBUST_BACKGROUND:
    fn = get_robust_background_threshold
elif threshold_method == TM_RIDLER_CALVARD:
    fn = get_ridler_calvard_threshold
elif threshold_method == TM_KAPUR:
    fn = get_kapur_threshold
elif threshold_method == TM_MCT:
    fn = get_maximum_correlation_threshold
else:
    raise NotImplementedError("%s algorithm not implemented"%(threshold_method))
# NOTE(review): `fn.args` appears to be an attribute attached to each
# threshold function elsewhere in this module, listing its accepted
# keyword names — confirm; plain functions do not carry `.args`.
kwargs = dict([(k, v) for k, v in kwargs.items() if k in fn.args])
return fn(image, mask, **kwargs)
def get_global_threshold(threshold_method, image, mask = None, **kwargs)
Compute a single threshold over the whole image
3.101054
3.124643
0.992451
# for the X and Y direction, find the # of blocks, given the # size constraints image_size = np.array(image.shape[:2],dtype=int) nblocks = image_size // adaptive_window_size # # Use a floating point block size to apportion the roundoff # roughly equally to each block # increment = ( np.array(image_size,dtype=float) / np.array(nblocks,dtype=float)) # # Put the answer here # thresh_out = np.zeros(image_size, image.dtype) # # Loop once per block, computing the "global" threshold within the # block. # block_threshold = np.zeros([nblocks[0],nblocks[1]]) for i in range(nblocks[0]): i0 = int(i*increment[0]) i1 = int((i+1)*increment[0]) for j in range(nblocks[1]): j0 = int(j*increment[1]) j1 = int((j+1)*increment[1]) block = image[i0:i1,j0:j1] block_mask = None if mask is None else mask[i0:i1,j0:j1] block_threshold[i,j] = get_global_threshold( threshold_method, block, mask = block_mask, **kwargs) # # Use a cubic spline to blend the thresholds across the image to avoid image artifacts # spline_order = min(3, np.min(nblocks) - 1) xStart = int(increment[0] / 2) xEnd = int((nblocks[0] - 0.5) * increment[0]) yStart = int(increment[1] / 2) yEnd = int((nblocks[1] - 0.5) * increment[1]) xtStart = .5 xtEnd = image.shape[0] - .5 ytStart = .5 ytEnd = image.shape[1] - .5 block_x_coords = np.linspace(xStart,xEnd, nblocks[0]) block_y_coords = np.linspace(yStart,yEnd, nblocks[1]) adaptive_interpolation = scipy.interpolate.RectBivariateSpline( block_x_coords, block_y_coords, block_threshold, bbox = (xtStart, xtEnd, ytStart, ytEnd), kx = spline_order, ky = spline_order) thresh_out_x_coords = np.linspace(.5, int(nblocks[0] * increment[0]) - .5, thresh_out.shape[0]) thresh_out_y_coords = np.linspace(.5, int(nblocks[1] * increment[1]) - .5 , thresh_out.shape[1]) thresh_out = adaptive_interpolation(thresh_out_x_coords, thresh_out_y_coords) return thresh_out
def get_adaptive_threshold(threshold_method, image, threshold, mask = None, adaptive_window_size = 10, **kwargs)
Given a global threshold, compute a threshold per pixel Break the image into blocks, computing the threshold per block. Afterwards, constrain the block threshold to .7 T < t < 1.5 T. Block sizes must be at least 50x50. Images > 500 x 500 get 10x10 blocks.
2.547292
2.491392
1.022437
def get_per_object_threshold(method, image, threshold, mask=None, labels=None,
                             threshold_range_min=None, threshold_range_max=None,
                             **kwargs):
    """Return a matrix giving threshold per pixel calculated per-object

    method - thresholding method forwarded to get_global_threshold
    image - image to be thresholded
    threshold - the global threshold (kept for interface parity; the
                .7-1.5x constraint is applied by the caller)
    mask - mask out "don't care" pixels
    labels - a label matrix that defines object extents

    Pixels outside every object keep a threshold of 1.
    """
    if labels is None:
        labels = np.ones(image.shape, int)
    if not mask is None:
        # NOTE: intentionally mutates `labels` in place; the caller
        # (get_threshold) relies on masked pixels becoming label 0.
        labels[np.logical_not(mask)] = 0
    label_extents = scipy.ndimage.find_objects(labels, np.max(labels))
    local_threshold = np.ones(image.shape, image.dtype)
    for i, extent in enumerate(label_extents, start=1):
        if extent is None:
            # find_objects returns None for label values that do not occur
            # (e.g. objects entirely removed by the mask); indexing with
            # None would silently misbehave, so skip them.
            continue
        label_mask = labels[extent] == i
        if not mask is None:
            label_mask = np.logical_and(mask[extent], label_mask)
        values = image[extent]
        per_object_threshold = get_global_threshold(
            method, values, mask=label_mask, **kwargs)
        # local_threshold[extent] is a basic-slice view, so this writes
        # through to the output array.
        local_threshold[extent][label_mask] = per_object_threshold
    return local_threshold
def get_per_object_threshold(method, image, threshold, mask=None, labels=None, threshold_range_min = None, threshold_range_max = None, **kwargs)
Return a matrix giving threshold per pixel calculated per-object image - image to be thresholded mask - mask out "don't care" pixels labels - a label mask indicating object boundaries threshold - the global threshold
2.630528
2.645854
0.994208
def get_background_threshold(image, mask=None):
    """Get threshold based on the mode of the image

    The threshold is calculated by calculating the mode and multiplying by
    2 (an arbitrary empirical factor). The user will presumably adjust the
    multiplication factor as needed.
    """
    cropped_image = np.array(image.flat) if mask is None else image[mask]
    # np.product was removed in NumPy 2.0; np.prod is the equivalent.
    if np.prod(cropped_image.shape) == 0:
        return 0
    img_min = np.min(cropped_image)
    img_max = np.max(cropped_image)
    if img_min == img_max:
        return cropped_image[0]

    # Only do the histogram between values a bit removed from saturation
    robust_min = 0.02 * (img_max - img_min) + img_min
    robust_max = 0.98 * (img_max - img_min) + img_min
    nbins = 256
    cropped_image = cropped_image[np.logical_and(cropped_image > robust_min,
                                                 cropped_image < robust_max)]
    if len(cropped_image) == 0:
        return robust_min

    h = scipy.ndimage.histogram(cropped_image, robust_min, robust_max, nbins)
    index = np.argmax(h)
    cutoff = float(index) / float(nbins - 1)
    #
    # If we have a low (or almost no) background, the cutoff will be
    # zero since the background falls into the lowest bin. We want to
    # offset by the robust cutoff factor of .02. We rescale by 1.04
    # to account for the 0.02 at the top and bottom.
    #
    cutoff = (cutoff + 0.02) / 1.04
    return img_min + cutoff * 2 * (img_max - img_min)
def get_background_threshold(image, mask = None)
Get threshold based on the mode of the image The threshold is calculated by calculating the mode and multiplying by 2 (an arbitrary empirical factor). The user will presumably adjust the multiplication factor as needed.
3.624161
3.585111
1.010892
def get_robust_background_threshold(image, mask=None,
                                    lower_outlier_fraction=0.05,
                                    upper_outlier_fraction=0.05,
                                    deviations_above_average=2.0,
                                    average_fn=np.mean,
                                    variance_fn=np.std):
    """Calculate threshold based on mean & standard deviation

    The top and bottom outlier fractions are trimmed off the sorted
    pixels, then the threshold is set deviations_above_average spreads
    (variance_fn) above the average (average_fn) of the remainder.

    image - the image to threshold
    mask - mask of pixels to consider (default = all pixels)
    lower_outlier_fraction / upper_outlier_fraction - fraction of pixels
        trimmed from each end (default .05 each)
    deviations_above_average - multiplier on the spread (default 2)
    average_fn - central-tendency estimator (default np.mean)
    variance_fn - spread estimator (default np.std)
    """
    cropped_image = np.array(image.flat) if mask is None else image[mask]
    # np.product was removed in NumPy 2.0; np.prod is the equivalent.
    n_pixels = np.prod(cropped_image.shape)
    if n_pixels < 3:
        return 0

    cropped_image.sort()
    if cropped_image[0] == cropped_image[-1]:
        return cropped_image[0]
    low_chop = int(round(n_pixels * lower_outlier_fraction))
    hi_chop = n_pixels - int(round(n_pixels * upper_outlier_fraction))
    im = cropped_image if low_chop == 0 else cropped_image[low_chop:hi_chop]
    mean = average_fn(im)
    sd = variance_fn(im)
    return mean + sd * deviations_above_average
def get_robust_background_threshold(image, mask = None, lower_outlier_fraction = 0.05, upper_outlier_fraction = 0.05, deviations_above_average = 2.0, average_fn = np.mean, variance_fn = np.std)
Calculate threshold based on mean & standard deviation The threshold is calculated by trimming the top and bottom 5% of pixels off the image, then calculating the mean and standard deviation of the remaining image. The threshold is then set at 2 (empirical value) standard deviations above the mean. image - the image to threshold mask - mask of pixels to consider (default = all pixels) lower_outlier_fraction - after ordering the pixels by intensity, remove the pixels from 0 to len(image) * lower_outlier_fraction from the threshold calculation (default = .05). upper_outlier_fraction - remove the pixels from len(image) * (1 - upper_outlier_fraction) to len(image) from consideration (default = .05). deviations_above_average - calculate the standard deviation or MAD and multiply by this number and add to the average to get the final threshold (default = 2) average_fn - function used to calculate the average intensity (e.g. np.mean, np.median or some sort of mode function). Default = np.mean variance_fn - function used to calculate the amount of variance. Default = np.sd
2.632797
2.569632
1.024581
def mad(a):
    """Calculate the median absolute deviation of a sample

    a - a numpy array-like collection of values

    returns the median of the deviation of a from its median.
    """
    # np.asfarray was removed in NumPy 2.0; an explicit float asarray is
    # the documented replacement.
    a = np.asarray(a, dtype=float).flatten()
    return np.median(np.abs(a - np.median(a)))
def mad(a)
Calculate the median absolute deviation of a sample a - a numpy array-like collection of values returns the median of the deviation of a from its median.
4.842107
2.23916
2.162466
def binned_mode(a):
    """Calculate a binned mode of a sample

    a - array of values

    This routine bins the sample into np.sqrt(len(a)) bins. This number
    is a compromise between fineness of measurement and the stochastic
    nature of counting, which roughly scales as the square root of the
    sample size.
    """
    a = np.asarray(a).flatten()
    a_min = np.min(a)
    a_max = np.max(a)
    if a_min == a_max:
        # A constant sample: binning would divide by zero, and the mode
        # is trivially that constant.
        return a_min
    n_bins = np.ceil(np.sqrt(len(a)))
    b = ((a - a_min) / (a_max - a_min) * n_bins).astype(int)
    # Values exactly equal to a_max land in an overflow bin (index
    # n_bins); fold them into the last real bin, otherwise argmax can
    # select the overflow bin and np.percentile is asked for > 100%.
    b[b == int(n_bins)] = int(n_bins) - 1
    idx = np.argmax(np.bincount(b))
    return np.percentile(a, 100 * float(idx + .5) / n_bins)
def binned_mode(a)
Calculate a binned mode of a sample a - array of values This routine bins the sample into np.sqrt(len(a)) bins. This is a number that is a compromise between fineness of measurement and the stochastic nature of counting which roughly scales as the square root of the sample size.
4.717057
1.920199
2.456546
def get_ridler_calvard_threshold(image, mask=None):
    """Find a threshold using the method of Ridler and Calvard

    The reference for this method is:
    "Picture Thresholding Using an Iterative Selection Method"
    by T. Ridler and S. Calvard, in IEEE Transactions on Systems, Man and
    Cybernetics, vol. 8, no. 8, August 1978.
    """
    cropped_image = np.array(image.flat) if mask is None else image[mask]
    # np.product was removed in NumPy 2.0; np.prod is the equivalent.
    if np.prod(cropped_image.shape) < 3:
        return 0
    if np.min(cropped_image) == np.max(cropped_image):
        return cropped_image[0]

    # We want to limit the dynamic range of the image to 256. Otherwise,
    # an image with almost all values near zero can give a bad result.
    min_val = np.max(cropped_image) / 256
    cropped_image[cropped_image < min_val] = min_val
    im = np.log(cropped_image)
    min_val = np.min(im)
    max_val = np.max(im)
    im = (im - min_val) / (max_val - min_val)
    pre_thresh = 0
    # This method needs an initial value to start iterating. Using
    # graythresh (Otsu's method) is probably not the best, because the
    # Ridler Calvard threshold ends up being too close to this one and in
    # most cases has the same exact value.
    new_thresh = otsu(im)
    delta = 0.00001
    # Iterate: threshold is the midpoint of the below- and above-threshold
    # means, until it stops moving.
    while abs(pre_thresh - new_thresh) > delta:
        pre_thresh = new_thresh
        mean1 = np.mean(im[im < pre_thresh])
        mean2 = np.mean(im[im >= pre_thresh])
        new_thresh = np.mean([mean1, mean2])
    return math.exp(min_val + (max_val - min_val) * new_thresh)
def get_ridler_calvard_threshold(image, mask = None)
Find a threshold using the method of Ridler and Calvard The reference for this method is: "Picture Thresholding Using an Iterative Selection Method" by T. Ridler and S. Calvard, in IEEE Transactions on Systems, Man and Cybernetics, vol. 8, no. 8, August 1978.
3.805231
3.711636
1.025217
def get_kapur_threshold(image, mask=None):
    """The Kapur, Sahoo, & Wong method of thresholding, adapted to log-space."""
    cropped_image = np.array(image.flat) if mask is None else image[mask]
    # np.product was removed in NumPy 2.0; np.prod is the equivalent.
    if np.prod(cropped_image.shape) < 3:
        return 0
    if np.min(cropped_image) == np.max(cropped_image):
        return cropped_image[0]
    log_image = np.log2(smooth_with_noise(cropped_image, 8))
    min_log_image = np.min(log_image)
    max_log_image = np.max(log_image)
    histogram = scipy.ndimage.histogram(log_image,
                                        min_log_image,
                                        max_log_image,
                                        256)
    histogram_values = (min_log_image + (max_log_image - min_log_image) *
                        np.arange(256, dtype=float) / 255)
    # drop any zero bins
    keep = histogram != 0
    histogram = histogram[keep]
    histogram_values = histogram_values[keep]
    # check for corner cases
    if np.prod(histogram_values) == 1:
        return 2 ** histogram_values[0]
    # Normalize to probabilities
    p = histogram.astype(float) / float(np.sum(histogram))
    # Find the probabilities totals up to and above each possible threshold.
    lo_sum = np.cumsum(p)
    hi_sum = lo_sum[-1] - lo_sum
    lo_e = np.cumsum(p * np.log2(p))
    hi_e = lo_e[-1] - lo_e

    # compute the entropies
    lo_entropy = lo_e / lo_sum - np.log2(lo_sum)
    hi_entropy = hi_e / hi_sum - np.log2(hi_sum)

    sum_entropy = lo_entropy[:-1] + hi_entropy[:-1]
    # np.Inf was removed in NumPy 2.0; np.inf is the canonical spelling.
    sum_entropy[np.logical_not(np.isfinite(sum_entropy))] = np.inf
    entry = np.argmin(sum_entropy)
    return 2 ** ((histogram_values[entry] + histogram_values[entry + 1]) / 2)
def get_kapur_threshold(image, mask=None)
The Kapur, Sahoo, & Wong method of thresholding, adapted to log-space.
3.131021
3.049233
1.026823
def get_maximum_correlation_threshold(image, mask=None, bins=256):
    """Return the maximum correlation threshold of the image

    image - image to be thresholded
    mask - mask of relevant pixels
    bins - # of value bins to use

    This is an implementation of the maximum correlation threshold as
    described in Padmanabhan, "A novel algorithm for optimal image
    thresholding of biological data", Journal of Neuroscience Methods
    193 (2010) p 380-384
    """
    if mask is not None:
        image = image[mask]
    pixels = image.ravel()
    nm = len(pixels)
    if nm == 0:
        return 0
    # Bin the image into `bins` integer levels.
    min_value = np.min(pixels)
    max_value = np.max(pixels)
    if min_value == max_value:
        return min_value
    levels = ((pixels - min_value) * (bins - 1) / (max_value - min_value)).astype(int)
    histogram = np.bincount(levels)
    # Per-level deviations from the mean level, weighted by counts.
    mean_value = np.mean(levels)
    deviation = np.arange(len(histogram)) - mean_value
    ndiff = histogram * deviation
    ndiff2 = histogram * deviation * deviation
    # Constant factor kept to follow the published method faithfully.
    sndiff2 = np.sum(ndiff2)
    # Cumulative sum from level i to the top = total minus cumsum below i.
    cndiff = np.cumsum(ndiff)
    numerator = np.hstack([[cndiff[-1]], cndiff[-1] - cndiff[:-1]])
    # ni = number of pixels at or above level i.
    ni = nm - np.hstack([[0], np.cumsum(histogram[:-1])])
    denominator = np.sqrt(sndiff2 * (nm - ni) * ni / nm)
    mct = numerator / denominator
    mct[denominator == 0] = 0
    my_bin = np.argmax(mct) - 1
    return min_value + my_bin * (max_value - min_value) / (bins - 1)
def get_maximum_correlation_threshold(image, mask = None, bins = 256)
Return the maximum correlation threshold of the image image - image to be thresholded mask - mask of relevant pixels bins - # of value bins to use This is an implementation of the maximum correlation threshold as described in Padmanabhan, "A novel algorithm for optimal image thresholding of biological data", Journal of Neuroscience Methods 193 (2010) p 380-384
4.369818
3.227086
1.354106
def weighted_variance(image, mask, binary_image):
    """Compute the log-transformed variance of foreground and background

    image - intensity image used for thresholding
    mask - mask of significant pixels (ignored pixels are False)
    binary_image - binary image marking foreground and background

    Returns the pixel-count-weighted average of the variances of the
    log2 intensities of the foreground and background populations.
    """
    if not np.any(mask):
        return 0
    #
    # Clamp the dynamic range of the foreground
    #
    minval = np.max(image[mask]) / 256
    if minval == 0:
        return 0
    fg = np.log2(np.maximum(image[binary_image & mask], minval))
    bg = np.log2(np.maximum(image[(~ binary_image) & mask], minval))
    # np.product was removed in NumPy 2.0; np.prod is the equivalent.
    nfg = np.prod(fg.shape)
    nbg = np.prod(bg.shape)
    if nfg == 0:
        return np.var(bg)
    elif nbg == 0:
        return np.var(fg)
    else:
        return (np.var(fg) * nfg + np.var(bg) * nbg) / (nfg + nbg)
def weighted_variance(image, mask, binary_image)
Compute the log-transformed variance of foreground and background image - intensity image used for thresholding mask - mask of ignored pixels binary_image - binary image marking foreground and background
2.757155
2.825775
0.975717
def sum_of_entropies(image, mask, binary_image):
    """Bin the foreground and background pixels and compute the entropy
    of the distribution of points among the bins

    image - intensity image
    mask - significant pixels (copied, then NaN pixels are dropped)
    binary_image - foreground/background partition
    """
    mask = mask.copy()
    mask[np.isnan(image)] = False
    if not np.any(mask):
        return 0
    #
    # Clamp the dynamic range of the foreground
    #
    minval = np.max(image[mask]) / 256
    if minval == 0:
        return 0
    clamped_image = image.copy()
    clamped_image[clamped_image < minval] = minval
    #
    # Smooth image with -8 bits of noise
    #
    image = smooth_with_noise(clamped_image, 8)
    im_min = np.min(image)
    im_max = np.max(image)
    #
    # Figure out the bounds for the histogram
    #
    upper = np.log2(im_max)
    lower = np.log2(im_min)
    if upper == lower:
        # All values are the same, answer is log2 of # of pixels
        return math.log(np.sum(mask), 2)
    #
    # Create log-transformed lists of points in the foreground and background
    #
    fg = image[binary_image & mask]
    bg = image[(~ binary_image) & mask]
    if len(fg) == 0 or len(bg) == 0:
        return 0
    log_fg = np.log2(fg)
    log_bg = np.log2(bg)
    #
    # Make these into histograms
    hfg = numpy_histogram(log_fg, 256, range=(lower, upper))[0]
    hbg = numpy_histogram(log_bg, 256, range=(lower, upper))[0]
    #
    # Drop empty bins
    #
    hfg = hfg[hfg > 0]
    hbg = hbg[hbg > 0]
    # np.product was removed in NumPy 2.0; np.prod is the equivalent.
    if np.prod(hfg.shape) == 0:
        hfg = np.ones((1,), int)
    if np.prod(hbg.shape) == 0:
        hbg = np.ones((1,), int)
    #
    # Normalize
    #
    hfg = hfg.astype(float) / float(np.sum(hfg))
    hbg = hbg.astype(float) / float(np.sum(hbg))
    #
    # Compute sum of entropies
    #
    return np.sum(hfg * np.log2(hfg)) + np.sum(hbg * np.log2(hbg))
def sum_of_entropies(image, mask, binary_image)
Bin the foreground and background pixels and compute the entropy of the distribution of points among the bins
2.634411
2.592509
1.016163
'''Renormalize image intensities to log space

Returns a tuple of transformed image and a dictionary to be passed into
inverse_log_transform. The minimum and maximum from the dictionary
can be applied to an image by the inverse_log_transform to
convert it back to its former intensity values.
'''
orig_min, orig_max = scipy.ndimage.extrema(image)[:2]
#
# We add 1/2 bit noise to an 8 bit image to give the log a bottom
#
limage = image.copy()
noise_min = orig_min + (orig_max-orig_min)/256.0+np.finfo(image.dtype).eps
limage[limage < noise_min] = noise_min
d = { "noise_min":noise_min}
limage = np.log(limage)
log_min, log_max = scipy.ndimage.extrema(limage)[:2]
d["log_min"] = log_min
d["log_max"] = log_max
# NOTE(review): `stretch` is a module-level helper — presumably rescales
# the log image to [0, 1]; confirm against its definition.
return stretch(limage), d
def log_transform(image)
Renormalize image intensities to log space Returns a tuple of transformed image and a dictionary to be passed into inverse_log_transform. The minimum and maximum from the dictionary can be applied to an image by the inverse_log_transform to convert it back to its former intensity values.
5.306401
3.023993
1.754767
def numpy_histogram(a, bins=10, range=None, normed=False, weights=None):
    """A version of numpy.histogram that accounts for numpy's version

    Very old numpy took a ``new`` flag selecting the modern bin
    semantics; 1.6-1.23 had a ``normed`` parameter; 1.24+ replaced the
    fourth positional slot with ``density`` and dispatches np.histogram
    through a C wrapper that has no ``__code__``. All variants are
    called so the caller always gets the modern semantics.
    """
    try:
        args = inspect.getargs(np.histogram.__code__)[0]
    except AttributeError:
        # Modern numpy: np.histogram is an _ArrayFunctionDispatcher
        # without __code__; it has neither "new" nor "normed".
        args = []
    if args and args[-1] == "new":
        return np.histogram(a, bins, range, normed, weights, new=True)
    if "normed" in args:
        return np.histogram(a, bins, range, normed, weights)
    # numpy >= 1.24: the fourth positional argument is "density".
    # False and None behave identically for an un-normalized histogram.
    return np.histogram(a, bins, range, normed or None, weights)
def numpy_histogram(a, bins=10, range=None, normed=False, weights=None)
A version of numpy.histogram that accounts for numpy's version
4.495798
3.266151
1.376482
# Returns (rank-order image as uint32, array of the original value for each
# rank). The optional nbins cap repeatedly merges the lowest-count adjacent
# bins until fewer than nbins distinct ranks remain.
flat_image = image.ravel()
sort_order = flat_image.argsort().astype(np.uint32)
flat_image = flat_image[sort_order]
sort_rank = np.zeros_like(sort_order)
# A rank increments exactly where consecutive sorted values differ.
is_different = flat_image[:-1] != flat_image[1:]
np.cumsum(is_different, out=sort_rank[1:])
original_values = np.zeros((sort_rank[-1]+1,),image.dtype)
original_values[0] = flat_image[0]
original_values[1:] = flat_image[1:][is_different]
int_image = np.zeros_like(sort_order)
# Scatter the ranks back into the original pixel order.
int_image[sort_order] = sort_rank
if nbins is not None:
    max_ranked_data = np.max(int_image)
    while max_ranked_data >= nbins:
        #
        # Decimate the bins until there are fewer than nbins
        #
        hist = np.bincount(int_image)
        #
        # Rank the bins from lowest count to highest
        order = np.argsort(hist)
        #
        # find enough to maybe decimate to nbins
        #
        candidates = order[:max_ranked_data+2-nbins]
        to_delete = np.zeros(max_ranked_data+2, bool)
        to_delete[candidates] = True
        #
        # Choose candidates that are either not next to others
        # or have an even index so as not to delete adjacent bins
        #
        td_mask = to_delete[:-1] & (
            ((np.arange(max_ranked_data+1) & 2) == 0) |
            (~ to_delete[1:]))
        if td_mask[0]:
            td_mask[0] = False
        #
        # A value to be deleted has the same index as the following
        # value and the two end up being merged
        #
        rd_translation = np.cumsum(~td_mask)-1
        #
        # Translate the rankings to the new space
        #
        int_image = rd_translation[int_image]
        #
        # Eliminate the bins with low counts
        #
        original_values = original_values[~td_mask]
        max_ranked_data = len(original_values)-1
return (int_image.reshape(image.shape), original_values)
def rank_order(image, nbins=None)
Return an image of the same shape where each pixel has the rank-order value of the corresponding pixel in the image. The returned image's elements are of type np.uint32 which simplifies processing in C code.
3.869862
3.895051
0.993533
def normalized_per_object(image, labels):
    """Normalize the intensities of each object to the [0, 1] range."""
    nobjects = labels.max()
    object_ids = np.arange(nobjects + 1)
    lmin, lmax = scind.extrema(image, labels, object_ids)[:2]
    # A divisor of 1 is substituted for objects of uniform intensity so
    # the division below is safe.
    span = np.ones((nobjects + 1,))
    has_range = lmax > lmin
    span[has_range] = (lmax - lmin)[has_range]
    return (image - lmin[labels]) / span[labels]
def normalized_per_object(image, labels)
Normalize the intensities of each object to the [0, 1] range.
4.219025
3.945055
1.069446
def quantize(image, nlevels):
    """Quantize an image into integers 0, 1, ..., nlevels - 1.

    image -- a numpy array of type float, range [0, 1]
    nlevels -- an integer
    """
    step = 1.0 / nlevels
    levels = np.array(image // step, dtype='i1')
    # A pixel exactly at 1.0 would land in bin nlevels; clip it back.
    return levels.clip(0, nlevels - 1)
def quantize(image, nlevels)
Quantize an image into integers 0, 1, ..., nlevels - 1. image -- a numpy array of type float, range [0, 1] nlevels -- an integer
5.376479
6.622518
0.811848
labels = labels.astype(int)
nlevels = quantized_image.max() + 1
nobjects = labels.max()
# Normalize the offset so scale_i is non-negative; a negative scale_j is
# handled by the final branch below.
if scale_i < 0:
    scale_i = -scale_i
    scale_j = -scale_j
# Build two shifted views, image_a and its neighbor image_b at offset
# (scale_i, scale_j), plus the matching label views.
if scale_i == 0 and scale_j > 0:
    image_a = quantized_image[:, :-scale_j]
    image_b = quantized_image[:, scale_j:]
    labels_ab = labels_a = labels[:, :-scale_j]
    labels_b = labels[:, scale_j:]
elif scale_i > 0 and scale_j == 0:
    image_a = quantized_image[:-scale_i, :]
    image_b = quantized_image[scale_i:, :]
    labels_ab = labels_a = labels[:-scale_i, :]
    labels_b = labels[scale_i:, :]
elif scale_i > 0 and scale_j > 0:
    image_a = quantized_image[:-scale_i, :-scale_j]
    image_b = quantized_image[scale_i:, scale_j:]
    labels_ab = labels_a = labels[:-scale_i, :-scale_j]
    labels_b = labels[scale_i:, scale_j:]
else:
    # scale_j should be negative
    image_a = quantized_image[:-scale_i, -scale_j:]
    image_b = quantized_image[scale_i:, :scale_j]
    labels_ab = labels_a = labels[:-scale_i, -scale_j:]
    labels_b = labels[scale_i:, :scale_j]
# Only pairs of pixels inside the same (non-background) object count.
equilabel = ((labels_a == labels_b) & (labels_a > 0))
if np.any(equilabel):
    # Encode (object, level_a, level_b) into a single index so one
    # bincount accumulates all co-occurrence counts at once.
    Q = (nlevels*nlevels*(labels_ab[equilabel]-1)+
         nlevels*image_a[equilabel]+image_b[equilabel])
    R = np.bincount(Q)
    if R.size != nobjects*nlevels*nlevels:
        S = np.zeros(nobjects*nlevels*nlevels-R.size)
        R = np.hstack((R, S))
    P = R.reshape(nobjects, nlevels, nlevels)
    # NOTE(review): `fix` is a module-level helper — presumably
    # fixup_scipy_ndimage_result, converting scipy's output to an
    # ndarray; confirm.
    pixel_count = fix(scind.sum(equilabel, labels_ab,
                                np.arange(nobjects, dtype=np.int32)+1))
    pixel_count = np.tile(pixel_count[:,np.newaxis,np.newaxis],
                          (1,nlevels,nlevels))
    return (P.astype(float) / pixel_count.astype(float), nlevels)
else:
    return np.zeros((nobjects, nlevels, nlevels)), nlevels
def cooccurrence(quantized_image, labels, scale_i=3, scale_j=0)
Calculates co-occurrence matrices for all the objects in the image. Return an array P of shape (nobjects, nlevels, nlevels) such that P[o, :, :] is the cooccurence matrix for object o. quantized_image -- a numpy array of integer type labels -- a numpy array of integer type scale -- an integer For each object O, the cooccurrence matrix is defined as follows. Given a row number I in the matrix, let A be the set of pixels in O with gray level I, excluding pixels in the rightmost S columns of the image. Let B be the set of pixels in O that are S pixels to the right of a pixel in A. Row I of the cooccurence matrix is the gray-level histogram of the pixels in B.
2.057046
2.037417
1.009634
def H3(self):
    """Correlation (Haralick feature 3)."""
    prod = np.dot(self.levels[:, np.newaxis] + 1, self.levels[np.newaxis] + 1)
    stacked = np.tile(prod[np.newaxis], (self.nobjects, 1, 1))
    expectation = (stacked * self.P).sum(2).sum(1)
    result = (expectation - self.mux * self.muy) / (self.sigmax * self.sigmay)
    # Objects with zero variance would yield +/-inf; report 0 instead.
    result[np.isinf(result)] = 0
    return result
def H3(self)
Correlation.
4.326302
4.126328
1.048463
def H5(self):
    """Inverse difference moment (Haralick feature 5)."""
    weights = 1 + toeplitz(self.levels) ** 2
    tiled = np.tile(weights[np.newaxis], (self.nobjects, 1, 1))
    return (self.P / tiled).sum(2).sum(1)
def H5(self)
Inverse difference moment.
9.045368
6.529066
1.3854
def H6(self):
    """Sum average (Haralick feature 6)."""
    # Cached: the value is reused by H7.
    if not hasattr(self, '_H6'):
        shifted = self.rlevels2 + 2
        self._H6 = (shifted * self.p_xplusy).sum(1)
    return self._H6
def H6(self)
Sum average.
11.780563
8.027865
1.467459
def H7(self):
    """Sum variance (error in Haralick's original paper here)."""
    mean_sum = np.tile(self.H6(), (self.rlevels2.shape[1], 1)).transpose()
    deviation = (self.rlevels2 + 2) - mean_sum
    return (deviation ** 2 * self.p_xplusy).sum(1)
def H7(self)
Sum variance (error in Haralick's original paper here).
16.881134
7.26288
2.324303
def H8(self):
    """Sum entropy (Haralick feature 8)."""
    p = self.p_xplusy
    # eps keeps log() finite where a probability is zero.
    return -(p * np.log(p + self.eps)).sum(1)
def H8(self)
Sum entropy.
11.965335
7.319232
1.63478
def H9(self):
    """Entropy (Haralick feature 9)."""
    # Cached: the value is reused by the information-measure features.
    if not hasattr(self, '_H9'):
        p = self.P
        self._H9 = -(p * np.log(p + self.eps)).sum(2).sum(1)
    return self._H9
def H9(self)
Entropy.
4.504385
4.073174
1.105866
def H10(self):
    """Difference variance (Haralick feature 10)."""
    mean_diff = (self.rlevels * self.p_xminusy).sum(1)
    centered = self.rlevels - np.tile(mean_diff, (self.nlevels, 1)).transpose()
    return (self.p_xminusy * centered ** 2).sum(1)
def H10(self)
Difference variance.
8.270352
6.319516
1.3087
def H11(self):
    """Difference entropy (Haralick feature 11)."""
    p = self.p_xminusy
    # eps keeps log() finite where a probability is zero.
    return -(p * np.log(p + self.eps)).sum(1)
def H11(self)
Difference entropy.
11.189375
7.292514
1.534364
def H12(self):
    """Information measure of correlation 1 (Haralick feature 12)."""
    denom = np.vstack((self.hx, self.hy)).max(0)
    return (self.H9() - self.hxy1) / denom
def H12(self)
Information measure of correlation 1.
17.561493
8.739474
2.009445
def H13(self):
    """Information measure of correlation 2 (Haralick feature 13).

    An imaginary result has been encountered once in the Matlab
    version.  The reason is unclear.
    """
    inner = -2 * (self.hxy2 - self.H9())
    return np.sqrt(1 - np.exp(inner))
def H13(self)
Information measure of correlation 2.
27.028978
15.464663
1.74779
def construct_zernike_lookuptable(zernike_indexes):
    """Return a lookup table of the sum-of-factorial part of the radial
    polynomial of the zernike indexes passed

    zernike_indexes - an Nx2 array of the Zernike polynomials to be
                      computed.
    """
    n_max = np.max(zernike_indexes[:, 0])
    factorial = np.ones((1 + n_max,), dtype=float)
    # np.cumproduct was removed in NumPy 2.0; np.cumprod is identical.
    factorial[1:] = np.cumprod(np.arange(1, 1 + n_max, dtype=float))
    width = int(n_max // 2 + 1)
    lut = np.zeros((zernike_indexes.shape[0], width), dtype=float)
    for idx, (n, m) in enumerate(zernike_indexes):
        # Terms alternate in sign: (-1)**k * (n-k)! / (k! ((n+m)/2-k)! ((n-m)/2-k)!)
        alt = 1
        npmh = (n + m) // 2
        nmmh = (n - m) // 2
        for k in range(0, nmmh + 1):
            lut[idx, k] = \
                (alt * factorial[n - k] /
                 (factorial[k] * factorial[npmh - k] * factorial[nmmh - k]))
            alt = -alt
    return lut
def construct_zernike_lookuptable(zernike_indexes)
Return a lookup table of the sum-of-factorial part of the radial polynomial of the zernike indexes passed zernike_indexes - an Nx2 array of the Zernike polynomials to be computed.
3.078954
3.069936
1.002938
def construct_zernike_polynomials(x, y, zernike_indexes, mask=None, weight=None):
    """Return the zernike polynomials for all objects in an image

    x - the X distance of a point from the center of its object
    y - the Y distance of a point from the center of its object
    zernike_indexes - an Nx2 array of the Zernike polynomials to be computed.
    mask - a mask with same shape as X and Y of the points to consider
    weight - weightings of points with the same shape as X and Y (default
             weight on each point is 1).

    returns a height x width x N array of complex numbers which are the
    e^i portion of the sine and cosine of the Zernikes
    """
    if x.shape != y.shape:
        raise ValueError("X and Y must have the same shape")
    if mask is None:
        pass
    elif mask.shape != x.shape:
        raise ValueError("The mask must have the same shape as X and Y")
    else:
        x = x[mask]
        y = y[mask]
        if weight is not None:
            weight = weight[mask]
    lut = construct_zernike_lookuptable(zernike_indexes)  # precompute poly. coeffs.
    nzernikes = zernike_indexes.shape[0]
    # compute radii
    r_square = np.square(x)                   # r_square = x**2
    np.add(r_square, np.square(y), out=r_square)  # r_square = x**2 + y**2
    # z = y + 1j*x
    # each Zernike polynomial is poly(r)*(r**m * np.exp(1j*m*phi)) ==
    #    poly(r)*(y + 1j*x)**m
    # np.complex was removed in NumPy 1.24; np.complex128 is the dtype it
    # aliased, and builtin complex the scalar.
    z = np.empty(x.shape, np.complex128)
    np.copyto(z.real, y)
    np.copyto(z.imag, x)
    # preallocate buffers
    s = np.empty_like(x)
    zf = np.zeros((nzernikes,) + x.shape, np.complex128)
    z_pows = {}
    for idx, (n, m) in enumerate(zernike_indexes):
        s[:] = 0
        if not m in z_pows:
            if m == 0:
                z_pows[m] = complex(1.0)
            else:
                z_pows[m] = z if m == 1 else (z ** m)
        z_pow = z_pows[m]
        # use Horner scheme
        for k in range((n - m) // 2 + 1):
            s *= r_square
            s += lut[idx, k]
        s[r_square > 1] = 0
        if weight is not None:
            s *= weight.astype(s.dtype)
        if m == 0:
            np.copyto(zf[idx], s)             # zf[idx] = s
        else:
            np.multiply(s, z_pow, out=zf[idx])  # zf[idx] = s*exp_term
    if mask is None:
        result = zf.transpose(tuple(range(1, 1 + x.ndim)) + (0,))
    else:
        result = np.zeros(mask.shape + (nzernikes,), np.complex128)
        result[mask] = zf.transpose(tuple(range(1, 1 + x.ndim)) + (0,))
    return result
def construct_zernike_polynomials(x, y, zernike_indexes, mask=None, weight=None)
Return the zerike polynomials for all objects in an image x - the X distance of a point from the center of its object y - the Y distance of a point from the center of its object zernike_indexes - an Nx2 array of the Zernike polynomials to be computed. mask - a mask with same shape as X and Y of the points to consider weight - weightings of points with the same shape as X and Y (default weight on each point is 1). returns a height x width x N array of complex numbers which are the e^i portion of the sine and cosine of the Zernikes
3.014352
3.031576
0.994318
def score_zernike(zf, radii, labels, indexes=None):
    """Score the output of construct_zernike_polynomials

    zf - the output of construct_zernike_polynomials which is I x J x K
         where K is the number of zernike polynomials computed
    radii - a vector of the radius of each of N labeled objects
    labels - a label matrix

    outputs a N x K matrix of the scores of each of the Zernikes for
    each labeled object.
    """
    if indexes is None:
        indexes = np.arange(1, np.max(labels) + 1, dtype=np.int32)
    else:
        indexes = np.array(indexes, dtype=np.int32)
    radii = np.asarray(radii, dtype=float)
    n_objects = radii.size
    n_zernikes = zf.shape[2]
    score = np.zeros((n_objects, n_zernikes))
    if n_objects == 0:
        return score
    # normalize each magnitude by the object's area (pi * r**2)
    areas = np.square(radii)
    areas *= np.pi
    for zi in range(n_zernikes):
        plane = zf[:, :, zi]
        re = fixup_scipy_ndimage_result(
            scipy.ndimage.sum(plane.real, labels, indexes))
        im = fixup_scipy_ndimage_result(
            scipy.ndimage.sum(plane.imag, labels, indexes))
        # magnitude = sqrt(re**2 + im**2) / area, computed in place
        np.square(re, out=re)
        np.square(im, out=im)
        magnitude = re + im
        np.sqrt(magnitude, out=magnitude)
        magnitude /= areas
        score[:, zi] = magnitude
    return score
2.339658
2.380074
0.983019
def zernike(zernike_indexes, labels, indexes):
    """Compute the Zernike features for the labels with the label #s in indexes

    returns the score per labels and an array of one image per zernike feature
    """
    #
    # reverse_indexes maps a label number to its position in "indexes"
    # (and any similarly shaped result vector); -1 marks labels that
    # are not to be processed.
    #
    indexes = np.array(indexes, dtype=np.int32)
    nindexes = len(indexes)
    reverse_indexes = np.empty((np.max(indexes) + 1,), int)
    reverse_indexes.fill(-1)
    reverse_indexes[indexes] = np.arange(indexes.shape[0], dtype=int)
    mask = reverse_indexes[labels] != -1

    centers, radii = minimum_enclosing_circle(labels, indexes)
    ny, nx = labels.shape[0:2]
    y, x = np.asarray(
        np.mgrid[0:ny - 1:complex(0, ny), 0:nx - 1:complex(0, nx)],
        dtype=float)
    xm = x[mask]
    ym = y[mask]
    lm = labels[mask]
    #
    # The Zernikes are inscribed in circles with points labeled by
    # their fractional distance (-1 <= x,y <= 1) from the center.
    # So we transform x and y by subtracting the center and
    # dividing by the radius
    #
    rev_ind = reverse_indexes[lm]
    ym -= centers[rev_ind, 0]
    ym /= radii[rev_ind]
    xm -= centers[rev_ind, 1]
    xm /= radii[rev_ind]
    #
    # Blow up ym and xm into new x and y vectors
    #
    x = np.zeros_like(x)
    x[mask] = xm
    y = np.zeros_like(y)
    y[mask] = ym
    #
    # Pass the resulting x and y through the rest of Zernikeland
    #
    zf = construct_zernike_polynomials(x, y, zernike_indexes, mask)
    return score_zernike(zf, radii, labels, indexes)
3.98122
3.888076
1.023956
def get_zernike_indexes(limit=10):
    """Return a list of all Zernike indexes up to the given limit

    limit - return all Zernike indexes with N less than this limit

    returns an array of 2-tuples. Each tuple is organized as (N,M).
    The Zernikes are stored as complex numbers with the real part
    being (N,M) and the imaginary being (N,-M)
    """
    def all_pairs(n_limit):
        # yield n then m flat, so the pairs can be rebuilt by a reshape
        for n in range(n_limit):
            for m in range(n % 2, n + 1, 2):
                yield n
                yield m

    flat = np.fromiter(all_pairs(limit), np.intc)
    return flat.reshape((len(flat) // 2, 2))
3.09361
3.34795
0.924031
def propagate(image, labels, mask, weight):
    """Propagate the labels to the nearest pixels

    image - gives the Z height when computing distance
    labels - the labeled image pixels
    mask - only label pixels within the mask
    weight - the weighting of x/y distance vs z distance
             high numbers favor x/y, low favor z

    returns a label matrix and the computed distances
    """
    if image.shape != labels.shape:
        raise ValueError("Image shape %s != label shape %s" %
                         (repr(image.shape), repr(labels.shape)))
    if image.shape != mask.shape:
        raise ValueError("Image shape %s != mask shape %s" %
                         (repr(image.shape), repr(mask.shape)))
    labels_out = np.zeros(labels.shape, np.int32)
    # -1 means "not yet reached"; seed (labeled) pixels start at distance 0
    distances = -np.ones(labels.shape, np.float64)
    distances[labels > 0] = 0
    labels_and_mask = np.logical_and(labels != 0, mask)
    coords = np.argwhere(labels_and_mask)
    i1, i2 = _propagate.convert_to_ints(0.0)
    ncoords = coords.shape[0]
    # initial priority-queue rows: (distance as int pair, label, i, j)
    pq = np.column_stack((
        np.ones((ncoords,), int) * i1,
        np.ones((ncoords,), int) * i2,
        labels[labels_and_mask],
        coords,
    ))
    _propagate.propagate(
        np.ascontiguousarray(image, np.float64),
        np.ascontiguousarray(pq, np.int32),
        np.ascontiguousarray(mask, np.int8),
        labels_out,
        distances,
        float(weight),
    )
    # keep the original label at every seed pixel
    labels_out[labels > 0] = labels[labels > 0]
    return labels_out, distances
2.607027
2.573641
1.012972
def slow_reduction_transfer(ii, j, idx, count, x, u, v, c):
    """Perform the reduction transfer step from the Jonker-Volgenant algorithm

    The data is input in a ragged array in terms of "i" structured as a
    vector of values for each i,j combination where:

    ii - the i to be reduced
    j - the j-index of every entry
    idx - the index of the first entry for each i
    count - the number of entries for each i
    x - the assignment of j to i
    u - the dual variable "u" which will be updated. It should be
        initialized to zero for the first reduction transfer.
    v - the dual variable "v" which will be reduced in-place
    c - the cost for each entry.

    The code described in the paper is:

    for each assigned row i do begin
       j1:=x[i]; u=min {c[i,j]-v[j] | j=1..n, j != j1};
       v[j1]:=v[j1]-(u-u[i]);
       u[i] = u;
    end;

    The authors note that reduction transfer can be applied in later stages
    of the algorithm but does not seem to provide a substantial benefit
    in speed.
    """
    for row in ii:
        j1 = x[row]
        lo, hi = idx[row], idx[row] + count[row]
        cols = j[lo:hi]
        # minimum reduced cost over all columns except the assigned one
        reduced = c[lo:hi] - v[cols]
        new_u = np.min(reduced[cols != j1])
        v[j1] += u[row] - new_u
        u[row] = new_u
7.326108
1.371971
5.339843
def slow_augmenting_row_reduction(n, ii, jj, idx, count, x, y, u, v, c):
    """Perform the augmenting row reduction step from the Jonker-Volgenaut
    algorithm

    n - the number of i and j in the linear assignment problem
    ii - the unassigned i
    jj - the j-index of every entry in c
    idx - the index of the first entry for each i
    count - the number of entries for each i
    x - the assignment of j to i
    y - the assignment of i to j
    u - the dual variable "u" which will be updated. It should be
        initialized to zero for the first reduction transfer.
    v - the dual variable "v" which will be reduced in-place
    c - the cost for each entry.

    returns the new unassigned i
    """
    # Reference: procedure AUGMENTING ROW REDUCTION in Jonker & Volgenant,
    # Computing 38 (1987).
    # NOTE(review): the paper also sets u[i] := u2 in this step, which this
    # code does not do -- confirm against the caller before changing.
    pending = list(ii)
    pos = 0
    limit = len(pending)
    freed = []
    while pos < limit:
        i = pending[pos]
        pos += 1
        lo, hi = idx[i], idx[i] + count[i]
        cols = jj[lo:hi]
        reduced = c[lo:hi] - v[cols]
        order = np.lexsort([reduced])
        u1, u2 = reduced[order[:2]]  # two smallest reduced costs
        j1, j2 = cols[order[:2]]     # and their columns
        i1 = y[j1]
        if u1 < u2:
            # unique minimum: transfer the slack onto v[j1]
            v[j1] = v[j1] - (u2 - u1)
        elif i1 != n:
            # tie and j1 already assigned: take the runner-up column
            j1 = j2
            i1 = y[j1]
        if i1 != n:
            # j1 was assigned to row i1 -- displace that row
            if u1 < u2:
                # revisit the displaced row immediately
                pos -= 1
                pending[pos] = i1
            else:
                freed.append(i1)
        x[i] = j1
        y[j1] = i
    return np.array(freed, np.uint32)
6.279069
4.131418
1.519834
def linkage_group_ordering(linkage_records):
    """Convert degenerate linkage records into ordered info_frags-like
    records for comparison purposes.

    Each key of the returned dict is a linkage group; each value is a list
    of bins of the form [init_contig, -3, start, end, 1].  The -3 fragment
    id marks regions added from linkage data, and orientation is always 1.
    """
    ordering = {}
    for group_name, group in itertools.groupby(
        linkage_records, operator.itemgetter(0)
    ):
        # record layout: (group, start, end, ..., init_contig)
        ordering[group_name] = [
            [rec[-1], -3, rec[1], rec[2], 1] for rec in group
        ]
    return ordering
3.081919
2.703942
1.139788
def parse_info_frags(info_frags):
    """Import an info_frags.txt file and return a dictionary where each key
    is a newly formed scaffold and each value is the list of bins and
    their origin on the initial scaffolding.
    """
    scaffolds = {}
    current = None
    with open(info_frags, "r") as handle:
        for line in handle:
            if line.startswith(">"):
                # header line: start a new scaffold
                current = str(line[1:-1])
                scaffolds[current] = []
            elif line.startswith("init_contig"):
                # column header line
                continue
            else:
                (init_contig, id_frag, orientation, pos_start, pos_end) = str(
                    line[:-1]
                ).split("\t")
                start, end = int(pos_start), int(pos_end)
                ori = int(orientation)
                frag = int(id_frag)
                assert start < end
                assert ori in {-1, 1}
                scaffolds[current].append([init_contig, frag, start, end, ori])
    return scaffolds
2.702206
2.467373
1.095175
def parse_bed(bed_file):
    """Import a BED file (where the data entries are analogous to what may
    be expected in an info_frags.txt file) and return a scaffold
    dictionary, similarly to parse_info_frags.
    """
    scaffolds = {}
    with open(bed_file) as handle:
        for line in handle:
            # NOTE(review): [:7] looks like it should be [:6]; standard
            # 6-column BED input behaves the same either way.
            chrom, start, end, query, qual, strand = line.split()[:7]
            if strand == "+":
                ori = 1
            elif strand == "-":
                ori = -1
            else:
                raise ValueError(
                    "Error when parsing strand "
                    "orientation: {}".format(strand)
                )
            # keep only entries with a positive mapping quality
            if int(qual) > 0:
                entry = [query, -2, int(start), int(end), ori]
                scaffolds.setdefault(chrom, []).append(entry)
    return scaffolds
3.006172
2.890983
1.039844
def correct_scaffolds(scaffolds, corrector):
    """Unfinished

    Merge overlapping bins within each scaffold and absorb overlapping
    bins from the corrector records.  Each corrector bin is consumed at
    most once.
    """

    def are_overlapping(bin1, bin2):
        # True when both bins are on the same initial contig and their
        # [start, end] intervals intersect
        if bin2 is None:
            return False
        init1, _, start1, end1, _ = bin1
        init2, _, start2, end2, _ = bin2
        if init1 != init2:
            return False
        return (start2 <= start1 <= end2) or (start1 <= start2 <= end1)

    def merge_bins(bin1, bin2, ignore_ori=True):
        # merge two overlapping bins into one spanning bin with id -3
        init1, _, start1, end1, ori1 = bin1
        init2, _, start2, end2, ori2 = bin2
        assert init1 == init2
        start = min(start1, start2)
        end = max(end1, end2)
        if ignore_ori:
            ori = ori1
        else:
            # keep the orientation of the longer bin
            ori = ori2 if (end2 - start2) > (end1 - start1) else ori1
        return [init1, -3, start, end, ori]

    new_scaffolds = {}
    corrector_bins = copy.deepcopy(corrector)
    for name, scaffold in scaffolds.items():
        new_scaffold = []
        for _, block in itertools.groupby(scaffold, operator.itemgetter(0)):
            merged_bin = None
            for my_bin in block:
                if are_overlapping(my_bin, merged_bin):
                    merged_bin = merge_bins(my_bin, merged_bin)
                    continue
                if merged_bin is not None:
                    new_scaffold.append(merged_bin)
                merged_bin = my_bin
                # Absorb (and consume) every corrector bin overlapping this
                # one.  The original popped elements from the list it was
                # iterating, which silently skipped the element following
                # each removal; rebuilding the list avoids that bug.
                remaining = []
                for correct_bin in corrector_bins:
                    if are_overlapping(my_bin, correct_bin):
                        merged_bin = merge_bins(my_bin, correct_bin)
                    else:
                        remaining.append(correct_bin)
                corrector_bins = remaining
            if merged_bin is not None:
                new_scaffold.append(merged_bin)
        new_scaffolds[name] = new_scaffold
    return new_scaffolds
2.272612
2.262498
1.004471
def format_info_frags(info_frags):
    """Seamlessly run on either scaffold dictionaries or info_frags.txt
    files without the caller having to check the input type first.
    """
    if isinstance(info_frags, dict):
        return info_frags
    try:
        return parse_info_frags(info_frags)
    except OSError:
        print("Error when opening info_frags.txt")
        raise
3.765832
2.906075
1.295848
def plot_info_frags(scaffolds):
    """A crude way to visualize new scaffolds according to their origin on
    the initial scaffolding.  Each scaffold spawns a new plot.
    Orientations are represented by different colors.
    """
    scaffolds = format_info_frags(scaffolds)
    for scaffold in scaffolds.values():
        plt.figure()
        xs = range(len(scaffold))
        colors = []
        contig_rows = {}  # init_contig -> y coordinate
        ys = []
        for current_bin in scaffold:
            # red for forward bins, green for reverse ones
            colors.append("r" if current_bin[4] > 0 else "g")
            contig = current_bin[0]
            if contig not in contig_rows:
                contig_rows[contig] = len(contig_rows)
            ys.append(contig_rows[contig])
        plt.scatter(xs, ys, c=colors)
        plt.show()
3.018416
3.042775
0.991994
def remove_spurious_insertions(scaffolds):
    """Remove all bins whose left and right neighbors belong to the same,
    different scaffold.

    E.g. a lone contig23 bin sitting between two contig1 bins is dropped.
    Scaffolds with two bins or fewer are returned unchanged.
    """
    scaffolds = format_info_frags(scaffolds)
    cleaned = {}
    for name, scaffold in scaffolds.items():
        if len(scaffold) <= 2:
            # can't identify insertions with fewer than three bins
            cleaned[name] = copy.deepcopy(scaffold)
            continue
        last = len(scaffold) - 1
        kept = []
        for i, current in enumerate(scaffold):
            if i == 0:
                # leading edge: drop *-- when the next two bins agree
                spurious = (
                    scaffold[1][0] != current[0]
                    and scaffold[1][0] == scaffold[2][0]
                )
            elif i == last:
                # trailing edge: drop --* when the previous two bins agree
                spurious = (
                    scaffold[last - 1][0] != current[0]
                    and scaffold[last - 1][0] == scaffold[last - 2][0]
                )
            else:
                # interior: drop -*- when both neighbors agree
                spurious = (
                    scaffold[i - 1][0] == scaffold[i + 1][0]
                    and scaffold[i - 1][0] != current[0]
                )
            if not spurious:
                kept.append(current)
        cleaned[name] = kept
    return cleaned
2.304467
2.319477
0.993529
def rearrange_intra_scaffolds(scaffolds):
    """Rearrange all bins within each scaffold such that all bins belonging
    to the same initial contig are grouped together in the same order.
    When two such groups are found, the smaller one is moved to the
    larger one.
    """
    scaffolds = format_info_frags(scaffolds)
    new_scaffolds = {}
    for name, scaffold in scaffolds.items():
        # ordering: contig name -> (rank of its biggest block, block size)
        ordering = {}
        rank = 0
        blocks = []
        for _, block_iter in itertools.groupby(
            scaffold, operator.itemgetter(0)
        ):
            block = list(block_iter)
            blocks.append(block)
            contig = block[0][0]
            size = len(block)
            previous = ordering.get(contig)
            if previous is None or size > previous[1]:
                ordering[contig] = (rank, size)
            rank += 1
        # stable sort: blocks of the same contig keep their relative order
        sorted_blocks = sorted(blocks, key=lambda b: ordering[b[0][0]][0])
        flat = [my_bin for block in sorted_blocks for my_bin in block]
        new_scaffolds[name] = copy.deepcopy(flat)
    return new_scaffolds
2.530668
2.527063
1.001427
def write_fasta(
    init_fasta, info_frags, output=DEFAULT_NEW_GENOME_NAME, junction=False
):
    """Convert an info_frags.txt file into a fasta file given a reference.

    Optionally adds junction sequences to reflect the possibly missing
    base pairs between two newly joined scaffolds.
    """
    init_genome = {
        record.id: record.seq for record in SeqIO.parse(init_fasta, "fasta")
    }
    new_records = []
    with open(info_frags, "r") as handle:
        current_seq = ""
        # NOTE(review): the record id keeps its trailing newline
        # (line[1:] is not stripped) -- kept to preserve behavior.
        current_id = None
        previous_contig = None
        for line in handle:
            if line.startswith(">"):
                previous_contig = None
                if current_id is not None:
                    # flush the scaffold we just finished
                    new_records.append(
                        SeqRecord(current_seq, id=current_id, description="")
                    )
                current_seq = ""
                current_id = str(line[1:])
            elif line.startswith("init_contig"):
                previous_contig = None
            else:
                (init_contig, _, orientation, pos_start, pos_end) = str(
                    line[:-1]
                ).split("\t")
                start = int(pos_start)
                end = int(pos_end)
                ori = int(orientation)
                assert start < end
                assert ori in {-1, 1}
                seq_to_add = init_genome[init_contig][start:end]
                if ori == 1:
                    current_seq += seq_to_add
                elif ori == -1:
                    current_seq += seq_to_add.reverse_complement()
                if junction and previous_contig not in {init_contig, None}:
                    # NOTE(review): the spacer is prepended to the whole
                    # accumulated sequence rather than inserted at the
                    # junction point -- confirm intent before changing.
                    error_was_raised = False
                    try:
                        extra_seq = Seq(junction, IUPAC.ambiguous_dna)
                        current_seq = extra_seq + current_seq
                    except TypeError:
                        if not error_was_raised:
                            print("Invalid junction sequence")
                            error_was_raised = True
                previous_contig = init_contig
    new_records.append(SeqRecord(current_seq, id=current_id, description=""))
    SeqIO.write(new_records, output, "fasta")
2.311328
2.269135
1.018594
def is_block(bin_list):
    """Check if a bin list has exclusively consecutive bin ids."""
    ids = {my_bin[1] for my_bin in bin_list}
    # consecutive iff the ids exactly fill the [min, max] range
    return ids == set(range(min(ids), max(ids) + 1))
3.265809
2.76362
1.181714
def internal2external_grad(xi, bounds):
    """Calculate the internal to external gradient

    Calculates the partial of external over internal
    """
    ge = np.empty_like(xi)
    for i, (v, bound) in enumerate(zip(xi, bounds)):
        a, b = bound[0], bound[1]  # (minimum, maximum)
        # "is None" instead of "== None": identity test is the correct
        # (and numpy-safe) way to check for a missing bound
        if a is None and b is None:    # No constraints
            ge[i] = 1.0
        elif b is None:                # only min
            ge[i] = v / np.sqrt(v ** 2 + 1)
        elif a is None:                # only max
            ge[i] = -v / np.sqrt(v ** 2 + 1)
        else:                          # both min and max
            ge[i] = (b - a) * np.cos(v) / 2.
    return ge
2.562318
2.582968
0.992005
def internal2external(xi, bounds):
    """Convert a series of internal variables to external variables"""
    xe = np.empty_like(xi)
    for i, (v, bound) in enumerate(zip(xi, bounds)):
        a, b = bound[0], bound[1]  # (minimum, maximum)
        # "is None" instead of "== None": identity test is the correct
        # (and numpy-safe) way to check for a missing bound
        if a is None and b is None:    # No constraints
            xe[i] = v
        elif b is None:                # only min
            xe[i] = a - 1. + np.sqrt(v ** 2. + 1.)
        elif a is None:                # only max
            xe[i] = b + 1. - np.sqrt(v ** 2. + 1.)
        else:                          # both min and max
            xe[i] = a + ((b - a) / 2.) * (np.sin(v) + 1.)
    return xe
2.357812
2.371768
0.994116
def external2internal(xe, bounds):
    """Convert a series of external variables to internal variables"""
    xi = np.empty_like(xe)
    for i, (v, bound) in enumerate(zip(xe, bounds)):
        a, b = bound[0], bound[1]  # (minimum, maximum)
        # "is None" instead of "== None": identity test is the correct
        # (and numpy-safe) way to check for a missing bound
        if a is None and b is None:    # No constraints
            xi[i] = v
        elif b is None:                # only min
            xi[i] = np.sqrt((v - a + 1.) ** 2. - 1)
        elif a is None:                # only max
            xi[i] = np.sqrt((b - v + 1.) ** 2. - 1)
        else:                          # both min and max
            xi[i] = np.arcsin((2. * (v - a) / (b - a)) - 1.)
    return xi
2.411357
2.427361
0.993407
def calc_cov_x(infodic, p):
    """Calculate cov_x from fjac, ipvt and p as is done in leastsq"""
    n = len(p)
    # adapted from the leastsq function in scipy/optimize/minpack.py
    perm = np.take(np.eye(n), infodic["ipvt"] - 1, 0)
    upper = np.triu(np.transpose(infodic["fjac"])[:n, :])
    R = np.dot(upper, perm)
    try:
        return np.linalg.inv(np.dot(np.transpose(R), R))
    except LinAlgError:
        # singular matrix: the covariance cannot be estimated
        return None
3.540187
2.997216
1.181158
def leastsqbound(func, x0, bounds, args=(), **kw):
    """Constrained multivariant Levenberg-Marquard optimization

    Minimize the sum of squares of a given function using the
    Levenberg-Marquard algorithm. Contraints on parameters are inforced
    using variable transformations as described in the MINUIT User's
    Guide by Fred James and Matthias Winkler.

    Parameters:

    * func      functions to call for optimization.
    * x0        Starting estimate for the minimization.
    * bounds    (min,max) pair for each element of x, defining the bounds on
                that parameter.  Use None for one of min or max when there is
                no bound in that direction.
    * args      Any extra arguments to func are places in this tuple.

    Returns: (x,{cov_x,infodict,mesg},ier)

    Return is described in the scipy.optimize.leastsq function.  x and con_v
    are corrected to take into account the parameter transformation, infodic
    is not corrected.

    Additional keyword arguments are passed directly to the
    scipy.optimize.leastsq algorithm.
    """
    full = bool(kw.get("full_output"))
    # convert x0 to internal variables and optimize unconstrained
    i0 = external2internal(x0, bounds)
    result = leastsq(err, i0, args=(bounds, func, args), **kw)
    if full:
        xi, cov_xi, infodic, mesg, ier = result
        xe = internal2external(xi, bounds)
        cov_xe = i2e_cov_x(xi, bounds, cov_xi)
        # XXX correct infodic 'fjac','ipvt', and 'qtf'
        return xe, cov_xe, infodic, mesg, ier
    xi, ier = result
    return internal2external(xi, bounds), ier
4.590624
4.54418
1.010221
def start_review(self):
    """Mark our review as started by posting a pending GitHub status."""
    if not self.set_status:
        return
    self.github_repo.create_status(
        state="pending",
        description="Static analysis in progress.",
        context="inline-plz",
        sha=self.last_sha,
    )
7.991633
7.178685
1.113245
def finish_review(self, success=True, error=False):
    """Mark our review as finished by posting the final GitHub status."""
    if not self.set_status:
        return
    # pick the terminal state: error takes precedence over success/failure
    if error:
        state = "error"
        description = "Static analysis error! inline-plz failed to run."
    elif success:
        state = "success"
        description = "Static analysis complete! No errors found in your PR."
    else:
        state = "failure"
        description = "Static analysis complete! Found errors in your PR."
    self.github_repo.create_status(
        state=state,
        description=description,
        context="inline-plz",
        sha=self.last_sha,
    )
2.809036
2.75897
1.018146
def out_of_date(self):
    """Check if our local latest sha matches the remote latest sha"""
    try:
        commits = self.pr_commits(self.pull_request.refresh(True))
        latest_remote_sha = commits[-1].sha
    except IndexError:
        # no commits on the PR: nothing newer than what we have
        return False
    print("Latest remote sha: {}".format(latest_remote_sha))
    try:
        print("Ratelimit remaining: {}".format(self.github.ratelimit_remaining))
    except Exception:
        print("Failed to look up ratelimit remaining")
    return self.last_sha != latest_remote_sha
4.952682
4.363266
1.135086
def position(self, message):
    """Calculate position within the PR, which is not the line number

    Returns the diff position GitHub expects for review comments, or
    None when the line is not an added line of the diff.
    """
    if not message.line_number:
        message.line_number = 1
    for patched_file in self.patch:
        target = patched_file.target_file
        # Strip the "b/" diff prefix.  The original used
        # target.lstrip("b/"), which strips a *character set* and mangles
        # filenames starting with 'b' (e.g. "b/bar.py" -> "ar.py").
        if target.startswith("b/"):
            target = target[2:]
        if target != message.path:
            continue
        offset = 1
        for hunk in patched_file:
            for line_offset, hunk_line in enumerate(hunk):
                if hunk_line.target_line_no == message.line_number:
                    if not hunk_line.is_added:
                        # only added lines can carry review comments
                        return None
                    return line_offset + offset
            offset += len(hunk) + 1
4.637352
4.360364
1.063524
def abs_contact_2_coo_file(abs_contact_file, coo_file):
    """Convert contact maps between old-style and new-style formats.

    A legacy function that converts contact maps from the older GRAAL
    format to the simpler instaGRAAL format. This is useful with datasets
    generated by Hi-C box.

    Parameters
    ----------
    abs_contact_file : str, file or pathlib.Path
        The input old-style contact map (first line is a header).
    coo_file : str, file, or pathlib.Path
        The output path to the generated contact map; must be writable.
    """
    # sparse_dict[f1][f2] = number of contacts between fragments f1 <= f2
    # (0-based ids).  Streaming line-by-line replaces the original
    # readlines() of the whole file; "with" replaces the manual (double!)
    # close of the input handle.
    sparse_dict = {}
    with open(abs_contact_file, "r") as handle:
        handle.readline()  # skip header line
        for line in handle:
            dat = line.split()
            f1, f2 = sorted((int(dat[0]), int(dat[1])))
            f1 -= 1  # convert 1-based fragment ids to 0-based
            f2 -= 1
            row = sparse_dict.setdefault(f1, {})
            row[f2] = row.get(f2, 0) + 1
    with open(coo_file, "w") as out:
        out.write("%s\t%s\t%s\n" % ("id_frag_a", "id_frag_b", "n_contact"))
        for fa in sorted(sparse_dict):
            row = sparse_dict[fa]
            for fb in sorted(row):
                out.write("%s\t%s\t%s\n" % (str(fa), str(fb), str(row[fb])))
1.892261
2.000328
0.945975
def fill_sparse_pyramid_level(pyramid_handle, level, contact_file, nfrags):
    """Fill a level with sparse contact map data

    Copy values from the simple text matrix file into the hdf5-based
    pyramid level.

    Parameters
    ----------
    pyramid_handle : h5py.File
        The hdf5 file handle containing the whole dataset.
    level : int
        The level (resolution) to be filled with contact data.
    contact_file : str, file or pathlib.Path
        The binned contact map file to be converted to hdf5 data.
    nfrags : int
        The number of fragments/bins in that specific level.
    """
    # accumulate counts keyed on (row, col) with row <= col
    sparse_dict = {}
    with open(contact_file, "r") as handle:
        lines = handle.readlines()
    for line in lines[1:]:  # skip header line
        dat = line.split()
        nc = int(dat[2])
        f1, f2 = sorted((int(dat[0]), int(dat[1])))
        row = sparse_dict.setdefault(f1, {})
        row[f2] = row.get(f2, 0) + nc
    out_r = []
    out_c = []
    out_d = []
    # rows sorted; columns kept in insertion order (as in the original)
    for r in sorted(sparse_dict):
        for c, value in sparse_dict[r].items():
            out_r.append(r)
            out_c.append(c)
            out_d.append(value)
    n_on_pxls = len(out_d)
    level_hdf5 = pyramid_handle.create_group(str(level))
    data_2_sparse = level_hdf5.create_dataset("data", (3, n_on_pxls), "i")
    data_nfrags = level_hdf5.create_dataset("nfrags", (1, 1), "i")
    data_2_sparse[0, :] = out_r
    data_2_sparse[1, :] = out_c
    data_2_sparse[2, :] = out_d
    data_nfrags[:] = nfrags
2.029
2.051959
0.988811
def init_frag_list(fragment_list, new_frag_list):
    """Adapt the original fragment list to fit the build function requirements

    Parameters
    ----------
    fragment_list : str, file or pathlib.Path
        The input fragment list.
    new_frag_list : str, file or pathlib.Path
        The output fragment list to be written.

    Returns
    -------
    i : int
        The number of records processed this way.
    """
    header = "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (
        "id",
        "chrom",
        "start_pos",
        "end_pos",
        "size",
        "gc_content",
        "accu_frag",
        "frag_start",
        "frag_end",
    )
    i = 0
    # "with" + line iteration replaces the original manual open/close and
    # the "while 1: readline()" loop
    with open(fragment_list, "r") as source, open(new_frag_list, "w") as dest:
        dest.write(header)
        source.readline()  # skip the input header line
        for line in source:
            i += 1
            data = line.split("\t")
            id_init = data[0]
            # each output record starts as its own single-fragment block:
            # accu_frag is 1 and frag_start == frag_end == id
            dest.write(
                "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
                % (
                    id_init,
                    data[1],              # contig name
                    data[2],              # start position
                    data[3],              # end position
                    data[4],              # length (kb)
                    str(float(data[5])),  # normalized gc content
                    str(1),               # accu_frag
                    id_init,              # frag_start
                    id_init,              # frag_end
                )
            )
    return i
1.753774
1.749032
1.002712
def zoom_in_frag(self, curr_frag):
    """Map a fragment to the list of its sub-fragments one level down.

    :param curr_frag: a (fragment id, level) tuple
    :returns: list of (sub-fragment id, level - 1) tuples; the fragment
        itself (in a one-element list) when not above level 0
    """
    frag, level = curr_frag
    if level <= 0:
        return [curr_frag]
    frag_info = self.spec_level[str(level)]["fragments_dict"][frag]
    low = frag_info["sub_low_index"]
    high = frag_info["sub_high_index"]
    return [(sub, level - 1) for sub in range(low, high + 1)]
2.879413
2.821898
1.020381
def zoom_out_frag(self, curr_frag):
    """Map a fragment to its parent fragment one level up.

    :param curr_frag: a (fragment id, level) tuple
    :returns: a (parent fragment id, level + 1) tuple; the fragment
        itself when not above level 0
    """
    frag, level = curr_frag
    if level <= 0:
        return curr_frag
    parent = self.spec_level[str(level)]["fragments_dict"][frag]["super_index"]
    return (parent, level + 1)
5.031323
4.858779
1.035512