Dataset schema (one record per code snippet):

  _id                string, length 2 to 7
  title              string, length 1 to 88
  partition          string, 3 classes
  text               string, length 75 to 19.8k
  language           string, 1 class
  meta_information   dict
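The records that follow pair a snippet id and title with its Python source in the text field. As a minimal sketch of how records with this schema might be consumed, assuming the dump is exported as a JSON-lines file named code_snippets.jsonl (the filename and export format are illustrative assumptions, not part of the dump itself):

import json

# Iterate over records shaped like the schema above:
# _id, title, partition, text (the source code), language, meta_information.
# The file name is a placeholder for wherever the dump is actually stored.
with open("code_snippets.jsonl", "r", encoding="utf-8") as handle:
    for line in handle:
        record = json.loads(line)
        # Keep only Python snippets from the training split.
        if record["language"] == "python" and record["partition"] == "train":
            print(record["_id"], record["title"], len(record["text"]))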
q17800
NeighbourMovementTracking.calculate_costs
train
def calculate_costs(self, detections_1, detections_2, calculate_match_cost, params):
    """
    Calculates assignment costs between detections and 'empty' spaces.
    The smaller the cost, the better.
    @param detections_1: cell list of size n in previous frame
    @param detections_2: cell list of size m in current frame
    @return: cost matrix (n+m)x(n+m) extended by cost of matching cells with emptiness
    """
    global invalid_match
    size_sum = len(detections_1) + len(detections_2)
    # Cost matrix extended by matching cells with nothing
    # (for detection 1 it means losing cells, for detection 2 it means new cells).
    cost_matrix = numpy.zeros((size_sum, size_sum))
    # lost cells cost
    cost_matrix[0:len(detections_1), len(detections_2):size_sum] = params["default_empty_cost"] + (
        1 - numpy.eye(len(detections_1), len(detections_1))) * invalid_match
    # new cells cost
    cost_matrix[len(detections_1):size_sum, 0:len(detections_2)] = params["default_empty_cost"] + (
        1 - numpy.eye(len(detections_2), len(detections_2))) * invalid_match
    # increase costs for reliable detections
    for row in [i for i in range(0, len(detections_1)) if detections_1[i].is_reliable() and (
            not params["check_if_big"] or self.is_cell_big(detections_1[i]))]:
        cost_matrix[row, len(detections_2):size_sum] *= params["default_empty_reliable_cost_mult"]
    for col in [i for i in range(0, len(detections_2)) if detections_2[i].is_reliable() and (
            not params["check_if_big"] or self.is_cell_big(detections_2[i]))]:
        cost_matrix[len(detections_1):size_sum, col] *= params["default_empty_reliable_cost_mult"]

    # calculate cost of matching cells
    def cost_if_not_too_far(detection_1, detection_2):
        if detection_1.distance(detection_2) <= self.parameters_tracking["max_distance"]:
            return calculate_match_cost(detection_1, detection_2)
        else:
            return invalid_match

    cost_matrix[0:len(detections_1), 0:len(detections_2)] = [
        [cost_if_not_too_far(d1, d2) for d2 in detections_2] for d1 in detections_1]
    return cost_matrix
python
{ "resource": "" }
q17801
NeighbourMovementTracking.solve_assignement
train
def solve_assignement(self, costs):
    """
    Solves assignment problem using Hungarian implementation by Brian M. Clapper.
    @param costs: square cost matrix
    @return: assignment function
    @rtype: int->int
    """
    if costs is None or len(costs) == 0:
        return dict()
    n = costs.shape[0]
    pairs = [(i, j) for i in range(0, n) for j in range(0, n) if costs[i, j] < invalid_match]
    costs_list = [costs[i, j] for (i, j) in pairs]
    assignment = lapjv.lapjv(list(zip(*pairs))[0], list(zip(*pairs))[1], costs_list)
    indexes = enumerate(list(assignment[0]))
    return dict([(row, col) for row, col in indexes])
python
{ "resource": "" }
q17802
circular_gaussian_kernel
train
def circular_gaussian_kernel(sd, radius):
    """Create a 2-d Gaussian convolution kernel

    sd - standard deviation of the gaussian in pixels
    radius - build a circular kernel that convolves all points in the circle
             bounded by this radius
    """
    i, j = np.mgrid[-radius:radius + 1, -radius:radius + 1].astype(float) / radius
    mask = i ** 2 + j ** 2 <= 1
    i = i * radius / sd
    j = j * radius / sd
    kernel = np.zeros((2 * radius + 1, 2 * radius + 1))
    kernel[mask] = np.e ** (-(i[mask] ** 2 + j[mask] ** 2) / (2 * sd ** 2))
    #
    # Normalize the kernel so that there is no net effect on a uniform image
    #
    kernel = kernel / np.sum(kernel)
    return kernel
python
{ "resource": "" }
q17803
get_threshold
train
def get_threshold(threshold_method, threshold_modifier, image, mask=None, labels = None, threshold_range_min = None, threshold_range_max = None, threshold_correction_factor = 1.0, adaptive_window_size = 10, **kwargs): """Compute a threshold for an image threshold_method - one of the TM_ methods above threshold_modifier - TM_GLOBAL to calculate one threshold over entire image TM_ADAPTIVE to calculate a per-pixel threshold TM_PER_OBJECT to calculate a different threshold for each object image - a NxM numpy array of the image data Returns a tuple of local_threshold and global_threshold where: * global_threshold is the single number calculated using the threshold method over the whole image * local_threshold is the global_threshold for global methods. For adaptive and per-object thresholding, local_threshold is a matrix of threshold values representing the threshold to be applied at each pixel of the image. Different methods have optional and required parameters: Required: TM_PER_OBJECT: labels - a labels matrix that defines the extents of the individual objects to be thresholded separately. Optional: All: mask - a mask of the significant pixels in the image threshold_range_min, threshold_range_max - constrain the threshold values to be examined to values between these limits threshold_correction_factor - the calculated threshold is multiplied by this number to get the final threshold TM_MOG (mixture of Gaussians): object_fraction - fraction of image expected to be occupied by objects (pixels that are above the threshold) TM_OTSU - We have algorithms derived from Otsu. There is a three-class version of Otsu in addition to the two class. There is also an entropy measure in addition to the weighted variance. two_class_otsu - assume that the distribution represents two intensity classes if true, three if false. use_weighted_variance - use Otsu's weighted variance if true, an entropy measure if false assign_middle_to_foreground - assign pixels in the middle class in a three-class Otsu to the foreground if true or the background if false. """ global_threshold = get_global_threshold( threshold_method, image, mask, **kwargs) global_threshold *= threshold_correction_factor if not threshold_range_min is None: global_threshold = max(global_threshold, threshold_range_min) if not threshold_range_max is None: global_threshold = min(global_threshold, threshold_range_max) if threshold_modifier == TM_GLOBAL: local_threshold=global_threshold elif threshold_modifier == TM_ADAPTIVE: local_threshold = get_adaptive_threshold( threshold_method, image, global_threshold, mask, adaptive_window_size = adaptive_window_size, **kwargs) local_threshold = local_threshold * threshold_correction_factor elif threshold_modifier == TM_PER_OBJECT: local_threshold = get_per_object_threshold( threshold_method, image, global_threshold, mask, labels, threshold_range_min, threshold_range_max,**kwargs) local_threshold = local_threshold * threshold_correction_factor else: raise NotImplementedError("%s thresholding is not implemented"%(threshold_modifier)) if isinstance(local_threshold, np.ndarray): # # Constrain thresholds to within .7 to 1.5 of the global threshold. 
# threshold_range_min = max(threshold_range_min, global_threshold * .7) threshold_range_max = min(threshold_range_max, global_threshold * 1.5) if not threshold_range_min is None: local_threshold[local_threshold < threshold_range_min] = \ threshold_range_min if not threshold_range_max is None: local_threshold[local_threshold > threshold_range_max] = \ threshold_range_max if (threshold_modifier == TM_PER_OBJECT) and (labels is not None): local_threshold[labels == 0] = 1.0 else: if not threshold_range_min is None: local_threshold = max(local_threshold, threshold_range_min) if not threshold_range_max is None: local_threshold = min(local_threshold, threshold_range_max) return local_threshold, global_threshold
python
{ "resource": "" }
q17804
get_global_threshold
train
def get_global_threshold(threshold_method, image, mask=None, **kwargs):
    """Compute a single threshold over the whole image"""
    if mask is not None and not np.any(mask):
        return 1
    if threshold_method == TM_OTSU:
        fn = get_otsu_threshold
    elif threshold_method == TM_MOG:
        fn = get_mog_threshold
    elif threshold_method == TM_BACKGROUND:
        fn = get_background_threshold
    elif threshold_method == TM_ROBUST_BACKGROUND:
        fn = get_robust_background_threshold
    elif threshold_method == TM_RIDLER_CALVARD:
        fn = get_ridler_calvard_threshold
    elif threshold_method == TM_KAPUR:
        fn = get_kapur_threshold
    elif threshold_method == TM_MCT:
        fn = get_maximum_correlation_threshold
    else:
        raise NotImplementedError("%s algorithm not implemented" % (threshold_method))
    kwargs = dict([(k, v) for k, v in kwargs.items() if k in fn.args])
    return fn(image, mask, **kwargs)
python
{ "resource": "" }
q17805
get_per_object_threshold
train
def get_per_object_threshold(method, image, threshold, mask=None, labels=None,
                             threshold_range_min=None, threshold_range_max=None,
                             **kwargs):
    """Return a matrix giving threshold per pixel calculated per-object

    image - image to be thresholded
    mask - mask out "don't care" pixels
    labels - a label mask indicating object boundaries
    threshold - the global threshold
    """
    if labels is None:
        labels = np.ones(image.shape, int)
    if mask is not None:
        labels[np.logical_not(mask)] = 0
    label_extents = scipy.ndimage.find_objects(labels, np.max(labels))
    local_threshold = np.ones(image.shape, image.dtype)
    for i, extent in enumerate(label_extents, start=1):
        label_mask = labels[extent] == i
        if mask is not None:
            label_mask = np.logical_and(mask[extent], label_mask)
        values = image[extent]
        per_object_threshold = get_global_threshold(
            method, values, mask=label_mask, **kwargs)
        local_threshold[extent][label_mask] = per_object_threshold
    return local_threshold
python
{ "resource": "" }
q17806
mad
train
def mad(a):
    '''Calculate the median absolute deviation of a sample

    a - a numpy array-like collection of values

    returns the median of the deviation of a from its median.
    '''
    a = np.asfarray(a).flatten()
    return np.median(np.abs(a - np.median(a)))
python
{ "resource": "" }
q17807
get_kapur_threshold
train
def get_kapur_threshold(image, mask=None): """The Kapur, Sahoo, & Wong method of thresholding, adapted to log-space.""" cropped_image = np.array(image.flat) if mask is None else image[mask] if np.product(cropped_image.shape)<3: return 0 if np.min(cropped_image) == np.max(cropped_image): return cropped_image[0] log_image = np.log2(smooth_with_noise(cropped_image, 8)) min_log_image = np.min(log_image) max_log_image = np.max(log_image) histogram = scipy.ndimage.histogram(log_image, min_log_image, max_log_image, 256) histogram_values = (min_log_image + (max_log_image - min_log_image)* np.arange(256, dtype=float) / 255) # drop any zero bins keep = histogram != 0 histogram = histogram[keep] histogram_values = histogram_values[keep] # check for corner cases if np.product(histogram_values)==1: return 2**histogram_values[0] # Normalize to probabilities p = histogram.astype(float) / float(np.sum(histogram)) # Find the probabilities totals up to and above each possible threshold. lo_sum = np.cumsum(p); hi_sum = lo_sum[-1] - lo_sum; lo_e = np.cumsum(p * np.log2(p)); hi_e = lo_e[-1] - lo_e; # compute the entropies lo_entropy = lo_e / lo_sum - np.log2(lo_sum); hi_entropy = hi_e / hi_sum - np.log2(hi_sum); sum_entropy = lo_entropy[:-1] + hi_entropy[:-1]; sum_entropy[np.logical_not(np.isfinite(sum_entropy))] = np.Inf entry = np.argmin(sum_entropy); return 2**((histogram_values[entry] + histogram_values[entry+1]) / 2);
python
{ "resource": "" }
q17808
weighted_variance
train
def weighted_variance(image, mask, binary_image):
    """Compute the log-transformed variance of foreground and background

    image - intensity image used for thresholding
    mask - mask of ignored pixels
    binary_image - binary image marking foreground and background
    """
    if not np.any(mask):
        return 0
    #
    # Clamp the dynamic range of the foreground
    #
    minval = np.max(image[mask]) / 256
    if minval == 0:
        return 0
    fg = np.log2(np.maximum(image[binary_image & mask], minval))
    bg = np.log2(np.maximum(image[(~ binary_image) & mask], minval))
    nfg = np.product(fg.shape)
    nbg = np.product(bg.shape)
    if nfg == 0:
        return np.var(bg)
    elif nbg == 0:
        return np.var(fg)
    else:
        return (np.var(fg) * nfg + np.var(bg) * nbg) / (nfg + nbg)
python
{ "resource": "" }
q17809
sum_of_entropies
train
def sum_of_entropies(image, mask, binary_image): """Bin the foreground and background pixels and compute the entropy of the distribution of points among the bins """ mask=mask.copy() mask[np.isnan(image)] = False if not np.any(mask): return 0 # # Clamp the dynamic range of the foreground # minval = np.max(image[mask])/256 if minval == 0: return 0 clamped_image = image.copy() clamped_image[clamped_image < minval] = minval # # Smooth image with -8 bits of noise # image = smooth_with_noise(clamped_image, 8) im_min = np.min(image) im_max = np.max(image) # # Figure out the bounds for the histogram # upper = np.log2(im_max) lower = np.log2(im_min) if upper == lower: # All values are the same, answer is log2 of # of pixels return math.log(np.sum(mask),2) # # Create log-transformed lists of points in the foreground and background # fg = image[binary_image & mask] bg = image[(~ binary_image) & mask] if len(fg) == 0 or len(bg) == 0: return 0 log_fg = np.log2(fg) log_bg = np.log2(bg) # # Make these into histograms hfg = numpy_histogram(log_fg, 256, range=(lower,upper))[0] hbg = numpy_histogram(log_bg, 256, range=(lower,upper))[0] #hfg = scipy.ndimage.histogram(log_fg,lower,upper,256) #hbg = scipy.ndimage.histogram(log_bg,lower,upper,256) # # Drop empty bins # hfg = hfg[hfg>0] hbg = hbg[hbg>0] if np.product(hfg.shape) == 0: hfg = np.ones((1,),int) if np.product(hbg.shape) == 0: hbg = np.ones((1,),int) # # Normalize # hfg = hfg.astype(float) / float(np.sum(hfg)) hbg = hbg.astype(float) / float(np.sum(hbg)) # # Compute sum of entropies # return np.sum(hfg * np.log2(hfg)) + np.sum(hbg*np.log2(hbg))
python
{ "resource": "" }
q17810
log_transform
train
def log_transform(image):
    '''Renormalize image intensities to log space

    Returns a tuple of transformed image and a dictionary to be passed into
    inverse_log_transform. The minimum and maximum from the dictionary
    can be applied to an image by the inverse_log_transform to convert it
    back to its former intensity values.
    '''
    orig_min, orig_max = scipy.ndimage.extrema(image)[:2]
    #
    # We add 1/2 bit noise to an 8 bit image to give the log a bottom
    #
    limage = image.copy()
    noise_min = orig_min + (orig_max - orig_min) / 256.0 + np.finfo(image.dtype).eps
    limage[limage < noise_min] = noise_min
    d = {"noise_min": noise_min}
    limage = np.log(limage)
    log_min, log_max = scipy.ndimage.extrema(limage)[:2]
    d["log_min"] = log_min
    d["log_max"] = log_max
    return stretch(limage), d
python
{ "resource": "" }
q17811
numpy_histogram
train
def numpy_histogram(a, bins=10, range=None, normed=False, weights=None):
    '''A version of numpy.histogram that accounts for numpy's version'''
    args = inspect.getargs(np.histogram.__code__)[0]
    if args[-1] == "new":
        return np.histogram(a, bins, range, normed, weights, new=True)
    return np.histogram(a, bins, range, normed, weights)
python
{ "resource": "" }
q17812
rank_order
train
def rank_order(image, nbins=None): """Return an image of the same shape where each pixel has the rank-order value of the corresponding pixel in the image. The returned image's elements are of type np.uint32 which simplifies processing in C code. """ flat_image = image.ravel() sort_order = flat_image.argsort().astype(np.uint32) flat_image = flat_image[sort_order] sort_rank = np.zeros_like(sort_order) is_different = flat_image[:-1] != flat_image[1:] np.cumsum(is_different, out=sort_rank[1:]) original_values = np.zeros((sort_rank[-1]+1,),image.dtype) original_values[0] = flat_image[0] original_values[1:] = flat_image[1:][is_different] int_image = np.zeros_like(sort_order) int_image[sort_order] = sort_rank if nbins is not None: max_ranked_data = np.max(int_image) while max_ranked_data >= nbins: # # Decimate the bins until there are fewer than nbins # hist = np.bincount(int_image) # # Rank the bins from lowest count to highest order = np.argsort(hist) # # find enough to maybe decimate to nbins # candidates = order[:max_ranked_data+2-nbins] to_delete = np.zeros(max_ranked_data+2, bool) to_delete[candidates] = True # # Choose candidates that are either not next to others # or have an even index so as not to delete adjacent bins # td_mask = to_delete[:-1] & ( ((np.arange(max_ranked_data+1) & 2) == 0) | (~ to_delete[1:])) if td_mask[0]: td_mask[0] = False # # A value to be deleted has the same index as the following # value and the two end up being merged # rd_translation = np.cumsum(~td_mask)-1 # # Translate the rankings to the new space # int_image = rd_translation[int_image] # # Eliminate the bins with low counts # original_values = original_values[~td_mask] max_ranked_data = len(original_values)-1 return (int_image.reshape(image.shape), original_values)
python
{ "resource": "" }
q17813
quantize
train
def quantize(image, nlevels):
    """Quantize an image into integers 0, 1, ..., nlevels - 1.

    image -- a numpy array of type float, range [0, 1]
    nlevels -- an integer
    """
    tmp = np.array(image // (1.0 / nlevels), dtype='i1')
    return tmp.clip(0, nlevels - 1)
python
{ "resource": "" }
q17814
cooccurrence
train
def cooccurrence(quantized_image, labels, scale_i=3, scale_j=0): """Calculates co-occurrence matrices for all the objects in the image. Return an array P of shape (nobjects, nlevels, nlevels) such that P[o, :, :] is the cooccurence matrix for object o. quantized_image -- a numpy array of integer type labels -- a numpy array of integer type scale -- an integer For each object O, the cooccurrence matrix is defined as follows. Given a row number I in the matrix, let A be the set of pixels in O with gray level I, excluding pixels in the rightmost S columns of the image. Let B be the set of pixels in O that are S pixels to the right of a pixel in A. Row I of the cooccurence matrix is the gray-level histogram of the pixels in B. """ labels = labels.astype(int) nlevels = quantized_image.max() + 1 nobjects = labels.max() if scale_i < 0: scale_i = -scale_i scale_j = -scale_j if scale_i == 0 and scale_j > 0: image_a = quantized_image[:, :-scale_j] image_b = quantized_image[:, scale_j:] labels_ab = labels_a = labels[:, :-scale_j] labels_b = labels[:, scale_j:] elif scale_i > 0 and scale_j == 0: image_a = quantized_image[:-scale_i, :] image_b = quantized_image[scale_i:, :] labels_ab = labels_a = labels[:-scale_i, :] labels_b = labels[scale_i:, :] elif scale_i > 0 and scale_j > 0: image_a = quantized_image[:-scale_i, :-scale_j] image_b = quantized_image[scale_i:, scale_j:] labels_ab = labels_a = labels[:-scale_i, :-scale_j] labels_b = labels[scale_i:, scale_j:] else: # scale_j should be negative image_a = quantized_image[:-scale_i, -scale_j:] image_b = quantized_image[scale_i:, :scale_j] labels_ab = labels_a = labels[:-scale_i, -scale_j:] labels_b = labels[scale_i:, :scale_j] equilabel = ((labels_a == labels_b) & (labels_a > 0)) if np.any(equilabel): Q = (nlevels*nlevels*(labels_ab[equilabel]-1)+ nlevels*image_a[equilabel]+image_b[equilabel]) R = np.bincount(Q) if R.size != nobjects*nlevels*nlevels: S = np.zeros(nobjects*nlevels*nlevels-R.size) R = np.hstack((R, S)) P = R.reshape(nobjects, nlevels, nlevels) pixel_count = fix(scind.sum(equilabel, labels_ab, np.arange(nobjects, dtype=np.int32)+1)) pixel_count = np.tile(pixel_count[:,np.newaxis,np.newaxis], (1,nlevels,nlevels)) return (P.astype(float) / pixel_count.astype(float), nlevels) else: return np.zeros((nobjects, nlevels, nlevels)), nlevels
python
{ "resource": "" }
q17815
Haralick.H5
train
def H5(self):
    "Inverse difference moment."
    t = 1 + toeplitz(self.levels) ** 2
    repeated = np.tile(t[np.newaxis], (self.nobjects, 1, 1))
    return (1.0 / repeated * self.P).sum(2).sum(1)
python
{ "resource": "" }
q17816
Haralick.H6
train
def H6(self):
    "Sum average."
    if not hasattr(self, '_H6'):
        self._H6 = ((self.rlevels2 + 2) * self.p_xplusy).sum(1)
    return self._H6
python
{ "resource": "" }
q17817
Haralick.H8
train
def H8(self):
    "Sum entropy."
    return -(self.p_xplusy * np.log(self.p_xplusy + self.eps)).sum(1)
python
{ "resource": "" }
q17818
Haralick.H10
train
def H10(self):
    "Difference variance."
    c = (self.rlevels * self.p_xminusy).sum(1)
    c1 = np.tile(c, (self.nlevels, 1)).transpose()
    e = self.rlevels - c1
    return (self.p_xminusy * e ** 2).sum(1)
python
{ "resource": "" }
q17819
Haralick.H11
train
def H11(self):
    "Difference entropy."
    return -(self.p_xminusy * np.log(self.p_xminusy + self.eps)).sum(1)
python
{ "resource": "" }
q17820
Haralick.H12
train
def H12(self):
    "Information measure of correlation 1."
    maxima = np.vstack((self.hx, self.hy)).max(0)
    return (self.H9() - self.hxy1) / maxima
python
{ "resource": "" }
q17821
Haralick.H13
train
def H13(self):
    "Information measure of correlation 2."
    # An imaginary result has been encountered once in the Matlab
    # version. The reason is unclear.
    return np.sqrt(1 - np.exp(-2 * (self.hxy2 - self.H9())))
python
{ "resource": "" }
q17822
construct_zernike_lookuptable
train
def construct_zernike_lookuptable(zernike_indexes):
    """Return a lookup table of the sum-of-factorial part of the radial
    polynomial of the zernike indexes passed

    zernike_indexes - an Nx2 array of the Zernike polynomials to be computed.
    """
    n_max = np.max(zernike_indexes[:, 0])
    factorial = np.ones((1 + n_max,), dtype=float)
    factorial[1:] = np.cumproduct(np.arange(1, 1 + n_max, dtype=float))
    width = int(n_max // 2 + 1)
    lut = np.zeros((zernike_indexes.shape[0], width), dtype=float)
    for idx, (n, m) in enumerate(zernike_indexes):
        alt = 1
        npmh = (n + m) // 2
        nmmh = (n - m) // 2
        for k in range(0, nmmh + 1):
            lut[idx, k] = \
                (alt * factorial[n - k] /
                 (factorial[k] * factorial[npmh - k] * factorial[nmmh - k]))
            alt = -alt
    return lut
python
{ "resource": "" }
q17823
score_zernike
train
def score_zernike(zf, radii, labels, indexes=None): """Score the output of construct_zernike_polynomials zf - the output of construct_zernike_polynomials which is I x J x K where K is the number of zernike polynomials computed radii - a vector of the radius of each of N labeled objects labels - a label matrix outputs a N x K matrix of the scores of each of the Zernikes for each labeled object. """ if indexes is None: indexes = np.arange(1,np.max(labels)+1,dtype=np.int32) else: indexes = np.array(indexes, dtype=np.int32) radii = np.asarray(radii, dtype=float) n = radii.size k = zf.shape[2] score = np.zeros((n,k)) if n == 0: return score areas = np.square(radii) areas *= np.pi for ki in range(k): zfk=zf[:,:,ki] real_score = scipy.ndimage.sum(zfk.real,labels,indexes) real_score = fixup_scipy_ndimage_result(real_score) imag_score = scipy.ndimage.sum(zfk.imag,labels,indexes) imag_score = fixup_scipy_ndimage_result(imag_score) # one_score = np.sqrt(real_score**2+imag_score**2) / areas np.square(real_score, out=real_score) np.square(imag_score, out=imag_score) one_score = real_score + imag_score np.sqrt(one_score, out=one_score) one_score /= areas score[:,ki] = one_score return score
python
{ "resource": "" }
q17824
slow_augmenting_row_reduction
train
def slow_augmenting_row_reduction(n, ii, jj, idx, count, x, y, u, v, c): '''Perform the augmenting row reduction step from the Jonker-Volgenaut algorithm n - the number of i and j in the linear assignment problem ii - the unassigned i jj - the j-index of every entry in c idx - the index of the first entry for each i count - the number of entries for each i x - the assignment of j to i y - the assignment of i to j u - the dual variable "u" which will be updated. It should be initialized to zero for the first reduction transfer. v - the dual variable "v" which will be reduced in-place c - the cost for each entry. returns the new unassigned i ''' ####################################### # # From Jonker: # # procedure AUGMENTING ROW REDUCTION; # begin # LIST: = {all unassigned rows}; # for all i in LIST do # repeat # ul:=min {c[i,j]-v[j] for j=l ...n}; # select j1 with c [i,j 1] - v[j 1] = u1; # u2:=min {c[i,j]-v[j] for j=l ...n,j< >jl} ; # select j2 with c [i,j2] - v [j2] = u2 and j2 < >j 1 ; # u[i]:=u2; # if ul <u2 then v[jl]:=v[jl]-(u2-ul) # else if jl is assigned then jl : =j2; # k:=y [jl]; if k>0 then x [k]:=0; x[i]:=jl; y [ j l ] : = i ; i:=k # until ul =u2 (* no reduction transfer *) or k=0 i~* augmentation *) # end ii = list(ii) k = 0 limit = len(ii) free = [] while k < limit: i = ii[k] k += 1 j = jj[idx[i]:(idx[i] + count[i])] uu = c[idx[i]:(idx[i] + count[i])] - v[j] order = np.lexsort([uu]) u1, u2 = uu[order[:2]] j1,j2 = j[order[:2]] i1 = y[j1] if u1 < u2: v[j1] = v[j1] - u2 + u1 elif i1 != n: j1 = j2 i1 = y[j1] if i1 != n: if u1 < u2: k -= 1 ii[k] = i1 else: free.append(i1) x[i] = j1 y[j1] = i return np.array(free,np.uint32)
python
{ "resource": "" }
q17825
collapse_degenerate_markers
train
def collapse_degenerate_markers(linkage_records): """Group all markers with no genetic distance as distinct features to generate a BED file with. Simple example with sixteen degenerate markers: >>> marker_features = [ ... ['36915_sctg_207_31842', 1, 0, 207, 31842], ... ['36941_sctg_207_61615', 1, 0, 207, 61615], ... ['36956_sctg_207_77757', 1, 0, 207, 77757], ... ['36957_sctg_207_78332', 1, 0, 207, 78332], ... ['36972_sctg_207_94039', 1, 0, 207, 94039], ... ['36788_sctg_207_116303', 1, 0.652, 207, 116303], ... ['36812_sctg_207_158925', 1, 1.25, 207, 158925], ... ['36819_sctg_207_165424', 1, 1.25, 207, 165424], ... ['36828_sctg_207_190813', 1, 1.25, 207, 190813], ... ['36830_sctg_207_191645', 1, 1.25, 207, 191645], ... ['36834_sctg_207_195961', 1, 1.25, 207, 195961], ... ['36855_sctg_207_233632', 1, 1.25, 207, 233632], ... ['36881_sctg_207_258658', 1, 1.25, 207, 258658], ... ['82072_sctg_486_41893', 1, 3.756, 486, 41893], ... ['85634_sctg_516_36614', 1, 3.756, 516, 36614], ... ['85638_sctg_516_50582', 1, 3.756, 516, 50582]] >>> len(marker_features) 16 >>> collapsed_features = collapse_degenerate_markers(marker_features) >>> len(collapsed_features) 5 The degenerate features (identical linkage group, genetic distance and original scaffold) are collapsed into a region: >>> collapsed_features[0] [1, 31842, 94039, 207] The format is [linkage group, start, end, original scaffold]. If a singleton (non-degenerate) feature is found, the region is simply a single point in the genome: >>> collapsed_features[1] [1, 116303, 116303, 207] so 'start' and 'end' are identical. Two markers are not considered degenerate if they belong to different original scaffolds, even if they are in terms of genetic linkage: >>> collapsed_features[2] [1, 158925, 258658, 207] >>> collapsed_features[3:] [[1, 41893, 41893, 486], [1, 36614, 50582, 516]] """ def degeneracy(linkage_record): linkage_group, genetic_distance, scaffold = ( linkage_record[1], linkage_record[2], linkage_record[3], ) return (linkage_group, genetic_distance, scaffold) degenerate_records = [] for _, degenerate_group in itertools.groupby( linkage_records, key=degeneracy ): group_list = list(degenerate_group) start_record, end_record = group_list[0], group_list[-1] assert (start_record[1], start_record[2], start_record[3]) == ( end_record[1], end_record[2], end_record[3], ) start_position = start_record[-1] end_position = end_record[-1] scaffold = start_record[3] linkage_group = start_record[1] record = [linkage_group, start_position, end_position, scaffold] degenerate_records.append(record) return degenerate_records
python
{ "resource": "" }
q17826
linkage_group_ordering
train
def linkage_group_ordering(linkage_records): """Convert degenerate linkage records into ordered info_frags-like records for comparison purposes. Simple example: >>> linkage_records = [ ... ['linkage_group_1', 31842, 94039, 'sctg_207'], ... ['linkage_group_1', 95303, 95303, 'sctg_207'], ... ['linkage_group_2', 15892, 25865, 'sctg_308'], ... ['linkage_group_2', 41893, 41893, 'sctg_486'], ... ['linkage_group_3', 36614, 50582, 'sctg_516'], ... ] >>> ordering = linkage_group_ordering(linkage_records) Each key of the record is a newly-formed 'scaffold' (linkage group): >>> sorted(ordering.keys()) ['linkage_group_1', 'linkage_group_2', 'linkage_group_3'] Records are in the form [init_contig, frag_id, start, end, orientation]. Since fragment ids are meaningless in non-HiC contexts a negative identifier is set so it is understood that region was added due to linkage data (-1 is for recovering data after first-pass polishing and -2 is for sequence insertions after long read based polishing). >>> ordering['linkage_group_1'] [['sctg_207', -3, 31842, 94039, 1], ['sctg_207', -3, 95303, 95303, 1]] >>> ordering['linkage_group_2'] [['sctg_308', -3, 15892, 25865, 1], ['sctg_486', -3, 41893, 41893, 1]] Orientations are always set to 1 by default. >>> ordering['linkage_group_3'] [['sctg_516', -3, 36614, 50582, 1]] """ new_records = dict() for lg_name, linkage_group in itertools.groupby( linkage_records, operator.itemgetter(0) ): new_records[lg_name] = [] for record in linkage_group: init_contig = record[-1] start = record[1] end = record[2] new_record = [init_contig, -3, start, end, 1] new_records[lg_name].append(new_record) return new_records
python
{ "resource": "" }
q17827
compare_orderings
train
def compare_orderings(info_frags_records, linkage_orderings): """Given linkage groups and info_frags records, link pseudo-chromosomes to scaffolds based on the initial contig composition of each group. Because info_frags records are usually richer and may contain contigs not found in linkage groups, those extra sequences are discarded. Example with two linkage groups and two chromosomes: >>> linkage_orderings = { ... 'linkage_group_1': [ ... ['sctg_516', -3, 36614, 50582, 1], ... ['sctg_486', -3, 41893, 41893, 1], ... ['sctg_486', -3, 50054, 62841, 1], ... ['sctg_207', -3, 31842, 94039, 1], ... ['sctg_558', -3, 51212, 54212, 1], ... ], ... 'linkage_group_2': [ ... ['sctg_308', -3, 15892, 25865, 1], ... ['sctg_842', -3, 0, 8974, 1], ... ['sctg_994', -3, 0, 81213, 1], ... ], ... } >>> info_frags = { ... 'scaffold_A': [ ... ['sctg_308', 996, 15892, 25865, 1], ... ['sctg_778', 1210, 45040, 78112, -1], ... ['sctg_842', 124, 0, 8974, 1], ... ], ... 'scaffold_B': [ ... ['sctg_516', 5, 0, 38000, 1], ... ['sctg_486', 47, 42050, 49000, 1], ... ['sctg_1755', 878, 95001, 10844, -1], ... ['sctg_842', 126, 19000, 26084, 1], ... ['sctg_207', 705, 45500, 87056, 1], ... ], ... 'scaffold_C': [ ... ['sctg_558', 745, 50045, 67851, 1], ... ['sctg_994', 12, 74201, 86010, -1], ... ], ... } >>> matching_pairs = compare_orderings(info_frags, linkage_orderings) >>> matching_pairs['scaffold_B'] (3, 'linkage_group_1', {'sctg_558': 'sctg_207'}) >>> matching_pairs['scaffold_A'] (2, 'linkage_group_2', {'sctg_994': 'sctg_842'}) """ scaffolds = info_frags_records.keys() linkage_groups = linkage_orderings.keys() best_matching_table = dict() for scaffold, linkage_group in itertools.product( scaffolds, linkage_groups ): lg_ordering = [ init_contig for init_contig, _ in itertools.groupby( linkage_orderings[linkage_group], operator.itemgetter(0) ) ] scaffold_ordering = [ init_contig for init_contig, bin_group in itertools.groupby( info_frags_records[scaffold], operator.itemgetter(0) ) if init_contig in lg_ordering ] overlap = set(lg_ordering).intersection(set(scaffold_ordering)) missing_locations = dict() for missing_block in sorted(set(lg_ordering) - set(overlap)): for i, init_contig in enumerate(lg_ordering): if init_contig == missing_block: try: block_before = lg_ordering[i - 1] except IndexError: block_before = "beginning" missing_locations[missing_block] = block_before try: if len(overlap) > best_matching_table[scaffold][0]: best_matching_table[scaffold] = ( len(overlap), linkage_group, missing_locations, ) except KeyError: best_matching_table[scaffold] = ( len(overlap), linkage_group, missing_locations, ) return best_matching_table
python
{ "resource": "" }
q17828
get_missing_blocks
train
def get_missing_blocks(info_frags_records, matching_pairs, linkage_orderings): """Get missing blocks in a scaffold based on the genetic map order. Given matching scaffold blocks/genetic map blocks (based on restriction sites and SNP markers, respectively), move around the scaffold blocks such that they map the genetic map order. Parameters ---------- info_frags_records : dict A dictionary representing the scaffolds and their block order as described in an info_frags.txt file matching_pairs : dict A list of best matching pairs in the form (scaffold_block, linkage group) linkage_orderings : dict A dictionary representing the genetic map and the linkage groups as described in csv file. Example ------- >>> linkage_orderings = { ... 'linkage_group_1': [ ... ['sctg_516', -3, 36614, 50582, 1], ... ['sctg_486', -3, 41893, 41893, 1], ... ['sctg_486', -3, 50054, 62841, 1], ... ['sctg_207', -3, 31842, 94039, 1], ... ['sctg_558', -3, 51212, 54212, 1], ... ], ... 'linkage_group_2': [ ... ['sctg_308', -3, 15892, 25865, 1], ... ['sctg_842', -3, 0, 8974, 1], ... ['sctg_994', -3, 0, 81213, 1], ... ], ... } >>> info_frags = { ... 'scaffold_A': [ ... ['sctg_308', 996, 15892, 25865, 1], ... ['sctg_778', 1210, 45040, 78112, -1], ... ['sctg_842', 124, 0, 8974, 1], ... ], ... 'scaffold_B': [ ... ['sctg_516', 5, 0, 38000, 1], ... ['sctg_486', 47, 42050, 49000, 1], ... ['sctg_1755', 878, 95001, 10844, -1], ... ['sctg_842', 126, 19000, 26084, 1], ... ['sctg_207', 705, 45500, 87056, 1], ... ], ... 'scaffold_C': [ ... ['sctg_558', 745, 50045, 67851, 1], ... ['sctg_994', 12, 74201, 86010, -1], ... ], ... } >>> matching_pairs = compare_orderings(info_frags, linkage_orderings) >>> new_records = get_missing_blocks(info_frags, matching_pairs, ... linkage_orderings) >>> for my_bin in new_records['scaffold_A']: ... print(list(my_bin)) ... ['sctg_308', 996, 15892, 25865, 1] ['sctg_778', 1210, 45040, 78112, -1] ['sctg_842', 124, 0, 8974, 1] ['sctg_842', 126, 19000, 26084, 1] ['sctg_994', 12, 74201, 86010, -1] >>> for my_bin in new_records['scaffold_B']: ... print(list(my_bin)) ... 
['sctg_516', 5, 0, 38000, 1] ['sctg_486', 47, 42050, 49000, 1] ['sctg_1755', 878, 95001, 10844, -1] ['sctg_207', 705, 45500, 87056, 1] ['sctg_558', 745, 50045, 67851, 1] """ import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) logger = logging.getLogger() logger.setLevel(logging.DEBUG) touched_lgs = set() def record_length(key_record_tuple): return len(key_record_tuple[1]) new_scaffolds = copy.deepcopy(info_frags_records) for scaffold_name in collections.OrderedDict( sorted(new_scaffolds.items(), key=record_length, reverse=True) ): scaffold = new_scaffolds[scaffold_name] new_scaffold = [] corresponding_lg = matching_pairs[scaffold_name][1] if corresponding_lg in touched_lgs: continue else: touched_lgs.add(corresponding_lg) scaffold_block_names = {my_bin[0] for my_bin in scaffold} lg_block_names = [ my_block[0] for my_block in linkage_orderings[corresponding_lg] ] touched_bins = set() for lg_block_name in lg_block_names: if lg_block_name in scaffold_block_names: for my_bin in scaffold: if tuple(my_bin) in new_scaffold: continue elif ( my_bin[0] == lg_block_name or my_bin[0] not in lg_block_names ): new_scaffold.append(tuple(my_bin)) touched_bins.add(tuple(my_bin)) else: break for other_name, other_scaffold in new_scaffolds.items(): if other_name == scaffold_name: continue i = 0 for my_bin in other_scaffold: if tuple(my_bin) in new_scaffold: i += 1 continue elif my_bin[0] == lg_block_name: moved_bin = tuple(other_scaffold.pop(i)) new_scaffold.append(tuple(moved_bin)) touched_bins.add(tuple(moved_bin)) i -= 1 i += 1 for remaining_bin in scaffold: if tuple(remaining_bin) not in touched_bins: new_scaffold.append(tuple(remaining_bin)) touched_bins.add(tuple(remaining_bin)) if len(new_scaffold) > 0: new_scaffolds[scaffold_name] = new_scaffold return new_scaffolds
python
{ "resource": "" }
q17829
parse_info_frags
train
def parse_info_frags(info_frags):
    """Import an info_frags.txt file and return a dictionary where each key
    is a newly formed scaffold and each value is the list of bins and their
    origin on the initial scaffolding.
    """
    new_scaffolds = {}
    with open(info_frags, "r") as info_frags_handle:
        current_new_contig = None
        for line in info_frags_handle:
            if line.startswith(">"):
                current_new_contig = str(line[1:-1])
                new_scaffolds[current_new_contig] = []
            elif line.startswith("init_contig"):
                pass
            else:
                (init_contig, id_frag, orientation, pos_start, pos_end) = str(
                    line[:-1]
                ).split("\t")
                start = int(pos_start)
                end = int(pos_end)
                ori = int(orientation)
                fragid = int(id_frag)
                assert start < end
                assert ori in {-1, 1}
                new_scaffolds[current_new_contig].append(
                    [init_contig, fragid, start, end, ori]
                )
    return new_scaffolds
python
{ "resource": "" }
q17830
format_info_frags
train
def format_info_frags(info_frags):
    """A function to seamlessly run on either scaffold dictionaries or
    info_frags.txt files without having to check the input first.
    """
    if isinstance(info_frags, dict):
        return info_frags
    else:
        try:
            scaffolds = parse_info_frags(info_frags)
            return scaffolds
        except OSError:
            print("Error when opening info_frags.txt")
            raise
python
{ "resource": "" }
q17831
plot_info_frags
train
def plot_info_frags(scaffolds):
    """A crude way to visualize new scaffolds according to their origin on
    the initial scaffolding. Each scaffold spawns a new plot. Orientations
    are represented by different colors.
    """
    scaffolds = format_info_frags(scaffolds)
    for name, scaffold in scaffolds.items():
        plt.figure()
        xs = range(len(scaffold))
        color = []
        names = {}
        ys = []
        for my_bin in scaffold:
            current_color = "r" if my_bin[4] > 0 else "g"
            color += [current_color]
            name = my_bin[0]
            if name in names:
                ys.append(names[name])
            else:
                names[name] = len(names)
                ys.append(names[name])
        plt.scatter(xs, ys, c=color)
        plt.show()
python
{ "resource": "" }
q17832
remove_spurious_insertions
train
def remove_spurious_insertions(scaffolds): """Remove all bins whose left and right neighbors belong to the same, different scaffold. Example with three such insertions in two different scaffolds: >>> scaffolds = { ... "scaffold1": [ ... ["contig1", 0, 0, 100, 1], ... ["contig1", 1, 100, 200, 1], ... ["contig23", 53, 1845, 2058, -1], # <-- insertion ... ["contig1", 4, 254, 408, 1], ... ["contig1", 7, 805, 1253, 1], ... ["contig5", 23, 1500, 1605, -1], ... ["contig65", 405, 32145, 45548, -1], # <-- insertion ... ["contig5", 22, 1385, 1499, -1], ... ], ... "scaffold2": [ ... ["contig8", 0, 0, 250, 1], ... ["contig17", 2454, 8754, -1], # <-- insertion ... ["contig8", 2, 320, 480, 1], ... ], ... } >>> new_scaffolds = remove_spurious_insertions(scaffolds) >>> for my_bin in new_scaffolds['scaffold1']: ... print(my_bin) ... ['contig1', 0, 0, 100, 1] ['contig1', 1, 100, 200, 1] ['contig1', 4, 254, 408, 1] ['contig1', 7, 805, 1253, 1] ['contig5', 23, 1500, 1605, -1] ['contig5', 22, 1385, 1499, -1] >>> for my_bin in new_scaffolds['scaffold2']: ... print(my_bin) ... ['contig8', 0, 0, 250, 1] ['contig8', 2, 320, 480, 1] """ scaffolds = format_info_frags(scaffolds) new_scaffolds = {} for name, scaffold in scaffolds.items(): new_scaffold = [] if len(scaffold) > 2: for i in range(len(scaffold)): # First take care of edge cases: *-- or --* if i == 0: if not ( scaffold[i][0] != scaffold[i + 1][0] and scaffold[i + 1][0] == scaffold[i + 2][0] ): new_scaffold.append(scaffold[i]) elif i == len(scaffold) - 1: if not ( scaffold[i][0] != scaffold[i - 1][0] and scaffold[i - 1][0] == scaffold[i - 2][0] ): new_scaffold.append(scaffold[i]) # Otherwise, looking for -*- else: if not ( scaffold[i - 1][0] == scaffold[i + 1][0] and scaffold[i - 1][0] != scaffold[i][0] ): new_scaffold.append(scaffold[i]) else: # Can't remove insertions if 2 bins or less new_scaffold = copy.deepcopy(scaffold) new_scaffolds[name] = new_scaffold return new_scaffolds
python
{ "resource": "" }
q17833
rearrange_intra_scaffolds
train
def rearrange_intra_scaffolds(scaffolds):
    """Rearranges all bins within each scaffold such that all bins belonging
    to the same initial contig are grouped together in the same order. When
    two such groups are found, the smaller one is moved to the larger one.
    """
    scaffolds = format_info_frags(scaffolds)
    new_scaffolds = {}
    ordering = dict()
    for name, scaffold in scaffolds.items():
        new_scaffold = []
        ordering = dict()
        order = 0
        my_blocks = []
        for _, my_block in itertools.groupby(scaffold, operator.itemgetter(0)):
            my_bins = list(my_block)
            my_blocks.append(my_bins)
            block_length = len(my_bins)
            block_name = my_bins[0][0]
            if block_name in ordering.keys():
                if block_length > ordering[block_name][1]:
                    ordering[block_name] = (order, block_length)
            else:
                ordering[block_name] = (order, block_length)
            order += 1

        def block_order(block):
            return ordering[block[0][0]][0]

        for my_block in sorted(my_blocks, key=block_order):
            for my_bin in my_block:
                new_scaffold.append(my_bin)
        new_scaffolds[name] = copy.deepcopy(new_scaffold)
    return new_scaffolds
python
{ "resource": "" }
q17834
write_fasta
train
def write_fasta( init_fasta, info_frags, output=DEFAULT_NEW_GENOME_NAME, junction=False ): """Convert an info_frags.txt file into a fasta file given a reference. Optionally adds junction sequences to reflect the possibly missing base pairs between two newly joined scaffolds. """ init_genome = { record.id: record.seq for record in SeqIO.parse(init_fasta, "fasta") } my_new_records = [] with open(info_frags, "r") as info_frags_handle: current_seq = "" current_id = None previous_contig = None for line in info_frags_handle: if line.startswith(">"): previous_contig = None if current_id is not None: new_record = SeqRecord( current_seq, id=current_id, description="" ) my_new_records.append(new_record) current_seq = "" current_id = str(line[1:]) elif line.startswith("init_contig"): previous_contig = None else: (init_contig, _, orientation, pos_start, pos_end) = str( line[:-1] ).split("\t") start = int(pos_start) end = int(pos_end) ori = int(orientation) assert start < end assert ori in {-1, 1} seq_to_add = init_genome[init_contig][start:end] if ori == 1: current_seq += seq_to_add elif ori == -1: current_seq += seq_to_add.reverse_complement() if junction and previous_contig not in {init_contig, None}: error_was_raised = False try: extra_seq = Seq(junction, IUPAC.ambiguous_dna) current_seq = extra_seq + current_seq except TypeError: if not error_was_raised: print("Invalid junction sequence") error_was_raised = True previous_contig = init_contig new_record = SeqRecord(current_seq, id=current_id, description="") my_new_records.append(new_record) SeqIO.write(my_new_records, output, "fasta")
python
{ "resource": "" }
q17835
is_block
train
def is_block(bin_list):
    """Check if a bin list has exclusively consecutive bin ids.
    """
    id_set = set((my_bin[1] for my_bin in bin_list))
    start_id, end_id = min(id_set), max(id_set)
    return id_set == set(range(start_id, end_id + 1))
python
{ "resource": "" }
q17836
internal2external_grad
train
def internal2external_grad(xi, bounds):
    """
    Calculate the internal-to-external gradient

    Calculates the partial of external over internal
    """
    ge = np.empty_like(xi)
    for i, (v, bound) in enumerate(zip(xi, bounds)):
        a = bound[0]  # minimum
        b = bound[1]  # maximum
        if a is None and b is None:    # No constraints
            ge[i] = 1.0
        elif b is None:                # only min
            ge[i] = v / np.sqrt(v ** 2 + 1)
        elif a is None:                # only max
            ge[i] = -v / np.sqrt(v ** 2 + 1)
        else:                          # both min and max
            ge[i] = (b - a) * np.cos(v) / 2.
    return ge
python
{ "resource": "" }
q17837
internal2external
train
def internal2external(xi, bounds):
    """ Convert a series of internal variables to external variables"""
    xe = np.empty_like(xi)
    for i, (v, bound) in enumerate(zip(xi, bounds)):
        a = bound[0]  # minimum
        b = bound[1]  # maximum
        if a is None and b is None:    # No constraints
            xe[i] = v
        elif b is None:                # only min
            xe[i] = a - 1. + np.sqrt(v ** 2. + 1.)
        elif a is None:                # only max
            xe[i] = b + 1. - np.sqrt(v ** 2. + 1.)
        else:                          # both min and max
            xe[i] = a + ((b - a) / 2.) * (np.sin(v) + 1.)
    return xe
python
{ "resource": "" }
q17838
external2internal
train
def external2internal(xe, bounds):
    """ Convert a series of external variables to internal variables"""
    xi = np.empty_like(xe)
    for i, (v, bound) in enumerate(zip(xe, bounds)):
        a = bound[0]  # minimum
        b = bound[1]  # maximum
        if a is None and b is None:    # No constraints
            xi[i] = v
        elif b is None:                # only min
            xi[i] = np.sqrt((v - a + 1.) ** 2. - 1)
        elif a is None:                # only max
            xi[i] = np.sqrt((b - v + 1.) ** 2. - 1)
        else:                          # both min and max
            xi[i] = np.arcsin((2. * (v - a) / (b - a)) - 1.)
    return xi
python
{ "resource": "" }
q17839
calc_cov_x
train
def calc_cov_x(infodic, p):
    """
    Calculate cov_x from fjac, ipvt and p as is done in leastsq
    """
    fjac = infodic["fjac"]
    ipvt = infodic["ipvt"]
    n = len(p)
    # adapted from leastsq function in scipy/optimize/minpack.py
    perm = np.take(np.eye(n), ipvt - 1, 0)
    r = np.triu(np.transpose(fjac)[:n, :])
    R = np.dot(r, perm)
    try:
        cov_x = np.linalg.inv(np.dot(np.transpose(R), R))
    except LinAlgError:
        cov_x = None
    return cov_x
python
{ "resource": "" }
q17840
leastsqbound
train
def leastsqbound(func, x0, bounds, args=(), **kw): """ Constrained multivariant Levenberg-Marquard optimization Minimize the sum of squares of a given function using the Levenberg-Marquard algorithm. Contraints on parameters are inforced using variable transformations as described in the MINUIT User's Guide by Fred James and Matthias Winkler. Parameters: * func functions to call for optimization. * x0 Starting estimate for the minimization. * bounds (min,max) pair for each element of x, defining the bounds on that parameter. Use None for one of min or max when there is no bound in that direction. * args Any extra arguments to func are places in this tuple. Returns: (x,{cov_x,infodict,mesg},ier) Return is described in the scipy.optimize.leastsq function. x and con_v are corrected to take into account the parameter transformation, infodic is not corrected. Additional keyword arguments are passed directly to the scipy.optimize.leastsq algorithm. """ # check for full output if "full_output" in kw and kw["full_output"]: full = True else: full = False # convert x0 to internal variables i0 = external2internal(x0, bounds) # perfrom unconstrained optimization using internal variables r = leastsq(err, i0, args=(bounds, func, args), **kw) # unpack return convert to external variables and return if full: xi, cov_xi, infodic, mesg, ier = r xe = internal2external(xi, bounds) cov_xe = i2e_cov_x(xi, bounds, cov_xi) # XXX correct infodic 'fjac','ipvt', and 'qtf' return xe, cov_xe, infodic, mesg, ier else: xi, ier = r xe = internal2external(xi, bounds) return xe, ier
python
{ "resource": "" }
q17841
GitHubInterface.start_review
train
def start_review(self):
    """Mark our review as started."""
    if self.set_status:
        self.github_repo.create_status(
            state="pending",
            description="Static analysis in progress.",
            context="inline-plz",
            sha=self.last_sha,
        )
python
{ "resource": "" }
q17842
GitHubInterface.finish_review
train
def finish_review(self, success=True, error=False):
    """Mark our review as finished."""
    if self.set_status:
        if error:
            self.github_repo.create_status(
                state="error",
                description="Static analysis error! inline-plz failed to run.",
                context="inline-plz",
                sha=self.last_sha,
            )
        elif success:
            self.github_repo.create_status(
                state="success",
                description="Static analysis complete! No errors found in your PR.",
                context="inline-plz",
                sha=self.last_sha,
            )
        else:
            self.github_repo.create_status(
                state="failure",
                description="Static analysis complete! Found errors in your PR.",
                context="inline-plz",
                sha=self.last_sha,
            )
python
{ "resource": "" }
q17843
GitHubInterface.out_of_date
train
def out_of_date(self):
    """Check if our local latest sha matches the remote latest sha"""
    try:
        latest_remote_sha = self.pr_commits(self.pull_request.refresh(True))[-1].sha
        print("Latest remote sha: {}".format(latest_remote_sha))
        try:
            print("Ratelimit remaining: {}".format(self.github.ratelimit_remaining))
        except Exception:
            print("Failed to look up ratelimit remaining")
        return self.last_sha != latest_remote_sha
    except IndexError:
        return False
python
{ "resource": "" }
q17844
GitHubInterface.position
train
def position(self, message):
    """Calculate position within the PR, which is not the line number"""
    if not message.line_number:
        message.line_number = 1
    for patched_file in self.patch:
        target = patched_file.target_file.lstrip("b/")
        if target == message.path:
            offset = 1
            for hunk in patched_file:
                for position, hunk_line in enumerate(hunk):
                    if hunk_line.target_line_no == message.line_number:
                        if not hunk_line.is_added:
                            # if the line isn't an added line, we don't want to comment on it
                            return
                        return position + offset
                offset += len(hunk) + 1
python
{ "resource": "" }
q17845
abs_contact_2_coo_file
train
def abs_contact_2_coo_file(abs_contact_file, coo_file): """Convert contact maps between old-style and new-style formats. A legacy function that converts contact maps from the older GRAAL format to the simpler instaGRAAL format. This is useful with datasets generated by Hi-C box. Parameters ---------- abs_contact_file : str, file or pathlib.Path The input old-style contact map. coo_file : str, file, or pathlib.Path The output path to the generated contact map; must be writable. """ sparse_dict = dict() h = open(abs_contact_file, "r") all_lines = h.readlines() n_lines = len(all_lines) for i in range(1, n_lines): line = all_lines[i] dat = line.split() mates = [int(dat[0]), int(dat[1])] mates.sort() f1 = mates[0] - 1 f2 = mates[1] - 1 if f1 in sparse_dict: if f2 in sparse_dict[f1]: sparse_dict[f1][f2] += 1 else: sparse_dict[f1][f2] = 1 else: sparse_dict[f1] = dict() sparse_dict[f1][f2] = 1 keys = list(sparse_dict.keys()) keys.sort() h.close() h_coo = open(coo_file, "w") h_coo.write("%s\t%s\t%s\n" % ("id_frag_a", "id_frag_b", "n_contact")) for fa in keys: d_fb = sparse_dict[fa] keys_b = list(d_fb.keys()) keys_b.sort() for fb in keys_b: nc = d_fb[fb] h_coo.write("%s\t%s\t%s\n" % (str(fa), str(fb), str(nc))) h_coo.close() h.close()
python
{ "resource": "" }
q17846
fill_sparse_pyramid_level
train
def fill_sparse_pyramid_level(pyramid_handle, level, contact_file, nfrags): """Fill a level with sparse contact map data Fill values from the simple text matrix file to the hdf5-based pyramid level with contact data. Parameters ---------- pyramid_handle : h5py.File The hdf5 file handle containing the whole dataset. level : int The level (resolution) to be filled with contact data. contact_file : str, file or pathlib.Path The binned contact map file to be converted to hdf5 data. nfrags : int The number of fragments/bins in that specific level. """ sparse_dict = dict() h = open(contact_file, "r") all_lines = h.readlines() n_lines = len(all_lines) for i in range(1, n_lines): line = all_lines[i] dat = line.split() mates = [int(dat[0]), int(dat[1])] nc = int(dat[2]) mates.sort() f1 = mates[0] f2 = mates[1] if f1 in sparse_dict: if f2 in sparse_dict[f1]: sparse_dict[f1][f2] += nc else: sparse_dict[f1][f2] = nc else: sparse_dict[f1] = dict() sparse_dict[f1][f2] = nc keys = list(sparse_dict.keys()) keys.sort() out_r = [] out_c = [] out_d = [] for r in keys: data = sparse_dict[r] for c in list(data.keys()): out_r.append(r) out_c.append(c) out_d.append(data[c]) n_on_pxls = len(out_d) level_hdf5 = pyramid_handle.create_group(str(level)) data_2_sparse = level_hdf5.create_dataset("data", (3, n_on_pxls), "i") data_nfrags = level_hdf5.create_dataset("nfrags", (1, 1), "i") np_csr = np.zeros((3, n_on_pxls), dtype=np.int32) np_csr[0, :] = out_r np_csr[1, :] = out_c np_csr[2, :] = out_d data_2_sparse[0, :] = out_r data_2_sparse[1, :] = out_c data_2_sparse[2, :] = out_d data_nfrags[:] = nfrags
python
{ "resource": "" }
q17847
init_frag_list
train
def init_frag_list(fragment_list, new_frag_list): """Adapt the original fragment list to fit the build function requirements Parameters ---------- fragment_list : str, file or pathlib.Path The input fragment list. new_frag_list : str, file or pathlib.Path The output fragment list to be written. Returns ------- i : int The number of records processed this way. """ handle_frag_list = open(fragment_list, "r") handle_new_frag_list = open(new_frag_list, "w") handle_new_frag_list.write( "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % ( "id", "chrom", "start_pos", "end_pos", "size", "gc_content", "accu_frag", "frag_start", "frag_end", ) ) handle_frag_list.readline() i = 0 while 1: line_frag = handle_frag_list.readline() if not line_frag: handle_frag_list.close() handle_new_frag_list.close() break i += 1 data = line_frag.split("\t") id_init = data[0] contig_name = data[1] start_pos = data[2] end_pos = data[3] length_kb = data[4] gc_content = str(float(data[5])) accu_frag = str(1) frag_start = id_init frag_end = id_init handle_new_frag_list.write( "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % ( id_init, contig_name, start_pos, end_pos, length_kb, gc_content, accu_frag, frag_start, frag_end, ) ) return i
python
{ "resource": "" }
q17848
pyramid.zoom_in_pixel
train
def zoom_in_pixel(self, curr_pixel): """ return the curr_frag at a higher resolution""" low_frag = curr_pixel[0] high_frag = curr_pixel[1] level = curr_pixel[2] if level > 0: str_level = str(level) low_sub_low = self.spec_level[str_level]["fragments_dict"][ low_frag ]["sub_low_index"] low_sub_high = self.spec_level[str_level]["fragments_dict"][ low_frag ]["sub_high_index"] high_sub_low = self.spec_level[str_level]["fragments_dict"][ high_frag ]["sub_low_index"] high_sub_high = self.spec_level[str_level]["fragments_dict"][ high_frag ]["sub_high_index"] vect = [low_sub_low, low_sub_high, high_sub_low, high_sub_high] new_pix_low = min(vect) new_pix_high = max(vect) new_level = level - 1 new_pixel = [new_pix_low, new_pix_high, new_level] else: new_pixel = curr_pixel return new_pixel
python
{ "resource": "" }
q17849
pyramid.zoom_out_pixel
train
def zoom_out_pixel(self, curr_pixel): """ return the curr_frag at a lower resolution""" low_frag = curr_pixel[0] high_frag = curr_pixel[1] level = curr_pixel[2] str_level = str(level) if level < self.n_level - 1: low_super = self.spec_level[str_level]["fragments_dict"][low_frag][ "super_index" ] high_super = self.spec_level[str_level]["fragments_dict"][ high_frag ]["sub_index"] new_pix_low = min([low_super, high_super]) new_pix_high = max([low_super, high_super]) new_level = level + 1 new_pixel = [new_pix_low, new_pix_high, new_level] else: new_pixel = curr_pixel return new_pixel
python
{ "resource": "" }
q17850
pyramid.zoom_in_area
train
def zoom_in_area(self, area): """ zoom in area""" x = area[0] y = area[1] level = x[2] logger.debug("x = {}".format(x)) logger.debug("y = {}".format(y)) logger.debug("level = {}".format(level)) if level == y[2] and level > 0: new_level = level - 1 high_x = self.zoom_in_pixel(x) high_y = self.zoom_in_pixel(y) new_x = [ min([high_x[0], high_y[0]]), min([high_x[1], high_y[1]]), new_level, ] new_y = [ max([high_x[0], high_y[0]]), max([high_x[1], high_y[1]]), new_level, ] new_area = [new_x, new_y] else: new_area = area return new_area
python
{ "resource": "" }
q17851
load_config
train
def load_config(args, config_path=".inlineplz.yml"): """Load inline-plz config from yaml config file with reasonable defaults.""" config = {} try: with open(config_path) as configfile: config = yaml.safe_load(configfile) or {} if config: print("Loaded config from {}".format(config_path)) pprint.pprint(config) except (IOError, OSError, yaml.parser.ParserError): traceback.print_exc() args = update_from_config(args, config) args.ignore_paths = args.__dict__.get("ignore_paths") or [ "node_modules", ".git", ".tox", "godeps", "vendor", "site-packages", "venv", ".env", "spec", "migrate", "bin", "fixtures", "cassettes", ".cache", ".idea", ".pytest_cache", "__pycache__", "dist", ] if config_path != ".inlineplz.yml": return args # fall back to config_dir inlineplz yaml if we didn't find one locally if args.config_dir and not config: new_config_path = os.path.join(args.config_dir, config_path) if os.path.exists(new_config_path): return load_config(args, new_config_path) return args
python
{ "resource": "" }
q17852
LinterRunner.cleanup
train
def cleanup():
    """Delete standard installation directories."""
    for install_dir in linters.INSTALL_DIRS:
        try:
            shutil.rmtree(install_dir, ignore_errors=True)
        except Exception:
            print(
                "{0}\nFailed to delete {1}".format(
                    traceback.format_exc(), install_dir
                )
            )
    sys.stdout.flush()
python
{ "resource": "" }
q17853
CloudDatabaseManager.get
train
def get(self, item):
    """
    This additional code is necessary to properly return the 'volume'
    attribute of the instance as a CloudDatabaseVolume object instead of
    a raw dict.
    """
    resource = super(CloudDatabaseManager, self).get(item)
    resource.volume = CloudDatabaseVolume(resource, resource.volume)
    return resource
python
{ "resource": "" }
q17854
CloudDatabaseManager._create_body
train
def _create_body(self, name, flavor=None, volume=None, databases=None,
                 users=None, version=None, type=None):
    """
    Used to create the dict required to create a Cloud Database instance.
    """
    if flavor is None:
        flavor = 1
    flavor_ref = self.api._get_flavor_ref(flavor)
    if volume is None:
        volume = 1
    if databases is None:
        databases = []
    if users is None:
        users = []
    body = {"instance": {
        "name": name,
        "flavorRef": flavor_ref,
        "volume": {"size": volume},
        "databases": databases,
        "users": users,
    }}
    if type is not None or version is not None:
        required = (type, version)
        if all(required):
            body['instance']['datastore'] = {"type": type, "version": version}
        else:
            raise exc.MissingCloudDatabaseParameter("Specifying a datastore"
                    " requires both the datastore type as well as the version.")
    return body
python
{ "resource": "" }
q17855
CloudDatabaseManager.list_backups
train
def list_backups(self, instance=None, marker=0, limit=20):
    """
    Returns a paginated list of backups, or just for a particular instance.
    """
    return self.api._backup_manager.list(instance=instance, limit=limit,
            marker=marker)
python
{ "resource": "" }
q17856
CloudDatabaseManager._list_backups_for_instance
train
def _list_backups_for_instance(self, instance, marker=0, limit=20):
    """
    Instance-specific backups are handled through the instance manager,
    not the backup manager.
    """
    uri = "/%s/%s/backups?limit=%d&marker=%d" % (self.uri_base,
            utils.get_id(instance), int(limit), int(marker))
    resp, resp_body = self.api.method_get(uri)
    mgr = self.api._backup_manager
    return [CloudDatabaseBackup(mgr, backup)
            for backup in resp_body.get("backups")]
python
{ "resource": "" }
q17857
CloudDatabaseUserManager.list_user_access
train
def list_user_access(self, user):
    """
    Returns a list of all database names for which the specified user
    has access rights.
    """
    user = utils.get_name(user)
    uri = "/%s/%s/databases" % (self.uri_base, user)
    try:
        resp, resp_body = self.api.method_get(uri)
    except exc.NotFound as e:
        raise exc.NoSuchDatabaseUser("User '%s' does not exist." % user)
    dbs = resp_body.get("databases", {})
    return [CloudDatabaseDatabase(self, db) for db in dbs]
python
{ "resource": "" }
q17858
CloudDatabaseUserManager.grant_user_access
train
def grant_user_access(self, user, db_names, strict=True):
    """
    Gives access to the databases listed in `db_names` to the user. You may
    pass in either a single db or a list of dbs. If any of the databases do
    not exist, a NoSuchDatabase exception will be raised, unless you specify
    `strict=False` in the call.
    """
    user = utils.get_name(user)
    uri = "/%s/%s/databases" % (self.uri_base, user)
    db_names = self._get_db_names(db_names, strict=strict)
    dbs = [{"name": db_name} for db_name in db_names]
    body = {"databases": dbs}
    try:
        resp, resp_body = self.api.method_put(uri, body=body)
    except exc.NotFound as e:
        raise exc.NoSuchDatabaseUser("User '%s' does not exist." % user)
python
{ "resource": "" }
q17859
CloudDatabaseBackupManager.list
train
def list(self, instance=None, limit=20, marker=0):
    """
    Return a paginated list of backups, or just for a particular instance.
    """
    if instance is None:
        return super(CloudDatabaseBackupManager, self).list()
    return self.api._manager._list_backups_for_instance(instance,
            limit=limit, marker=marker)
python
{ "resource": "" }
q17860
CloudDatabaseInstance.list_databases
train
def list_databases(self, limit=None, marker=None):
    """Returns a list of the names of all databases for this instance."""
    return self._database_manager.list(limit=limit, marker=marker)
python
{ "resource": "" }
q17861
CloudDatabaseInstance.list_users
train
def list_users(self, limit=None, marker=None):
    """Returns a list of the names of all users for this instance."""
    return self._user_manager.list(limit=limit, marker=marker)
python
{ "resource": "" }
q17862
CloudDatabaseInstance.get_user
train
def get_user(self, name):
    """
    Finds the user in this instance with the specified name, and returns
    a CloudDatabaseUser object. If no match is found, a NoSuchDatabaseUser
    exception is raised.
    """
    try:
        return self._user_manager.get(name)
    except exc.NotFound:
        raise exc.NoSuchDatabaseUser("No user by the name '%s' exists."
                % name)
python
{ "resource": "" }
q17863
CloudDatabaseInstance.get_database
train
def get_database(self, name):
    """
    Finds the database in this instance with the specified name, and
    returns a CloudDatabaseDatabase object. If no match is found, a
    NoSuchDatabase exception is raised.
    """
    try:
        return [db for db in self.list_databases()
                if db.name == name][0]
    except IndexError:
        raise exc.NoSuchDatabase("No database by the name '%s' exists."
                % name)
python
{ "resource": "" }
q17864
CloudDatabaseInstance.delete_database
train
def delete_database(self, name_or_obj):
    """
    Deletes the specified database. If no database by that name exists, no
    exception will be raised; instead, nothing at all is done.
    """
    name = utils.get_name(name_or_obj)
    self._database_manager.delete(name)
python
{ "resource": "" }
q17865
CloudDatabaseInstance.delete_user
train
def delete_user(self, user):
    """
    Deletes the specified user. If no user by that name exists, no
    exception will be raised; instead, nothing at all is done.
    """
    name = utils.get_name(user)
    self._user_manager.delete(name)
python
{ "resource": "" }
q17866
CloudDatabaseInstance.enable_root_user
train
def enable_root_user(self):
    """
    Enables login from any host for the root user and provides the user
    with a generated root password.
    """
    uri = "/instances/%s/root" % self.id
    resp, body = self.manager.api.method_post(uri)
    return body["user"]["password"]
python
{ "resource": "" }
q17867
CloudDatabaseInstance.root_user_status
train
def root_user_status(self):
    """
    Returns True or False, depending on whether the root user for this
    instance has been enabled.
    """
    uri = "/instances/%s/root" % self.id
    resp, body = self.manager.api.method_get(uri)
    return body["rootEnabled"]
python
{ "resource": "" }
q17868
CloudDatabaseInstance.resize
train
def resize(self, flavor):
    """Set the size of this instance to a different flavor."""
    # We need the flavorRef, not the flavor or size.
    flavorRef = self.manager.api._get_flavor_ref(flavor)
    body = {"flavorRef": flavorRef}
    self.manager.action(self, "resize", body=body)
python
{ "resource": "" }
q17869
CloudDatabaseInstance.resize_volume
train
def resize_volume(self, size):
    """Changes the size of the volume for this instance."""
    curr_size = self.volume.size
    if size <= curr_size:
        raise exc.InvalidVolumeResize("The new volume size must be larger "
                "than the current volume size of '%s'." % curr_size)
    body = {"volume": {"size": size}}
    self.manager.action(self, "resize", body=body)
python
{ "resource": "" }
q17870
CloudDatabaseInstance.list_backups
train
def list_backups(self, limit=20, marker=0):
    """
    Returns a paginated list of backups for this instance.
    """
    return self.manager._list_backups_for_instance(self, limit=limit,
            marker=marker)
python
{ "resource": "" }
q17871
CloudDatabaseInstance.create_backup
train
def create_backup(self, name, description=None):
    """
    Creates a backup of this instance, giving it the specified name along
    with an optional description.
    """
    return self.manager.create_backup(self, name, description=description)
python
{ "resource": "" }
q17872
CloudDatabaseClient.list_databases
train
def list_databases(self, instance, limit=None, marker=None):
    """Returns all databases for the specified instance."""
    return instance.list_databases(limit=limit, marker=marker)
python
{ "resource": "" }
q17873
CloudDatabaseClient.create_database
train
def create_database(self, instance, name, character_set=None,
        collate=None):
    """Creates a database with the specified name on the given instance."""
    return instance.create_database(name, character_set=character_set,
            collate=collate)
python
{ "resource": "" }
q17874
CloudDatabaseClient.list_users
train
def list_users(self, instance, limit=None, marker=None):
    """Returns all users for the specified instance."""
    return instance.list_users(limit=limit, marker=marker)
python
{ "resource": "" }
q17875
CloudDatabaseClient.grant_user_access
train
def grant_user_access(self, instance, user, db_names, strict=True):
    """
    Gives access to the databases listed in `db_names` to the user on the
    specified instance.
    """
    return instance.grant_user_access(user, db_names, strict=strict)
python
{ "resource": "" }
q17876
CloudDatabaseClient.revoke_user_access
train
def revoke_user_access(self, instance, user, db_names, strict=True):
    """
    Revokes access to the databases listed in `db_names` for the user on
    the specified instance.
    """
    return instance.revoke_user_access(user, db_names, strict=strict)
python
{ "resource": "" }
q17877
CloudDatabaseClient.list_flavors
train
def list_flavors(self, limit=None, marker=None):
    """Returns a list of all available Flavors."""
    return self._flavor_manager.list(limit=limit, marker=marker)
python
{ "resource": "" }
q17878
CloudDatabaseClient._get_flavor_ref
train
def _get_flavor_ref(self, flavor):
    """
    Flavors are odd in that the API expects an href link, not an ID, as
    with nearly every other resource. This method takes either a
    CloudDatabaseFlavor object, a flavor ID, a RAM size, or a flavor name,
    and uses that to determine the appropriate href.
    """
    flavor_obj = None
    if isinstance(flavor, CloudDatabaseFlavor):
        flavor_obj = flavor
    elif isinstance(flavor, int):
        # They passed an ID or a size
        try:
            flavor_obj = self.get_flavor(flavor)
        except exc.NotFound:
            # Must be either a size or bad ID, which will
            # be handled below
            pass
    if flavor_obj is None:
        # Try flavor name
        flavors = self.list_flavors()
        try:
            flavor_obj = [flav for flav in flavors
                    if flav.name == flavor][0]
        except IndexError:
            # No such name; try matching RAM
            try:
                flavor_obj = [flav for flav in flavors
                        if flav.ram == flavor][0]
            except IndexError:
                raise exc.FlavorNotFound("Could not determine flavor from "
                        "'%s'." % flavor)
    # OK, we have a Flavor object. Get the href
    href = [link["href"] for link in flavor_obj.links
            if link["rel"] == "self"][0]
    return href
python
{ "resource": "" }
q17879
runproc
train
def runproc(cmd):
    """
    Convenience method for executing operating system commands. Accepts a
    single string that would be the command as executed on the command line.

    Returns a 2-tuple consisting of the output of (STDOUT, STDERR). In your
    code you should check for an empty STDERR output to determine if your
    command completed successfully.
    """
    proc = Popen([cmd], shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE,
            close_fds=True)
    stdoutdata, stderrdata = proc.communicate()
    return (stdoutdata, stderrdata)
python
{ "resource": "" }
q17880
_join_chars
train
def _join_chars(chars, length):
    """
    Used by the random character functions.
    """
    mult = int(length / len(chars)) + 1
    mult_chars = chars * mult
    return "".join(random.sample(mult_chars, length))
python
{ "resource": "" }
q17881
random_unicode
train
def random_unicode(length=20):
    """
    Generates a random name; useful for testing.

    Returns an encoded string of the specified length containing unicode
    values up to code point 1000.
    """
    def get_char():
        return six.unichr(random.randint(32, 1000))
    chars = u"".join([get_char() for ii in six.moves.range(length)])
    return _join_chars(chars, length)
python
{ "resource": "" }
q17882
coerce_to_list
train
def coerce_to_list(val):
    """
    For parameters that can take either a single string or a list of
    strings, this function will ensure that the result is a list containing
    the passed values.
    """
    if val:
        if not isinstance(val, (list, tuple)):
            val = [val]
    else:
        val = []
    return val
python
{ "resource": "" }
q17883
folder_size
train
def folder_size(pth, ignore=None):
    """
    Returns the total bytes for the specified path, optionally ignoring any
    files which match the 'ignore' parameter. 'ignore' can either be a
    single string pattern, or a list of such patterns.
    """
    if not os.path.isdir(pth):
        raise exc.FolderNotFound
    ignore = coerce_to_list(ignore)
    total = 0
    for root, _, names in os.walk(pth):
        paths = [os.path.realpath(os.path.join(root, nm)) for nm in names]
        for pth in paths[::-1]:
            if not os.path.exists(pth):
                paths.remove(pth)
            elif match_pattern(pth, ignore):
                paths.remove(pth)
        total += sum(os.stat(pth).st_size for pth in paths)
    return total
python
{ "resource": "" }
q17884
add_method
train
def add_method(obj, func, name=None):
    """Adds an instance method to an object."""
    if name is None:
        name = func.__name__
    if sys.version_info < (3,):
        method = types.MethodType(func, obj, obj.__class__)
    else:
        method = types.MethodType(func, obj)
    setattr(obj, name, method)
python
{ "resource": "" }
q17885
wait_until
train
def wait_until(obj, att, desired, callback=None, interval=5, attempts=0,
        verbose=False, verbose_atts=None):
    """
    When changing the state of an object, it will commonly be in a
    transitional state until the change is complete. This will reload the
    object every `interval` seconds, and check its `att` attribute until
    the `desired` value is reached, or until the maximum number of attempts
    is reached. The updated object is returned. It is up to the calling
    program to check the returned object to make sure that it successfully
    reached the desired state.

    Once the desired value of the attribute is reached, the method returns.
    If not, it will re-try until the attribute's value matches one of the
    `desired` values. By default (attempts=0) it will loop infinitely until
    the attribute reaches the desired value. You can optionally limit the
    number of times that the object is reloaded by passing a positive value
    to `attempts`. If the attribute has not reached the desired value by
    then, the method will exit.

    If `verbose` is True, each attempt will print out the current value of
    the watched attribute and the time that has elapsed since the original
    request. Also, if `verbose_atts` is specified, the values of those
    attributes will also be output. If `verbose` is False, then
    `verbose_atts` has no effect.

    Note that `desired` can be a list of values; if the attribute becomes
    equal to any of those values, this will succeed. For example, when
    creating a new cloud server, it will initially have a status of
    'BUILD', and you can't work with it until its status is 'ACTIVE'.
    However, there might be a problem with the build process, and the
    server will change to a status of 'ERROR'. So for this case you need to
    set the `desired` parameter to `['ACTIVE', 'ERROR']`. If you simply
    pass 'ACTIVE' as the desired state, this will loop indefinitely if a
    build fails, as the server will never reach a status of 'ACTIVE'.

    Since this process of waiting can take a potentially long time, and
    will block your program's execution until the desired state of the
    object is reached, you may specify a callback function. The callback
    can be any callable that accepts a single parameter; the parameter it
    receives will be either the updated object (success), or None
    (failure). If a callback is specified, the program will return
    immediately after spawning the wait process in a separate thread.
    """
    if callback:
        waiter = _WaitThread(obj=obj, att=att, desired=desired,
                callback=callback, interval=interval, attempts=attempts,
                verbose=verbose, verbose_atts=verbose_atts)
        waiter.start()
        return waiter
    else:
        return _wait_until(obj=obj, att=att, desired=desired, callback=None,
                interval=interval, attempts=attempts, verbose=verbose,
                verbose_atts=verbose_atts)
python
{ "resource": "" }
q17886
_wait_until
train
def _wait_until(obj, att, desired, callback, interval, attempts, verbose,
        verbose_atts):
    """
    Loops until either the desired value of the attribute is reached, or
    the number of attempts is exceeded.
    """
    if not isinstance(desired, (list, tuple)):
        desired = [desired]
    if verbose_atts is None:
        verbose_atts = []
    if not isinstance(verbose_atts, (list, tuple)):
        verbose_atts = [verbose_atts]
    infinite = (attempts == 0)
    attempt = 0
    start = time.time()
    while infinite or (attempt < attempts):
        try:
            # For servers:
            obj.get()
        except AttributeError:
            try:
                # For other objects that don't support .get()
                obj = obj.manager.get(obj.id)
            except AttributeError:
                # punt
                raise exc.NoReloadError("The 'wait_until' method is not "
                        "supported for '%s' objects." % obj.__class__)
        attval = getattr(obj, att)
        if verbose:
            elapsed = time.time() - start
            msgs = ["Current value of %s: %s (elapsed: %4.1f seconds)" % (
                    att, attval, elapsed)]
            for vatt in verbose_atts:
                vattval = getattr(obj, vatt, None)
                msgs.append("%s=%s" % (vatt, vattval))
            print(" ".join(msgs))
        if attval in desired:
            return obj
        time.sleep(interval)
        attempt += 1
    return obj
python
{ "resource": "" }
q17887
_parse_datetime_string
train
def _parse_datetime_string(val):
    """
    Attempts to parse a string representation of a date or datetime value,
    and returns a datetime if successful. If not, an InvalidDateTimeString
    exception will be raised.
    """
    dt = None
    lenval = len(val)
    fmt = {19: "%Y-%m-%d %H:%M:%S", 10: "%Y-%m-%d"}.get(lenval)
    if fmt is None:
        # Invalid date
        raise exc.InvalidDateTimeString("The supplied value '%s' does not "
                "match either of the formats 'YYYY-MM-DD HH:MM:SS' or "
                "'YYYY-MM-DD'." % val)
    return datetime.datetime.strptime(val, fmt)
python
{ "resource": "" }
q17888
rfc2822_format
train
def rfc2822_format(val):
    """
    Takes either a date, a datetime, or a string, and returns a string that
    represents the value in RFC 2822 format. If a string is passed it is
    returned unchanged.
    """
    if isinstance(val, six.string_types):
        return val
    elif isinstance(val, (datetime.datetime, datetime.date)):
        # Convert to a timestamp
        val = time.mktime(val.timetuple())
    if isinstance(val, numbers.Number):
        return email.utils.formatdate(val)
    else:
        # Bail
        return val
python
{ "resource": "" }
q17889
get_id
train
def get_id(id_or_obj):
    """
    Returns the 'id' attribute of 'id_or_obj' if present; if not, returns
    'id_or_obj'.
    """
    if isinstance(id_or_obj, six.string_types + (int,)):
        # It's an ID
        return id_or_obj
    try:
        return id_or_obj.id
    except AttributeError:
        return id_or_obj
python
{ "resource": "" }
q17890
get_name
train
def get_name(name_or_obj):
    """
    Returns the 'name' attribute of 'name_or_obj' if present; if not,
    returns 'name_or_obj'.
    """
    if isinstance(name_or_obj, six.string_types):
        # It's a name
        return name_or_obj
    try:
        return name_or_obj.name
    except AttributeError:
        raise exc.MissingName(name_or_obj)
python
{ "resource": "" }
q17891
params_to_dict
train
def params_to_dict(params, dct):
    """
    Updates the 'dct' dictionary with the 'params' dictionary, filtering
    out all those whose param value is None.
    """
    for param, val in params.items():
        if val is None:
            continue
        dct[param] = val
    return dct
python
{ "resource": "" }
q17892
dict_to_qs
train
def dict_to_qs(dct):
    """
    Takes a dictionary and uses it to create a query string.
    """
    itms = ["%s=%s" % (key, val) for key, val in list(dct.items())
            if val is not None]
    return "&".join(itms)
python
{ "resource": "" }
q17893
match_pattern
train
def match_pattern(nm, patterns):
    """
    Compares `nm` with the supplied patterns, and returns True if it
    matches at least one.

    Patterns are standard file-name wildcard strings, as defined in the
    `fnmatch` module. For example, the pattern "*.py" will match the names
    of all Python scripts.
    """
    patterns = coerce_to_list(patterns)
    for pat in patterns:
        if fnmatch.fnmatch(nm, pat):
            return True
    return False
python
{ "resource": "" }
q17894
update_exc
train
def update_exc(exc, msg, before=True, separator="\n"):
    """
    Adds additional text to an exception's error message.

    The new text will be added before the existing text by default; to
    append it after the original text, pass False to the `before`
    parameter.

    By default the old and new text will be separated by a newline. If you
    wish to use a different separator, pass that as the `separator`
    parameter.
    """
    emsg = exc.message
    if before:
        parts = (msg, separator, emsg)
    else:
        parts = (emsg, separator, msg)
    new_msg = "%s%s%s" % parts
    new_args = (new_msg, ) + exc.args[1:]
    exc.message = new_msg
    exc.args = new_args
    return exc
python
{ "resource": "" }
q17895
case_insensitive_update
train
def case_insensitive_update(dct1, dct2):
    """
    Given two dicts, updates the first one with the second, but considers
    keys that are identical except for case to be the same.

    No return value; this function modifies dct1 in place, similar to the
    update() method.
    """
    lowkeys = dict([(key.lower(), key) for key in dct1])
    for key, val in dct2.items():
        d1_key = lowkeys.get(key.lower(), key)
        dct1[d1_key] = val
python
{ "resource": "" }
q17896
env
train
def env(*args, **kwargs):
    """
    Returns the value of the first environment variable in `args` that is
    set to a non-empty value; if none are, defaults to "" or the keyword
    arg `default`.
    """
    for arg in args:
        value = os.environ.get(arg, None)
        if value:
            return value
    return kwargs.get("default", "")
python
{ "resource": "" }
q17897
to_slug
train
def to_slug(value, incoming=None, errors="strict"):
    """Normalize string.

    Convert to lowercase, remove non-word characters, and convert spaces
    to hyphens.

    This function was copied from novaclient.openstack.strutils

    Inspired by Django's `slugify` filter.

    :param value: Text to slugify
    :param incoming: Text's current encoding
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: slugified unicode representation of `value`
    :raises TypeError: If text is not an instance of str
    """
    value = safe_decode(value, incoming, errors)
    # NOTE(aababilov): no need to use safe_(encode|decode) here:
    # encodings are always "ascii", error handling is always "ignore"
    # and types are always known (first: unicode; second: str)
    value = unicodedata.normalize("NFKD", value).encode(
        "ascii", "ignore").decode("ascii")
    value = SLUGIFY_STRIP_RE.sub("", value).strip().lower()
    return SLUGIFY_HYPHENATE_RE.sub("-", value)
python
{ "resource": "" }
q17898
_WaitThread.run
train
def run(self):
    """Starts the thread."""
    resp = _wait_until(obj=self.obj, att=self.att, desired=self.desired,
            callback=None, interval=self.interval, attempts=self.attempts,
            verbose=False, verbose_atts=None)
    self.callback(resp)
python
{ "resource": "" }
q17899
assure_volume
train
def assure_volume(fnc):
    """
    Converts a volumeID passed as the volume to a CloudBlockStorageVolume
    object.
    """
    @wraps(fnc)
    def _wrapped(self, volume, *args, **kwargs):
        if not isinstance(volume, CloudBlockStorageVolume):
            # Must be the ID
            volume = self._manager.get(volume)
        return fnc(self, volume, *args, **kwargs)
    return _wrapped
python
{ "resource": "" }