code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
def get_next_rngruns(self):
    """Yield the next RngRun values that can be used in this campaign."""
    used_runs = []
    for entry in self.get_results():
        used_runs.append(entry['params']['RngRun'])
    yield from DatabaseManager.get_next_values(used_runs)
17.547707
11.422522
1.536238
def insert_result(self, result):
    """Insert a new result in the database.

    This function also verifies that the result dictionaries saved in the
    database have the following structure (with {'a': 1} representing a
    dictionary, 'a' a key and 1 its value)::

        {
            'params': {'param1': value1, ..., 'RngRun': value3},
            'meta': {'elapsed_time': value4, 'id': value5}
        }

    Where elapsed_time is a float representing the seconds the simulation
    execution took, and id is a UUID uniquely identifying the result,
    used to locate the output files in the campaign_dir/data folder.

    Raises:
        ValueError: if the result's key structure does not match the
            database format.
    """
    # Template describing the key structure every result must follow.
    # The placeholder values are irrelevant: only keys are compared.
    param_keys = self.get_params() + ['RngRun']
    meta_keys = ['elapsed_time', 'id']
    example_result = {
        'params': dict.fromkeys(param_keys, ['...']),
        'meta': dict.fromkeys(meta_keys, ['...']),
    }

    # Reject results whose key structure deviates from the template.
    if not DatabaseManager.have_same_structure(result, example_result):
        raise ValueError(
            '%s:\nExpected: %s\nGot: %s' % (
                "Result dictionary does not correspond to database format",
                pformat(example_result, depth=1),
                pformat(result, depth=1)))

    self.db.table('results').insert(result)
6.822059
4.806293
1.419401
def get_results(self, params=None, result_id=None):
    """Return all the results available from the database that fulfill
    some parameter combinations.

    If params is None (or not specified), return all results. If params
    is specified, it must be a dictionary specifying the result values we
    are interested in, with multiple values specified as lists.

    For example, if the following params value is used::

        params = {
            'param1': 'value1',
            'param2': ['value2', 'value3']
        }

    the database will be queried for results having param1 equal to
    value1, and param2 equal to value2 or value3.

    Not specifying a value for all the available parameters is allowed:
    unspecified parameters are assumed to be 'free', and can take any
    value.

    Returns:
        A list of results matching the query. Returned results have the
        same structure as results inserted with the insert_result method.
    """
    # A cast to dict is necessary, since self.db.table() contains TinyDB's
    # Document object (which is simply a wrapper for a dictionary, thus the
    # simple cast).
    if result_id is not None:
        return [dict(i) for i in self.db.table('results').all()
                if i['meta']['id'] == result_id]
    # In this case, return all results
    if params is None:
        return [dict(i) for i in self.db.table('results').all()]
    # Verify parameter format is correct: the query may only reference
    # known parameter keys (plus RngRun)
    all_params = set(['RngRun'] + self.get_params())
    param_subset = set(params.keys())
    if (not all_params.issuperset(param_subset)):
        raise ValueError(
            '%s:\nParameters: %s\nQuery: %s' % (
                'Specified parameter keys do not match database format',
                all_params,
                param_subset))
    # Convert values that are not lists into lists to later perform
    # iteration over values more naturally. Perform this on a new
    # dictionary not to modify the original copy.
    query_params = {}
    for key in params:
        if not isinstance(params[key], list):
            query_params[key] = [params[key]]
        else:
            query_params[key] = params[key]
    # Handle case where query params has no keys (the reduce() below would
    # fail on an empty sequence)
    if not query_params.keys():
        return [dict(i) for i in self.db.table('results').all()]
    # Create the TinyDB query
    # In the docstring example above, this is equivalent to:
    # AND(OR(param1 == value1), OR(param2 == value2, param2 == value3))
    query = reduce(and_, [reduce(or_, [
        where('params')[key] == v for v in value])
        for key, value in query_params.items()])
    return [dict(i) for i in self.db.table('results').search(query)]
4.389269
4.348163
1.009454
def get_result_files(self, result):
    """Return a dictionary of filename: filepath values for each output
    file associated with a result.

    result can be either a result dictionary (e.g., obtained with the
    get_results() method) or a result id.
    """
    result_id = result['meta']['id'] if isinstance(result, dict) else result
    data_dir = os.path.join(self.get_data_dir(), result_id)
    # os.walk yields (dirpath, dirnames, filenames); only the filenames
    # of the result's top-level directory are needed.
    _, _, filenames = next(os.walk(data_dir))
    return {
        name: os.path.join(self.get_data_dir(), result_id, name)
        for name in filenames
    }
2.913197
2.571922
1.132693
def get_complete_results(self, params=None, result_id=None):
    """Return available results, analogously to get_results, but also read
    the corresponding output files for each result and incorporate them in
    the result dictionary under the 'output' key, as a dictionary of
    filename: file_contents.

    Args:
        params (dict): parameter specification of the desired parameter
            values, as described in the get_results documentation.
        result_id: if given, only the result with this id is returned.

    Returned results thus contain 'params', 'meta' and 'output' keys; the
    stdout and stderr entries of 'output' are always included, even when
    empty.
    """
    # Deep-copy so that adding 'output' does not mutate cached results.
    if result_id is not None:
        matching = deepcopy(self.get_results(result_id=result_id))
    else:
        matching = deepcopy(self.get_results(params))
    for entry in matching:
        entry['output'] = {}
        files = self.get_result_files(entry['meta']['id'])
        for name, filepath in files.items():
            with open(filepath, 'r') as file_contents:
                entry['output'][name] = file_contents.read()
    return matching
2.791218
2.402437
1.161828
def wipe_results(self):
    """Remove all results from the database.

    This also removes all output files, and cannot be undone.
    """
    # Clean results table
    self.db.purge_table('results')
    # Get rid of contents of data dir.
    # BUGFIX: the previous map(shutil.rmtree, ...) was never consumed, and
    # map() is lazy in Python 3, so no directory was actually removed.
    for entry in glob.glob(os.path.join(self.get_data_dir(), '*.*')):
        shutil.rmtree(entry)
6.951356
6.563885
1.059031
def have_same_structure(d1, d2):
    """Check whether two dictionaries (possibly with other nested
    dictionaries as values) share the same key structure.

    Only keys are compared: values matter solely in that nested
    dictionaries are recursed into, and a dict on one side must match a
    dict on the other.

    >>> from sem import DatabaseManager
    >>> DatabaseManager.have_same_structure({'a': 1, 'b': 2},
    ...                                     {'a': [], 'b': 3})
    True
    >>> DatabaseManager.have_same_structure({'a': 1, 'b': 2},
    ...                                     {'a': 4, 'c': 5})
    False
    """
    # The key sets at this level must coincide
    if set(d1.keys()) != set(d2.keys()):
        return False
    for key in sorted(d1.keys()):
        left, right = d1[key], d2[key]
        left_is_dict = isinstance(left, dict)
        right_is_dict = isinstance(right, dict)
        # A dict on one side must be matched by a dict on the other
        if left_is_dict != right_is_dict:
            return False
        # Both nested dictionaries: compare their structure recursively
        if left_is_dict and right_is_dict:
            if not DatabaseManager.have_same_structure(left, right):
                return False
    return True
2.367429
2.342058
1.010832
def get_all_values_of_all_params(self):
    """Return a dictionary mapping each available parameter to the sorted
    list of distinct values it takes in the database, or None for
    parameters with no recorded values.

    The parameter keys are always listed in alphabetical order.
    """
    ordered_params = sorted(self.get_params())
    collected = collections.OrderedDict((p, []) for p in ordered_params)
    for result in self.get_results():
        for param in self.get_params():
            collected[param].append(result['params'][param])
    summary = collections.OrderedDict()
    for key in collected:
        unique_sorted = sorted(set(collected[key]))
        # Parameters that never appear map to None rather than []
        summary[key] = unique_sorted if unique_sorted else None
    return summary
3.021084
3.035003
0.995414
def serialize(self, obj, method='json', beautify=False, raise_exception=False):
    """Alias of helper.string.serialization.serialize."""
    serializer = self.helper.string.serialization
    return serializer.serialize(obj=obj,
                                method=method,
                                beautify=beautify,
                                raise_exception=raise_exception)
4.098084
2.050485
1.998593
def deserialize(self, text, method='json', encoding='utf8', raise_exception=False):
    """Alias of helper.string.serialization.deserialize."""
    deserializer = self.helper.string.serialization
    return deserializer.deserialize(text,
                                    method=method,
                                    encoding=encoding,
                                    raise_exception=raise_exception)
5.09929
2.269831
2.24655
def run_simulations(self, parameter_list, data_folder):
    """Run multiple simulations in parallel through the DRMAA grid.

    Args:
        parameter_list (list): dictionaries of parameter name/value pairs,
            one per simulation to run.
        data_folder (str): folder in which each simulation gets a
            dedicated output directory, named after its result id.

    Yields:
        Result dictionaries ({'params': ..., 'meta': ...}) as the
        corresponding grid jobs complete.
    """
    # Open up a session
    s = drmaa.Session()
    s.initialize()

    # Create a job template for each parameter combination
    jobs = {}
    for parameter in parameter_list:
        # Initialize result
        current_result = {
            'params': {},
            'meta': {}
        }
        current_result['params'].update(parameter)

        command = " ".join(
            [self.script_executable] +
            ['--%s=%s' % (param, value)
             for param, value in parameter.items()])

        # Run from dedicated temporary folder
        current_result['meta']['id'] = str(uuid.uuid4())
        temp_dir = os.path.join(data_folder, current_result['meta']['id'])
        if not os.path.exists(temp_dir):
            os.makedirs(temp_dir)

        jt = s.createJobTemplate()
        jt.remoteCommand = os.path.dirname(
            os.path.abspath(__file__)) + '/run_program.sh'
        jt.args = [command]
        jt.jobEnvironment = self.environment
        jt.workingDirectory = temp_dir
        jt.nativeSpecification = SIMULATION_GRID_PARAMS
        output_filename = os.path.join(temp_dir, 'stdout')
        error_filename = os.path.join(temp_dir, 'stderr')
        jt.outputPath = ':' + output_filename
        jt.errorPath = ':' + error_filename
        jobid = s.runJob(jt)

        # Save the template in our dictionary
        jobs[jobid] = {
            'template': jt,
            'result': current_result,
            'output': output_filename,
            'error': error_filename,
        }

    # Check for job completion, yield results when they are ready
    try:
        while len(jobs):
            found_done = False
            for curjob in jobs.keys():
                try:
                    status = s.jobStatus(curjob)
                except drmaa.errors.DrmCommunicationException:
                    # BUGFIX: previously this fell through and tested a
                    # stale (or, on the first job, undefined) status; skip
                    # this job and retry on the next pass instead.
                    continue
                if status is drmaa.JobState.DONE:
                    current_result = jobs[curjob]['result']
                    # TODO Actually compute time elapsed in the running
                    # state
                    current_result['meta']['elapsed_time'] = 0
                    try:
                        s.deleteJobTemplate(jobs[curjob]['template'])
                    except drmaa.errors.DrmCommunicationException:
                        pass
                    del jobs[curjob]
                    found_done = True
                    yield current_result
                    # Restart the scan: jobs was mutated while iterating
                    break
            if not found_done:
                # Sleep if we can't find a completed task
                time.sleep(6)
    finally:
        # Best-effort teardown: kill any jobs still pending and close the
        # DRMAA session.
        try:
            for v in jobs.values():
                s.deleteJobTemplate(v['template'])
            s.control(drmaa.JOB_IDS_SESSION_ALL,
                      drmaa.JobControlAction.TERMINATE)
            s.synchronize([drmaa.JOB_IDS_SESSION_ALL],
                          dispose=True)
            s.exit()
        except(drmaa.errors.NoActiveSessionException):
            pass
3.354481
3.392533
0.988783
def get_available_parameters(self):
    """Return a list of the parameters made available by the script."""
    # At the moment, we rely on regex to extract the list of available
    # parameters. A tighter integration with waf would allow for a more
    # natural extraction of the information.
    stdout = self.run_program("%s %s" % (self.script_executable,
                                         '--PrintHelp'),
                              environment=self.environment,
                              native_spec=BUILD_GRID_PARAMS)
    # BUGFIX: use raw strings for the regexes; '\s' in a non-raw literal
    # is an invalid escape sequence (a warning, and eventually an error,
    # in recent Python versions).
    options = re.findall(r'.*Program\s(?:Arguments|Options):(.*)'
                         r'General\sArguments.*', stdout, re.DOTALL)
    if len(options):
        return re.findall(r'.*--(.*?):.*', options[0], re.MULTILINE)
    return []
10.118304
9.825874
1.029761
def run_program(self, command, working_directory=os.getcwd(),
                environment=None, cleanup_files=True,
                native_spec="-l cputype=intel"):
    """Run a program through the grid, capturing the standard output.

    command - command line to execute (wrapped by run_program.sh)
    working_directory - directory the job runs in. NOTE(review): this
        default is evaluated once at import time, not per call — confirm
        that is intended.
    environment - optional mapping of environment variables for the job
    cleanup_files - when True, delete the captured output file afterwards
    native_spec - scheduler-specific resource specification string

    Returns the job's combined stdout/stderr contents as a string.
    """
    try:
        s = drmaa.Session()
        s.initialize()
        jt = s.createJobTemplate()
        jt.remoteCommand = os.path.dirname(
            os.path.abspath(__file__)) + '/run_program.sh'
        jt.args = [command]
        if environment is not None:
            jt.jobEnvironment = environment
        jt.workingDirectory = working_directory
        jt.nativeSpecification = native_spec
        # stdout and stderr are joined into a single capture file
        output_filename = os.path.join(working_directory, 'output.txt')
        jt.outputPath = ':' + output_filename
        jt.joinFiles = True
        jobid = s.runJob(jt)
        # Block until the job finishes
        s.wait(jobid, drmaa.Session.TIMEOUT_WAIT_FOREVER)
        with open(output_filename, 'r') as output:
            stdout = output.read()
        # Clean up
        if cleanup_files:
            os.remove(output_filename)
    finally:
        # Best-effort teardown of the session and any leftover jobs.
        # NOTE(review): if drmaa.Session() itself raises, 's' is unbound
        # here and this block raises NameError — confirm upstream handling.
        try:
            s.control(drmaa.JOB_IDS_SESSION_ALL,
                      drmaa.JobControlAction.TERMINATE)
            s.synchronize([drmaa.JOB_IDS_SESSION_ALL], dispose=True)
            s.exit()
        except(drmaa.errors.NoActiveSessionException):
            pass
    return stdout
2.846341
2.817943
1.010077
def adjacent(labels):
    '''Return a binary mask of all pixels which are adjacent to a pixel of
    a different label.

    labels - a 2-d integer label matrix (0 is background)
    '''
    high = labels.max()+1
    if high > np.iinfo(labels.dtype).max:
        # BUGFIX: np.int was removed in NumPy 1.24; the builtin int is the
        # equivalent platform-sized integer dtype.
        labels = labels.astype(int)
    image_with_high_background = labels.copy()
    image_with_high_background[labels == 0] = high
    # A pixel borders another label when the minimum and maximum label in
    # its 3x3 neighborhood differ; background is pushed above every label
    # so it never wins the minimum.
    min_label = scind.minimum_filter(image_with_high_background,
                                     footprint=np.ones((3,3),bool),
                                     mode = 'constant', cval = high)
    max_label = scind.maximum_filter(labels,
                                     footprint=np.ones((3,3),bool),
                                     mode = 'constant', cval = 0)
    return (min_label != max_label) & (labels > 0)
3.272564
2.608837
1.254415
hit_or_miss = scind.binary_hit_or_miss(image, strel1, strel2) return np.logical_and(image,np.logical_not(hit_or_miss))
def binary_thin(image, strel1, strel2)
Morphologically thin an image strel1 - the required values of the pixels in order to survive strel2 - at each pixel, the complement of strel1 if we care about the value
3.594012
4.170188
0.861835
def strel_disk(radius):
    """Create a disk structuring element for morphological operations.

    radius - radius of the disk
    """
    r = int(radius)
    ii, jj = np.mgrid[-r:r + 1, -r:r + 1]
    # Compare squared distances to avoid a sqrt
    inside = (ii * ii + jj * jj) <= radius * radius
    return np.where(inside, 1.0, 0.0)
2.419426
2.610682
0.926741
def strel_diamond(radius):
    """Create a diamond structuring element for morphological operations.

    radius - the offset of the corners of the diamond from the origin,
             rounded down (e.g. r=2: (0, 2), (2, 0), (0, -2), (-2, 0))

    returns a two-dimensional binary array
    """
    r = int(radius)
    ii, jj = np.mgrid[-r:(r + 1), -r:(r + 1)]
    diag_sum = ii + jj
    diag_diff = ii - jj
    # |i+j| <= radius and |i-j| <= radius carves the diamond
    return ((diag_sum <= radius) & (diag_sum >= -radius) &
            (diag_diff <= radius) & (diag_diff >= -radius))
2.817836
3.020271
0.932975
def strel_line(length, angle):
    """Create a line structuring element for morphological operations.

    length - distance between first and last pixels of the line, rounded
             down
    angle - angle from the horizontal, counter-clockwise in degrees

    Note: uses draw_line's Bresenham algorithm to select points.
    """
    radians = float(angle) * np.pi / 180.
    eps = np.finfo(float).eps
    x_off = int(eps + np.cos(radians) * length / 2)
    # Y is flipped here because "up" is negative
    y_off = -int(eps + np.sin(radians) * length / 2)
    x_center = abs(x_off)
    y_center = abs(y_off)
    strel = np.zeros((y_center * 2 + 1, x_center * 2 + 1), bool)
    # Draw both halves of the line through the center point
    draw_line(strel, (y_center - y_off, x_center - x_off),
              (y_center, x_center), True)
    draw_line(strel, (y_center + y_off, x_center + x_off),
              (y_center, x_center), True)
    return strel
2.507609
2.503838
1.001506
def strel_octagon(radius):
    """Create an octagonal structuring element for morphological
    operations.

    radius - the distance from the origin to each edge of the octagon
    """
    # Inscribe a diamond in a square to get an octagon. The distance to
    # the diagonal side is also iradius:
    #   iradius ** 2 = i**2 + j**2 with i = j  =>  i + j = iradius * sqrt(2)
    r = int(radius)
    ii, jj = np.mgrid[-r:(r + 1), -r:(r + 1)]
    diag = float(r) * np.sqrt(2)
    diag_sum = ii + jj
    diag_diff = ii - jj
    return ((diag_sum <= diag) & (diag_sum >= -diag) &
            (diag_diff <= diag) & (diag_diff >= -diag))
3.797353
3.918345
0.969122
def strel_pair(x, y):
    """Create a structuring element composed of the origin and one other
    pixel.

    x, y - x and y offsets of the other pixel

    returns a boolean structuring element
    """
    cx = int(np.abs(x))
    cy = int(np.abs(y))
    # The array is sized so that both the origin and the offset pixel fit
    strel = np.zeros((2 * cy + 1, 2 * cx + 1), bool)
    strel[cy, cx] = True
    strel[cy + int(y), cx + int(x)] = True
    return strel
2.380865
2.352432
1.012087
def strel_periodicline(xoff, yoff, n):
    """Create a structuring element composed of a line of evenly-spaced
    points.

    xoff, yoff - the line goes through the origin and this point
    n - the line is composed of the origin and n points on either side of
        the origin for a total of 2*n + 1 points

    The structuring element is composed of the points
    (k * yoff, k * xoff) for k in range(-n, n+1).
    """
    xoff = int(xoff)
    yoff = int(yoff)
    n = int(n)
    cx = abs(n * xoff)
    cy = abs(n * yoff)
    strel = np.zeros((2 * cy + 1, 2 * cx + 1), bool)
    for k in range(-n, n + 1):
        strel[cy + yoff * k, cx + xoff * k] = True
    return strel
2.535343
2.541145
0.997717
def strel_rectangle(width, height):
    """Create a rectangular structuring element.

    width - width of the element (j direction), rounded down to the
            nearest odd size
    height - height of the element (i direction), rounded down to the
             nearest odd size
    """
    rows = int((height - 1) // 2) * 2 + 1
    cols = int((width - 1) // 2) * 2 + 1
    return np.ones((rows, cols), bool)
6.871542
6.758088
1.016788
def cpmaximum(image, structure=np.ones((3,3),dtype=bool), offset=None):
    """Find the local maximum at each point in the image.

    image - a 2-d array of doubles
    structure - a boolean structuring element indicating which local
                elements should be sampled
    offset - the offset to the center of the structuring element
    """
    center = np.array(structure.shape) // 2
    if offset is None:
        offset = center
    # scipy expects the origin expressed relative to the footprint center
    shifted_origin = np.array(offset) - center
    return scind.maximum_filter(image,
                                footprint=structure,
                                origin=shifted_origin,
                                mode='constant',
                                cval=np.min(image))
3.254833
4.324656
0.752622
def relabel(image):
    """Given a labeled image, relabel each of the objects consecutively.

    image - a labeled 2-d integer array

    returns - (labeled image, object count)
    """
    original_labels = np.unique(image[image != 0])
    count = len(original_labels)
    if count == 0:
        return (image, 0)
    # Lookup table mapping each old label to its 1-based position in the
    # sorted list of labels; index 0 (background) stays 0.
    remap = np.zeros(original_labels.max() + 1, int)
    remap[original_labels] = np.arange(count) + 1
    # Remap every pixel through the table in one vectorized step
    return (remap[image], count)
3.505965
3.402454
1.030423
def convex_hull_image(image):
    """Given a binary image, return an image of the convex hull."""
    labels = image.astype(int)
    points, counts = convex_hull(labels, np.array([1]))
    hull_outline = np.zeros(image.shape, int)
    n_points = counts[0]
    # Connect consecutive hull vertices (wrapping around) with lines
    for idx in range(n_points):
        nxt = (idx + 1) % n_points
        draw_line(hull_outline, points[idx, 1:], points[nxt, 1:], 1)
    # Fill the interior of the traced outline
    filled = fill_labeled_holes(hull_outline)
    return filled == 1
4.320349
4.374887
0.987534
def convex_hull(labels, indexes=None, fast=True):
    """Given a labeled image, return a list of points per object ordered
    by angle from an interior point, representing the convex hull.

    labels - the label matrix
    indexes - an array of label #s to be processed, defaults to all
              non-zero labels

    Returns a matrix and a vector. The matrix consists of one row per
    point in the convex hull. Each row has three columns: the label #,
    the i coordinate of the point and the j coordinate of the point. The
    result is organized first by label, then the points are arranged
    counter-clockwise around the perimeter. The vector is a vector of #s
    of points in the convex hull per label.
    """
    if indexes is None:
        indexes = np.unique(labels)
        indexes.sort()
        indexes=indexes[indexes!=0]
    else:
        indexes=np.array(indexes)
    if len(indexes) == 0:
        # NOTE(review): this empty result has (0,2)-shaped points while
        # the blank-outline case below returns (0,3) columns — confirm
        # which shape callers expect.
        return np.zeros((0,2),int),np.zeros((0,),int)
    #
    # Reduce the # of points to consider
    #
    outlines = outline(labels)
    coords = np.argwhere(outlines > 0).astype(np.int32)
    if len(coords)==0:
        # Every outline of every image is blank
        return (np.zeros((0,3),int),
                np.zeros((len(indexes),),int))
    i = coords[:,0]
    j = coords[:,1]
    labels_per_point = labels[i,j]
    pixel_labels = np.column_stack((i,j,labels_per_point))
    return convex_hull_ijv(pixel_labels, indexes, fast)
3.781507
3.645708
1.037249
def triangle_areas(p1,p2,p3):
    """Compute an array of triangle areas given three arrays of triangle
    pts.

    p1, p2, p3 - three Nx2 arrays of points
    """
    # BUGFIX: np.float was removed in NumPy 1.24; the builtin float is the
    # equivalent (float64) dtype.
    v1 = (p2 - p1).astype(float)
    v2 = (p3 - p1).astype(float)
    # Area = |cross(v1, v2)| / 2, computed in place to limit peak memory.
    # Original:
    # cross1 = v1[:,1] * v2[:,0]
    # cross2 = v2[:,1] * v1[:,0]
    # a = (cross1-cross2) / 2
    cross1 = v1[:, 1]
    cross1 *= v2[:, 0]
    cross2 = v2[:, 1]
    cross2 *= v1[:, 0]
    a = cross1
    a -= cross2
    a /= 2.0
    del v1, v2, cross1, cross2
    a = a.copy()  # a is a view on the v1 buffer; shed the rest of it
    a = np.abs(a)
    #
    # Handle small round-off errors
    #
    a[a<np.finfo(np.float32).eps] = 0
    return a
3.43481
3.469392
0.990032
def draw_line(labels, pt0, pt1, value=1):
    """Draw a line between two points using the Bresenham algorithm.

    pt0, pt1 are in i,j format which is the reverse of x,y format.
    labels is modified in place: every visited pixel is set to value.

    Some code transcribed from
    http://www.cs.unc.edu/~mcmillan/comp136/Lecture6/Lines.html
    """
    y0, x0 = pt0
    y1, x1 = pt1
    dy = abs(y1 - y0)
    dx = abs(x1 - x0)
    x, y = x0, y0
    labels[y, x] = value
    step_x = 1 if x1 > x0 else -1
    step_y = 1 if y1 > y0 else -1
    if dy > dx:
        # Y varies fastest: step y every iteration, step x when the error
        # accumulator crosses zero.
        err = dx * 2 - dy
        while y != y1:
            if err >= 0:
                x += step_x
                err -= dy * 2
            y += step_y
            err += dx * 2
            labels[y, x] = value
    else:
        # X varies fastest: mirror image of the branch above.
        err = dy * 2 - dx
        while x != x1:
            if err >= 0:
                y += step_y
                err -= dx * 2
            x += step_x
            err += dy * 2
            labels[y, x] = value
2.105663
1.995505
1.055203
def fixup_scipy_ndimage_result(whatever_it_returned):
    """Convert a result from scipy.ndimage to a numpy array.

    scipy.ndimage has the annoying habit of returning a single, bare
    value instead of an array if the indexes passed in are of length 1.
    For instance: scind.maximum(image, labels, [1]) returns a float but
    scind.maximum(image, labels, [1,2]) returns a list.
    """
    # Anything indexable is treated as a sequence; bare scalars get
    # wrapped in a one-element array.
    if getattr(whatever_it_returned, "__getitem__", False):
        return np.array(whatever_it_returned)
    return np.array([whatever_it_returned])
2.595665
2.973917
0.87281
def centers_of_labels(labels):
    """Return the i,j coordinates of the centers of a labels matrix.

    The result returned is a 2 x n numpy array where n is the number of
    labels; result[0, x] is the i coordinate of a center and result[1, x]
    its j coordinate, so the result unpacks naturally as
    "i, j = centers_of_labels(labels)".
    """
    n_labels = np.max(labels)
    if n_labels == 0:
        return np.zeros((2, 0), int)
    centers = np.array(scind.center_of_mass(np.ones(labels.shape),
                                            labels,
                                            np.arange(n_labels) + 1))
    if centers.ndim == 1:
        # A single (i, j) pair comes back flat; present it as one column
        centers.shape = (2, 1)
        return centers
    return centers.transpose()
4.465417
1.994051
2.239369
def maximum_position_of_labels(image, labels, indices):
    """Return the i,j coordinates of the maximum value within each object.

    image - measure the maximum within this image
    labels - use the objects within this labels matrix
    indices - label #s to measure

    The result is a 2 x n numpy array: result[0, x] is the i coordinate
    of a maximum and result[1, x] its j coordinate.
    """
    if len(indices) == 0:
        return np.zeros((2, 0), int)
    positions = np.array(scind.maximum_position(image, labels, indices),
                         int)
    if positions.ndim == 1:
        # A single (i, j) pair comes back flat; present it as one column
        positions.shape = (2, 1)
        return positions
    return positions.transpose()
6.02579
1.911704
3.152051
def minimum_distance2(hull_a, center_a, hull_b, center_b):
    """Return the minimum distance squared, or 0 if overlap, between two
    convex hulls.

    hull_a - list of points in clockwise direction
    center_a - a point within the hull
    hull_b - list of points in clockwise direction
    center_b - a point within the hull
    """
    # Degenerate hulls (fewer than 3 vertices) must use the exhaustive
    # search; proper hulls can take the faster routine.
    degenerate = hull_a.shape[0] < 3 or hull_b.shape[0] < 3
    if degenerate:
        return slow_minimum_distance2(hull_a, hull_b)
    return faster_minimum_distance2(hull_a, center_a, hull_b, center_b)
2.726506
1.783581
1.528669
def slow_minimum_distance2(hull_a, hull_b):
    '''Do the minimum distance by exhaustive examination of all points

    hull_a, hull_b - arrays of hull vertices. Returns the squared
    distance, or 0 if the hulls overlap.
    '''
    d2_min = np.iinfo(int).max
    # Overlap check: a vertex of one hull inside the other means distance 0
    for a in hull_a:
        if within_hull(a, hull_b):
            return 0
    for b in hull_b:
        if within_hull(b, hull_a):
            return 0
    # Vertex-to-vertex squared distances
    for pt_a in hull_a:
        for pt_b in hull_b:
            d2_min = min(d2_min, np.sum((pt_a - pt_b)**2))
    for h1, h2 in ((hull_a, hull_b), (hull_b, hull_a)):
        # Find the distance from a vertex in h1 to an edge in h2
        for pt1 in h1:
            prev_pt2 = h2[-1,:]
            for pt2 in h2:
                if (np.dot(pt2-prev_pt2,pt1-prev_pt2) > 0 and
                    np.dot(prev_pt2-pt2,pt1-pt2) > 0):
                    # points form an acute triangle, so edge is closer
                    # than vertices
                    d2_min = min(d2_min,
                                 distance2_to_line(pt1, prev_pt2, pt2))
                prev_pt2 = pt2
    return d2_min
2.781738
2.566301
1.083948
def lines_intersect(pt1_p, pt2_p, pt1_q, pt2_q):
    '''Return true if two line segments intersect

    pt1_p, pt2_p - endpoints of first line segment
    pt1_q, pt2_q - endpoints of second line segment
    '''
    #
    # The idea here is to do the cross-product of the vector from
    # point 1 to point 2 of one segment against the cross products from
    # both points of the other segment. If any of the cross products are
    # zero, the point is colinear with the line. If the cross products
    # differ in sign, then one point is on one side of the line and the
    # other is on the other. If that happens for both, then the lines must
    # cross.
    #
    for pt1_a, pt2_a, pt1_b, pt2_b in ((pt1_p, pt2_p, pt1_q, pt2_q),
                                       (pt1_q, pt2_q, pt1_p, pt2_p)):
        v_a = pt2_a-pt1_a
        # Colinear endpoint: fall back to the 1-d overlap test
        cross_a_1b = np.cross(v_a, pt1_b-pt2_a)
        if cross_a_1b == 0 and colinear_intersection_test(pt1_a, pt2_a,
                                                          pt1_b):
            return True
        cross_a_2b = np.cross(v_a, pt2_b-pt2_a)
        if cross_a_2b == 0 and colinear_intersection_test(pt1_a, pt2_a,
                                                          pt2_b):
            return True
        # Both endpoints of b strictly on the same side of segment a:
        # no crossing is possible
        if (cross_a_1b < 0) == (cross_a_2b < 0):
            return False
    return True
2.579469
2.506892
1.028951
def find_farthest(point, hull):
    """Find the index of the vertex in hull farthest away from a point.

    Walks the hull starting from whichever end is farther, stopping as
    soon as the squared distance starts decreasing.
    """
    d_first = np.sum((point - hull[0, :]) ** 2)
    d_last = np.sum((point - hull[-1, :]) ** 2)
    if d_first > d_last:
        # Walk forward from the second vertex
        idx, step, stop, best = 1, 1, hull.shape[0], d_first
    else:
        # Walk backward from the second-to-last vertex
        idx, step, stop, best = hull.shape[0] - 2, -1, -1, d_last
    while idx != stop:
        d2 = np.sum((point - hull[idx, :]) ** 2)
        if d2 < best:
            # Distance started shrinking: the previous vertex was farthest
            break
        idx += step
        best = d2
    return idx - step
2.514901
2.39667
1.049331
def find_visible(hull, observer, background):
    '''Given an observer location, find the first and last visible points
    in the hull

    The observer at "observer" is looking at the hull whose most distant
    vertex from the observer is "background". Find the vertices that are
    the furthest distance from the line between observer and background.
    These will be the start and end in the vertex chain of vertices
    visible by the observer.
    '''
    pt_background = hull[background,:]
    vector = pt_background - observer
    # Scan forward from the background vertex while the (signed) distance
    # from the observer-background line keeps growing
    i = background
    dmax = 0
    while True:
        i_next = (i+1) % hull.shape[0]
        pt_next = hull[i_next,:]
        d = -np.cross(vector, pt_next-pt_background)
        # Distance shrank (or we wrapped all the way around): stop
        if d < dmax or i_next == background:
            i_min = i
            break
        dmax = d
        i = i_next
    # Same scan in the opposite direction for the other extreme
    dmax = 0
    i = background
    while True:
        i_next = (i+hull.shape[0]-1) % hull.shape[0]
        pt_next = hull[i_next,:]
        d = np.cross(vector, pt_next-pt_background)
        if d < dmax or i_next == background:
            i_max = i
            break
        dmax = d
        i = i_next
    return (i_min, i_max)
3.484785
1.844533
1.88925
def distance2_to_line(pt, l0, l1):
    """The perpendicular distance squared from a point to a line.

    pt - point in question (a 2-vector, or an Nx2 array of points)
    l0 - one point on the line (same shape convention as pt)
    l1 - another point on the line
    """
    pt = np.atleast_1d(pt)
    l0 = np.atleast_1d(l0)
    l1 = np.atleast_1d(l1)
    reshape = pt.ndim == 1
    if reshape:
        # BUGFIX: assigning .shape in place mutated arrays passed in by
        # the caller; reshape() returns views and leaves them intact.
        pt = pt.reshape(1, -1)
        l0 = l0.reshape(1, -1)
        l1 = l1.reshape(1, -1)
    # Squared cross product of (l1-l0) with (pt-l0), normalized by the
    # squared segment length, gives the squared perpendicular distance.
    result = (((l0[:,0] - l1[:,0]) * (l0[:,1] - pt[:,1]) -
               (l0[:,0] - pt[:,0]) * (l0[:,1] - l1[:,1]))**2 /
              np.sum((l1-l0)**2, 1))
    if reshape:
        result = result[0]
    return result
2.447439
2.070063
1.182302
def within_hull(point, hull):
    """Return true if the point is within the convex hull.

    hull lists its vertices in clockwise order; the point is inside
    exactly when it lies strictly to the same side of every edge.
    """
    previous = hull[-1, :]
    for vertex in hull:
        edge = vertex - previous
        # A non-negative cross product puts the point on or outside this
        # edge, so it cannot be strictly inside the hull.
        if np.cross(edge, point - vertex) >= 0:
            return False
        previous = vertex
    return True
3.104686
3.127426
0.992729
def all_true(a, indexes):
    """Find which vectors have all-true elements.

    Given an array "a" and indexes into the first elements of vectors
    within that array, return an array where each element is true if all
    elements of the corresponding vector are true.

    Example: a = [1,1,0,1,1,1,1], indexes=[0,3]
             vectors = [[1,1,0],[1,1,1,1]]
             return = [False, True]
    """
    # BUGFIX: accept plain Python lists for indexes; the arithmetic below
    # requires an ndarray (a list raised TypeError on `indexes + 1`).
    indexes = np.asarray(indexes, int)
    if len(indexes) == 0:
        return np.zeros(0, bool)
    if len(indexes) == 1:
        # BUGFIX: return a length-1 array, matching the documented
        # one-entry-per-vector contract (previously a bare scalar).
        return np.array([bool(np.all(a))])
    # Prefix sums let us count the true entries of each vector as a
    # difference of cumulative sums.
    cs = np.zeros(len(a) + 1, int)
    cs[1:] = np.cumsum(a)
    augmented_indexes = np.zeros(len(indexes) + 1, int)
    augmented_indexes[0:-1] = indexes + 1
    augmented_indexes[-1] = len(a) + 1
    counts = augmented_indexes[1:] - augmented_indexes[0:-1]
    hits = cs[augmented_indexes[1:] - 1] - cs[augmented_indexes[0:-1] - 1]
    # A vector is all-true when its true-count equals its length
    return counts == hits
3.683472
2.003498
1.83852
def ellipse_from_second_moments(image, labels, indexes, wants_compactness = False):
    """Calculate measurements of ellipses equivalent to the second moments
    of labels.

    image - the intensity at each point
    labels - for each labeled object, derive an ellipse
    indexes - sequence of indexes to process
    wants_compactness - if True, also return the compactness array

    Returns (centers, eccentricity, major_axis_length, minor_axis_length,
    orientation[, compactness]). eccentricity is the distance between
    foci divided by the major axis length; orientation is the angle of
    the major axis with respect to the X axis; compactness is the
    variance of the radial distribution normalized by the area.

    Some definitions taken from "Image Moments-Based Structuring and
    Tracking of Objects", LOURENA ROCHA, LUIZ VELHO, PAULO CEZAR P.
    CARVALHO,
    http://sibgrapi.sid.inpe.br/col/sid.inpe.br/banon/2002/10.23.11.34/doc/35.pdf
    particularly equation 5 (which has some errors in it).
    """
    if len(indexes) == 0:
        # BUGFIX: the empty result must have the same arity as the
        # non-empty one, i.e. include compactness when requested.
        empty = (np.zeros((0, 2)), np.zeros((0,)), np.zeros((0,)),
                 np.zeros((0,)), np.zeros((0,)))
        if wants_compactness:
            return empty + (np.zeros((0,)),)
        return empty
    i, j = np.argwhere(labels != 0).transpose()
    return ellipse_from_second_moments_ijv(i, j, image[i, j], labels[i, j],
                                           indexes, wants_compactness)
2.811697
2.913486
0.965063
def calculate_extents(labels, indexes):
    '''Return the area of each object divided by the area of its bounding box'''
    fix = fixup_scipy_ndimage_result
    # Pixel count of each object.
    areas = fix(scind.sum(np.ones(labels.shape), labels,
                          np.array(indexes, dtype=np.int32)))
    y, x = np.mgrid[0:labels.shape[0], 0:labels.shape[1]]
    # Per-object bounding boxes from coordinate extrema.
    x_min = fix(scind.minimum(x, labels, indexes))
    x_max = fix(scind.maximum(x, labels, indexes))
    y_min = fix(scind.minimum(y, labels, indexes))
    y_max = fix(scind.maximum(y, labels, indexes))
    bbox_areas = (x_max - x_min + 1) * (y_max - y_min + 1)
    return areas / bbox_areas
Return the area of each object divided by the area of its bounding box
2.84906
2.7343
1.04197
def calculate_perimeters(labels, indexes):
    '''Count the distances between adjacent pixels in the perimeters of the labels'''
    # Each pixel's 3x3 same-label neighborhood pattern indexes the
    # precomputed scoring table; summing the per-pixel scores over each
    # label yields its perimeter estimate.
    neighborhood_idx = table_idx_from_labels(labels)
    pixel_score = __perimeter_scoring[neighborhood_idx]
    return fixup_scipy_ndimage_result(
        scind.sum(pixel_score, labels, np.array(indexes, dtype=np.int32)))
Count the distances between adjacent pixels in the perimeters of the labels
12.68632
12.316647
1.030014
def table_idx_from_labels(labels):
    '''Return an array of indexes into a morphology lookup table

    labels - a labels matrix

    returns a matrix of values between 0 and 511 of indices appropriate
    for table_lookup, where a pixel's index is determined by whether or
    not the pixel has the same label as each of its 8 neighbors (and the
    pixel itself).  Neighbors beyond the image border count as different.
    '''
    indices = np.zeros(labels.shape, int)
    # Row-major neighbor offsets; bit k corresponds to the k-th offset.
    offsets = [(di, dj) for di in (-1, 0, 1) for dj in (-1, 0, 1)]
    for bit, (di, dj) in enumerate(offsets):
        # Restrict to the rows/columns whose shifted neighbor is in
        # bounds; everything else stays "different" (bit unset).
        row_lo = 1 if di == -1 else 0
        row_hi = labels.shape[0] - 1 if di == 1 else labels.shape[0]
        col_lo = 1 if dj == -1 else 0
        col_hi = labels.shape[1] - 1 if dj == 1 else labels.shape[1]
        same = np.zeros(labels.shape, bool)
        same[row_lo:row_hi, col_lo:col_hi] = (
            labels[row_lo:row_hi, col_lo:col_hi] ==
            labels[row_lo + di:row_hi + di, col_lo + dj:col_hi + dj])
        indices[same] += 1 << bit
    return indices
Return an array of indexes into a morphology lookup table labels - a labels matrix returns a matrix of values between 0 and 511 of indices appropriate for table_lookup where a pixel's index is determined based on whether or not the pixel has the same label as its neighbors (and is labeled)
4.181766
2.385289
1.753149
def calculate_solidity(labels, indexes=None):
    '''Calculate the area of each label divided by the area of its convex hull

    labels  - a label matrix
    indexes - the indexes of the labels to measure (default: all)
    '''
    if indexes is not None:
        indexes = np.array(indexes, dtype=np.int32)
    object_areas = scind.sum(np.ones(labels.shape), labels, indexes)
    hull_areas = calculate_convex_hull_areas(labels, indexes)
    return object_areas / hull_areas
Calculate the area of each label divided by the area of its convex hull labels - a label matrix indexes - the indexes of the labels to measure
4.202816
3.761574
1.117303
def block(shape, block_shape):
    '''Create a labels image that divides the image into blocks

    shape - the shape of the image to be blocked
    block_shape - the shape of one block

    returns a labels matrix and the indexes of all labels generated

    The idea here is to block-process an image by using SciPy label
    routines: divide the image into blocks of a configurable dimension,
    run scipy.ndimage functions over the labels, then apply the per-block
    values back to the image by indexing, e.g.:

        labels, indexes = block(image.shape, (60,60))
        minima = scind.minimum(image, labels, indexes)
        img2 = image - minima[labels]
    '''
    shape = np.array(shape)
    block_shape = np.array(block_shape)
    i, j = np.mgrid[0:shape[0], 0:shape[1]]
    # Number of blocks along each axis, at least one even if the block
    # is bigger than the image.
    ijmax = (shape.astype(float) / block_shape.astype(float)).astype(int)
    ijmax = np.maximum(ijmax, 1)
    # Map each pixel coordinate onto its block coordinate.
    multiplier = ijmax.astype(float) / shape.astype(float)
    i = (i * multiplier[0]).astype(int)
    j = (j * multiplier[1]).astype(int)
    labels = i * ijmax[1] + j
    # np.prod replaces np.product (removed in NumPy 2.0) and np.arange
    # avoids materializing an intermediate Python list.
    indexes = np.arange(np.prod(ijmax))
    return labels, indexes
Create a labels image that divides the image into blocks shape - the shape of the image to be blocked block_shape - the shape of one block returns a labels matrix and the indexes of all labels generated The idea here is to block-process an image by using SciPy label routines. This routine divides the image into blocks of a configurable dimension. The caller then calls scipy.ndimage functions to process each block as a labeled image. The block values can then be applied to the image via indexing. For instance: labels, indexes = block(image.shape, (60,60)) minima = scind.minimum(image, labels, indexes) img2 = image - minima[labels]
2.553919
2.594615
0.984315
def white_tophat(image, radius=None, mask=None, footprint=None):
    '''White tophat filter an image using a circular structuring element

    image - image in question
    radius - radius of the circular structuring element. If no radius, use
             an 8-connected structuring element.
    mask  - mask of significant pixels in the image. Points outside of
            the mask will not participate in the morphological operations
    '''
    # The white tophat is the image minus its opening: it keeps bright
    # features smaller than the structuring element.
    result = image - opening(image, radius, mask, footprint)
    # Restore masked-out pixels from the original image.
    if mask is not None:
        outside = np.logical_not(mask)
        result[outside] = image[outside]
    return result
White tophat filter an image using a circular structuring element image - image in question radius - radius of the circular structuring element. If no radius, use an 8-connected structuring element. mask - mask of significant pixels in the image. Points outside of the mask will not participate in the morphological operations
5.432225
2.664855
2.038469
def black_tophat(image, radius=None, mask=None, footprint=None):
    '''Black tophat filter an image using a circular structuring element

    image - image in question
    radius - radius of the circular structuring element. If no radius, use
             an 8-connected structuring element.
    mask  - mask of significant pixels in the image. Points outside of
            the mask will not participate in the morphological operations
    '''
    # The black tophat is the closing minus the image: it keeps dark
    # features smaller than the structuring element.
    result = closing(image, radius, mask, footprint) - image
    # Restore masked-out pixels from the original image.
    if mask is not None:
        outside = np.logical_not(mask)
        result[outside] = image[outside]
    return result
Black tophat filter an image using a circular structuring element image - image in question radius - radius of the circular structuring element. If no radius, use an 8-connected structuring element. mask - mask of significant pixels in the image. Points outside of the mask will not participate in the morphological operations
6.362772
3.157264
2.015281
def grey_erosion(image, radius=None, mask=None, footprint=None):
    '''Perform a grey erosion with masking

    image - the image to erode
    radius - radius of a circular structuring element (default: the
             8-connected 3x3 element)
    mask - only pixels inside the mask participate; pixels outside keep
           their original values
    footprint - an explicit boolean structuring element, overriding radius
    '''
    # Resolve the structuring element and its effective radius.
    if footprint is None:
        if radius is None:
            footprint = np.ones((3, 3), bool)
            radius = 1
        else:
            footprint = strel_disk(radius) == 1
    else:
        radius = max(1, np.max(np.array(footprint.shape) // 2))
    pad = int(np.ceil(radius))
    # Pad with ones so pixels beyond the border (and masked pixels) never
    # win the minimum, i.e. they don't participate in the erosion.
    padded = np.ones(np.array(image.shape) + pad * 2)
    padded[pad:-pad, pad:-pad] = image
    if mask is not None:
        outside = np.logical_not(mask)
        padded[pad:-pad, pad:-pad][outside] = 1
    eroded = scind.grey_erosion(padded, footprint=footprint)
    result = eroded[pad:-pad, pad:-pad]
    if mask is not None:
        # Masked-out pixels keep their original values.
        result[outside] = image[outside]
    return result
Perform a grey erosion with masking
2.69004
2.708525
0.993175
def opening(image, radius=None, mask=None, footprint=None):
    '''Do a morphological opening

    image - pixel image to operate on
    radius - use a structuring element with the given radius. If no radius,
             use an 8-connected structuring element.
    mask - if present, only use unmasked pixels for operations
    '''
    # Opening = erosion followed by dilation with the same element.
    return grey_dilation(grey_erosion(image, radius, mask, footprint),
                         radius, mask, footprint)
Do a morphological opening image - pixel image to operate on radius - use a structuring element with the given radius. If no radius, use an 8-connected structuring element. mask - if present, only use unmasked pixels for operations
4.927049
2.168481
2.27212
def closing(image, radius=None, mask=None, footprint = None):
    '''Do a morphological closing

    image - pixel image to operate on
    radius - use a structuring element with the given radius. If no
             structuring element, use an 8-connected structuring element.
    mask - if present, only use unmasked pixels for operations
    '''
    # Closing = dilation followed by erosion with the same element.
    return grey_erosion(grey_dilation(image, radius, mask, footprint),
                        radius, mask, footprint)
Do a morphological closing image - pixel image to operate on radius - use a structuring element with the given radius. If no structuring element, use an 8-connected structuring element. mask - if present, only use unmasked pixels for operations
5.259422
2.080625
2.527809
def openlines(image, linelength=10, dAngle=10, mask=None):
    '''Do a morphological opening along lines of different angles

    Return the difference between the max and min response over the
    different angles for each pixel.  This effectively removes dots and
    only keeps lines.

    image - pixel image to operate on
    linelength - length of the structural element
    dAngle - angle step for the rotating lines
    mask - if present, only use unmasked pixels for operations
    '''
    n_angles = 180 // dAngle
    responses = np.zeros((n_angles, image.shape[0], image.shape[1]),
                         image.dtype)
    for k in range(n_angles):
        # Open with a line element rotated to the k-th angle.
        element = strel_line(linelength, dAngle * k)
        responses[k, :, :] = opening(image, mask=mask, footprint=element)
    return np.max(responses, axis=0) - np.min(responses, axis=0)
Do a morphological opening along lines of different angles. Return difference between max and min response to different angles for each pixel. This effectively removes dots and only keeps lines. image - pixel image to operate on length - length of the structural element angluar_resolution - angle step for the rotating lines mask - if present, only use unmasked pixels for operations
3.414423
3.67617
0.928799
def table_lookup(image, table, border_value, iterations = None):
    '''Perform a morphological transform on an image, directed by its neighbors

    image - a binary image
    table - a 512-element table giving the transform of each pixel given
            the values of that pixel and its 8-connected neighbors.
    border_value - the value of pixels beyond the border of the image.
                   This should test as True or False.
    iterations - number of passes to apply; None means iterate until the
                 image stops changing.

    The pixels are numbered like this:

    0 1 2
    3 4 5
    6 7 8

    The index at a pixel is the sum of 2**<pixel-number> for pixels
    that evaluate to true.
    '''
    #
    # Test for a table that never transforms a zero into a one:
    #
    center_is_zero = np.array([(x & 2**4) == 0 for x in range(2**9)])
    use_index_trick = False
    if (not np.any(table[center_is_zero]) and
        (np.issubdtype(image.dtype, bool) or np.issubdtype(image.dtype, int))):
        # Zeros stay zeros, so only the coordinates of the "on" pixels
        # need to be tracked (the index trick) - faster for sparse images.
        use_index_trick = True
        invert = False
    elif (np.all(table[~center_is_zero]) and np.issubdtype(image.dtype, bool)):
        # All ones stay ones, invert the table and the image and do the trick
        use_index_trick = True
        invert = True
        image = ~ image
        # table index 0 -> 511 and the output is reversed
        table = ~ table[511-np.arange(512)]
        border_value = not border_value
    if use_index_trick:
        orig_image = image
        index_i, index_j, image = prepare_for_index_lookup(image, border_value)
        index_i, index_j = index_lookup(index_i, index_j,
                                        image, table, iterations)
        image = extract_from_image_lookup(orig_image, index_i, index_j)
        if invert:
            # Undo the inversion applied above.
            image = ~ image
        return image
    # Fall back to whole-image passes.  NOTE: "counter != iterations"
    # never terminates by count when iterations is None - the loop then
    # exits only through the convergence test at the bottom.
    counter = 0
    while counter != iterations:
        counter += 1
        #
        # We accumulate into the indexer to get the index into the table
        # at each point in the image
        #
        if image.shape[0] < 3 or image.shape[1] < 3:
            # Image too small for the compiled helper: build the 9-bit
            # neighborhood index with shifted additions (out-of-bounds
            # neighbors simply contribute nothing here).
            image = image.astype(bool)
            indexer = np.zeros(image.shape,int)
            indexer[1:,1:]   += image[:-1,:-1] * 2**0
            indexer[1:,:]    += image[:-1,:] * 2**1
            indexer[1:,:-1]  += image[:-1,1:] * 2**2
            indexer[:,1:]    += image[:,:-1] * 2**3
            indexer[:,:]     += image[:,:] * 2**4
            indexer[:,:-1]   += image[:,1:] * 2**5
            indexer[:-1,1:]  += image[1:,:-1] * 2**6
            indexer[:-1,:]   += image[1:,:] * 2**7
            indexer[:-1,:-1] += image[1:,1:] * 2**8
        else:
            indexer = table_lookup_index(np.ascontiguousarray(image,np.uint8))
        if border_value:
            # A true border contributes its neighbor bits along each edge.
            indexer[0,:] |= 2**0 + 2**1 + 2**2
            indexer[-1,:] |= 2**6 + 2**7 + 2**8
            indexer[:,0] |= 2**0 + 2**3 + 2**6
            indexer[:,-1] |= 2**2 + 2**5 + 2**8
        new_image = table[indexer]
        if np.all(new_image == image):
            # Converged - further passes would not change anything.
            break
        image = new_image
    return image
Perform a morphological transform on an image, directed by its neighbors image - a binary image table - a 512-element table giving the transform of each pixel given the values of that pixel and its 8-connected neighbors. border_value - the value of pixels beyond the border of the image. This should test as True or False. The pixels are numbered like this: 0 1 2 3 4 5 6 7 8 The index at a pixel is the sum of 2**<pixel-number> for pixels that evaluate to true.
3.491475
2.496384
1.398613
def pattern_of(index):
    '''Return the pattern represented by an index value

    The nine bits of "index" map row-major onto a 3x3 boolean
    neighborhood: bit 0 is (0,0), bit 1 is (0,1), ... bit 8 is (2,2).
    '''
    bits = [index & (1 << bit) for bit in range(9)]
    return np.array(bits, bool).reshape(3, 3)
Return the pattern represented by an index value
2.547508
2.253209
1.130613
def index_of(pattern):
    '''Return the index of a given pattern

    Folds a 3x3 boolean neighborhood back into a 9-bit integer,
    row-major: bit 0 = (0,0), bit 1 = (0,1), ... bit 8 = (2,2).
    '''
    total = 0
    for bit in range(9):
        total += pattern[bit // 3, bit % 3] * (1 << bit)
    return total
Return the index of a given pattern
1.823087
1.794459
1.015954
def make_table(value, pattern, care=np.ones((3,3),bool)):
    '''Return a table suitable for table_lookup

    value   - set all table entries matching "pattern" to "value", all
              others to not "value"
    pattern - a 3x3 boolean array with the pattern to match
    care    - a 3x3 boolean array: true where the pattern must match,
              false where the position is a don't-care
    '''
    def matches(index):
        # Compare each of the nine neighborhood bits to the pattern,
        # skipping don't-care positions.
        for bit in range(9):
            row, col = bit // 3, bit % 3
            if not care[row, col]:
                continue
            if bool(index & (1 << bit)) != bool(pattern[row, col]):
                return False
        return True

    return np.array([value if matches(index) else not value
                     for index in range(512)], bool)
Return a table suitable for table_lookup value - set all table entries matching "pattern" to "value", all others to not "value" pattern - a 3x3 boolean array with the pattern to match care - a 3x3 boolean array where each value is true if the pattern must match at that position and false if we don't care if the pattern matches at that position.
3.881137
2.19147
1.77102
def branchpoints(image, mask=None):
    '''Remove all pixels from an image except for branchpoints

    image - a skeletonized image
    mask -  a mask of pixels excluded from consideration

    1 0 1     ? 0 ?
    0 1 0  -> 0 1 0
    0 1 0     0 ? 0
    '''
    global branchpoints_table
    working = image if mask is None else image.astype(bool).copy()
    if mask is not None:
        # Masked-out pixels are treated as background.
        working[~mask] = False
    result = table_lookup(working, branchpoints_table, False, 1)
    if mask is not None:
        result[~mask] = image[~mask]
    return result
Remove all pixels from an image except for branchpoints image - a skeletonized image mask - a mask of pixels excluded from consideration 1 0 1 ? 0 ? 0 1 0 -> 0 1 0 0 1 0 0 ? 0
4.886368
2.321281
2.105031
def branchings(image, mask=None):
    '''Count the number of branches eminating from each pixel

    image - a binary image
    mask - optional mask of pixels not to consider

    This is the count of the number of branches that eminate from a
    pixel. A pixel with neighbors fore and aft has two branches, an
    endpoint has one and a fork has three. The quadrabranch has four:

    1 0 1
    0 1 0 -> 4
    1 0 1
    '''
    global branchings_table
    working = image if mask is None else image.astype(bool).copy()
    if mask is not None:
        working[~mask] = False
    # Not a binary operation: convolve with powers of two to compute the
    # 512-entry table index at every pixel in one pass.
    kernel = np.array([[1, 2, 4],
                       [8, 16, 32],
                       [64, 128, 256]])
    indexer = scind.convolve(working.astype(int), kernel,
                             mode='constant').astype(int)
    return branchings_table[indexer]
Count the number of branches eminating from each pixel image - a binary image mask - optional mask of pixels not to consider This is the count of the number of branches that eminate from a pixel. A pixel with neighbors fore and aft has branches fore and aft = 2. An endpoint has one branch. A fork has 3. Finally, there's the quadrabranch which has 4: 1 0 1 0 1 0 -> 4 1 0 1
6.124542
2.373902
2.579948
def bridge(image, mask=None, iterations = 1):
    '''Fill in pixels that bridge gaps.

    1 0 0     1 0 0
    0 0 0  -> 0 1 0
    0 0 1     0 0 1
    '''
    global bridge_table
    working = image if mask is None else image.astype(bool).copy()
    if mask is not None:
        working[~mask] = False
    result = table_lookup(working, bridge_table, False, iterations)
    if mask is not None:
        result[~mask] = image[~mask]
    return result
Fill in pixels that bridge gaps. 1 0 0 1 0 0 0 0 0 -> 0 1 0 0 0 1 0 0 1
3.867439
2.375436
1.628097
def clean(image, mask=None, iterations = 1):
    '''Remove isolated pixels

    0 0 0     0 0 0
    0 1 0  -> 0 0 0
    0 0 0     0 0 0

    Border pixels and pixels adjoining masks are removed unless one valid
    neighbor is true.
    '''
    global clean_table
    working = image if mask is None else image.astype(bool).copy()
    if mask is not None:
        working[~mask] = False
    result = table_lookup(working, clean_table, False, iterations)
    if mask is not None:
        result[~mask] = image[~mask]
    return result
Remove isolated pixels 0 0 0 0 0 0 0 1 0 -> 0 0 0 0 0 0 0 0 0 Border pixels and pixels adjoining masks are removed unless one valid neighbor is true.
4.965124
2.491963
1.992454
def diag(image, mask=None, iterations=1):
    '''4-connect pixels that are 8-connected

    0 0 0     0 0 ?
    0 0 1  -> 0 1 1
    0 1 0     ? 1 ?
    '''
    global diag_table
    working = image if mask is None else image.astype(bool).copy()
    if mask is not None:
        working[~mask] = False
    result = table_lookup(working, diag_table, False, iterations)
    if mask is not None:
        result[~mask] = image[~mask]
    return result
4-connect pixels that are 8-connected 0 0 0 0 0 ? 0 0 1 -> 0 1 1 0 1 0 ? 1 ?
4.561491
2.535667
1.798931
def endpoints(image, mask=None):
    '''Remove all pixels from an image except for endpoints

    image - a skeletonized image
    mask -  a mask of pixels excluded from consideration

    1 0 0     ? 0 0
    0 1 0  -> 0 1 0
    0 0 0     0 0 0
    '''
    global endpoints_table
    working = image if mask is None else image.astype(bool).copy()
    if mask is not None:
        working[~mask] = False
    result = table_lookup(working, endpoints_table, False, 1)
    if mask is not None:
        result[~mask] = image[~mask]
    return result
Remove all pixels from an image except for endpoints image - a skeletonized image mask - a mask of pixels excluded from consideration 1 0 0 ? 0 0 0 1 0 -> 0 1 0 0 0 0 0 0 0
4.627199
2.252181
2.054541
def fill(image, mask=None, iterations=1):
    '''Fill isolated black pixels

    1 1 1     1 1 1
    1 0 1  -> 1 1 1
    1 1 1     1 1 1
    '''
    global fill_table
    working = image if mask is None else image.astype(bool).copy()
    if mask is not None:
        # Treat masked-out pixels as foreground so they don't create
        # fillable holes; the border is foreground for the same reason.
        working[~mask] = True
    result = table_lookup(working, fill_table, True, iterations)
    if mask is not None:
        result[~mask] = image[~mask]
    return result
Fill isolated black pixels 1 1 1 1 1 1 1 0 1 -> 1 1 1 1 1 1 1 1 1
3.415129
2.507438
1.361999
def fill4(image, mask=None, iterations=1):
    '''Fill 4-connected black pixels

    x 1 x     x 1 x
    1 0 1  -> 1 1 1
    x 1 x     x 1 x
    '''
    global fill4_table
    working = image if mask is None else image.astype(bool).copy()
    if mask is not None:
        # Treat masked-out pixels as foreground so they don't create
        # fillable holes; the border is foreground for the same reason.
        working[~mask] = True
    result = table_lookup(working, fill4_table, True, iterations)
    if mask is not None:
        result[~mask] = image[~mask]
    return result
Fill 4-connected black pixels x 1 x x 1 x 1 0 1 -> 1 1 1 x 1 x x 1 x
3.895884
2.50944
1.552491
def hbreak(image, mask=None, iterations=1):
    '''Remove horizontal breaks

    1 1 1     1 1 1
    0 1 0  -> 0 0 0  (this case only)
    1 1 1     1 1 1

    Note: "iterations" is accepted for API symmetry with the other
    morphological operations but is not used - table_lookup is invoked
    with its default iteration behavior.
    '''
    global hbreak_table
    working = image if mask is None else image.astype(bool).copy()
    if mask is not None:
        working[~mask] = False
    result = table_lookup(working, hbreak_table, False)
    if mask is not None:
        result[~mask] = image[~mask]
    return result
Remove horizontal breaks 1 1 1 1 1 1 0 1 0 -> 0 0 0 (this case only) 1 1 1 1 1 1
3.744528
2.405768
1.556479
def vbreak(image, mask=None, iterations=1):
    '''Remove vertical breaks (the vertical analogue of hbreak)

    1 0 1     1 0 1
    1 1 1  -> 1 0 1  (this case only)
    1 0 1     1 0 1

    Note: "iterations" is accepted for API symmetry with the other
    morphological operations but is not used - table_lookup is invoked
    with its default iteration behavior.
    '''
    global vbreak_table
    working = image if mask is None else image.astype(bool).copy()
    if mask is not None:
        working[~mask] = False
    result = table_lookup(working, vbreak_table, False)
    if mask is not None:
        result[~mask] = image[~mask]
    return result
Remove horizontal breaks 1 1 1 1 1 1 0 1 0 -> 0 0 0 (this case only) 1 1 1 1 1 1
3.890568
2.448033
1.589263
def majority(image, mask=None, iterations=1):
    '''A pixel takes the value of the majority of its neighbors'''
    global majority_table
    working = image if mask is None else image.astype(bool).copy()
    if mask is not None:
        working[~mask] = False
    result = table_lookup(working, majority_table, False, iterations)
    if mask is not None:
        result[~mask] = image[~mask]
    return result
A pixel takes the value of the majority of its neighbors
3.961994
3.473124
1.140758
def remove(image, mask=None, iterations=1):
    '''Remove interior pixels: turn a 1 pixel to 0 if its 4-connected
    neighbors are all 1

    ? 1 ?     ? 1 ?
    1 1 1  -> 1 0 1
    ? 1 ?     ? 1 ?

    Note: "iterations" is accepted for API symmetry with the other
    morphological operations but is not used - table_lookup is invoked
    with its default iteration behavior.
    '''
    global remove_table
    working = image if mask is None else image.astype(bool).copy()
    if mask is not None:
        working[~mask] = False
    result = table_lookup(working, remove_table, False)
    if mask is not None:
        result[~mask] = image[~mask]
    return result
Turn 1 pixels to 0 if their 4-connected neighbors are all 0 ? 1 ? ? 1 ? 1 1 1 -> 1 0 1 ? 1 ? ? 1 ?
5.005267
2.614547
1.914392
def spur(image, mask=None, iterations=1):
    '''Remove spur pixels from an image

    0 0 0     0 0 0
    0 1 0  -> 0 0 0
    0 0 1     0 0 ?
    '''
    global spur_table_1, spur_table_2
    working = image if mask is None else image.astype(bool).copy()
    if mask is not None:
        working[~mask] = False
    # Operate on the sparse (i, j) representation of the "on" pixels.
    index_i, index_j, working = prepare_for_index_lookup(working, False)
    if iterations is None:
        # One pass per "on" pixel is enough to erode any spur completely.
        iterations = len(index_i)
    for _ in range(iterations):
        # Apply the two spur sub-tables in sequence each pass.
        for table in (spur_table_1, spur_table_2):
            index_i, index_j = index_lookup(index_i, index_j,
                                            working, table, 1)
    result = extract_from_image_lookup(image, index_i, index_j)
    if mask is not None:
        result[~mask] = image[~mask]
    return result
Remove spur pixels from an image 0 0 0 0 0 0 0 1 0 -> 0 0 0 0 0 1 0 0 ?
3.069102
2.578575
1.190232
def thicken(image, mask=None, iterations=1):
    '''Thicken the objects in an image where doing so does not connect them

    0 0 0     ? ? ?
    0 0 0  -> ? 1 ?
    0 0 1     ? ? ?

    1 0 0     ? ? ?
    0 0 0  -> ? 0 ?
    0 0 1     ? ? ?
    '''
    global thicken_table
    working = image if mask is None else image.astype(bool).copy()
    if mask is not None:
        working[~mask] = False
    result = table_lookup(working, thicken_table, False, iterations)
    if mask is not None:
        result[~mask] = image[~mask]
    return result
Thicken the objects in an image where doing so does not connect them 0 0 0 ? ? ? 0 0 0 -> ? 1 ? 0 0 1 ? ? ? 1 0 0 ? ? ? 0 0 0 -> ? 0 ? 0 0 1 ? ? ?
3.710635
2.063251
1.798441
def thin(image, mask=None, iterations=1):
    '''Thin an image to lines, preserving Euler number

    Implements thinning as described in algorithm # 1 from
    Guo, "Parallel Thinning with Two Subiteration Algorithms",
    Communications of the ACM, Vol 32 #3 page 359.

    image - a binary image
    mask - only thin pixels within the mask
    iterations - number of thinning passes; None means thin until
                 convergence
    '''
    global thin_table, eight_connect
    if thin_table is None:
        # Lazily build the two sub-iteration lookup tables.
        thin_table = np.zeros((2, 512), bool)
        for i in range(512):
            if (i & 16) == 0:
                # Center pixel is off - it stays off.
                continue
            pat = pattern_of(i & ~ 16)
            ipat = pat.astype(int)
            if scind.label(pat, eight_connect)[1] != 1:
                # Removing the center would disconnect its neighbors:
                # keep the pixel.
                thin_table[:, i] = True
                continue
            n1 = ((ipat[0, 0] or ipat[0, 1]) + (ipat[0, 2] or ipat[1, 2]) +
                  (ipat[2, 2] or ipat[2, 1]) + (ipat[2, 0] or ipat[1, 0]))
            n2 = ((ipat[0, 1] or ipat[0, 2]) + (ipat[1, 2] or ipat[2, 2]) +
                  (ipat[2, 1] or ipat[2, 0]) + (ipat[1, 0] or ipat[0, 0]))
            if min(n1, n2) not in (2, 3):
                thin_table[:, i] = True
                continue
            thin_table[0, i] = ((pat[0, 1] or pat[0, 2] or not pat[2, 2])
                                and pat[1, 2])
            thin_table[1, i] = ((pat[2, 1] or pat[2, 0] or not pat[0, 0])
                                and pat[1, 0])
    if mask is None:
        masked_image = image.copy()
    else:
        masked_image = image.copy()
        masked_image[~mask] = False
    # Operate on the sparse (i, j) representation of the "on" pixels.
    index_i, index_j, masked_image = prepare_for_index_lookup(masked_image,
                                                             False)
    if iterations is None:
        iterations = len(index_i)
    for i in range(iterations):
        hit_count = len(index_i)
        for j in range(2):
            index_i, index_j, = index_lookup(index_i, index_j,
                                             masked_image,
                                             thin_table[j], 1)
        if hit_count == len(index_i):
            # Converged: no pixels were removed in this pass.
            break
    masked_image = extract_from_image_lookup(image, index_i, index_j)
    if mask is not None:
        # Restore pixels outside the mask from the original image,
        # consistent with spur(), clean(), bridge() etc.  (The previous
        # code here was a no-op self-assignment:
        # masked_image[~mask] = masked_image[~mask].)
        masked_image[~mask] = image[~mask]
    return masked_image
Thin an image to lines, preserving Euler number Implements thinning as described in algorithm # 1 from Guo, "Parallel Thinning with Two Subiteration Algorithms", Communications of the ACM, Vol 32 #3 page 359.
3.378658
2.611367
1.293827
def distance_color_labels(labels):
    '''Recolor a labels matrix so that adjacent labels have distant numbers

    labels - a labels matrix

    Returns a matrix of the same shape with the non-zero labels
    renumbered; background (zero) pixels remain zero.
    '''
    #
    # Color labels so adjacent ones are most distant
    #
    colors = color_labels(labels, True)
    #
    # Order pixels by color, then label
    #
    rlabels = labels.ravel()
    order = np.lexsort((rlabels, colors.ravel()))
    #
    # Construct color indices with the cumsum trick:
    # cumsum([0,0,1,0,1]) = [0,0,1,1,2]
    # and copy back into the color array, using the order.
    #
    # "different" marks the first pixel of each new label run in sorted
    # order (the leading element is True only for foreground), so the
    # cumulative sum hands out consecutive output numbers per label.
    different = np.hstack([[rlabels[order[0]] > 0],
                           rlabels[order[1:]] != rlabels[order[:-1]]])
    # We need to careful about ravel() returning a new object, but in the usual
    # case of colors having order='C', this won't create any copies.
    rcolor = colors.ravel()
    rcolor[order] = np.cumsum(different).astype(colors.dtype)
    return rcolor.reshape(colors.shape).astype(labels.dtype)
Recolor a labels matrix so that adjacent labels have distant numbers
7.142723
6.363606
1.122433
def color_labels(labels, distance_transform = False):
    '''Color a labels matrix so that no adjacent labels have the same color

    distance_transform - if true, distance transform the labels to find out
         which objects are closest to each other.

    Create a label coloring matrix which assigns a color (1-n) to each pixel
    in the labels matrix such that all pixels similarly labeled are similarly
    colored and so that no similiarly colored, 8-connected pixels have
    different labels.

    You can use this function to partition the labels matrix into groups
    of objects that are not touching; you can then operate on masks
    and be assured that the pixels from one object won't interfere with
    pixels in another.

    returns the color matrix
    '''
    if distance_transform:
        # Propagate each label across the background to its nearest
        # object so that nearby (not just touching) objects also end up
        # with different colors.
        i,j = scind.distance_transform_edt(labels == 0, return_distances=False,
                                           return_indices = True)
        dt_labels = labels[i,j]
    else:
        dt_labels = labels
    # Get the neighbors for each object
    v_count, v_index, v_neighbor = find_neighbors(dt_labels)
    # Quickly get rid of labels with no neighbors. Greedily assign
    # all of these a color of 1
    v_color = np.zeros(len(v_count)+1,int) # the color per object - zero is uncolored
    zero_count = (v_count==0)
    if np.all(zero_count):
        # can assign all objects the same color
        return (labels!=0).astype(int)
    v_color[1:][zero_count] = 1
    # Keep only the objects that do have neighbors; v_label remembers
    # their original (1-based) label numbers.
    v_count = v_count[~zero_count]
    v_index = v_index[~zero_count]
    v_label = np.argwhere(~zero_count).transpose()[0]+1
    # If you process the most connected labels first and use a greedy
    # algorithm to preferentially assign a label to an existing color,
    # you'll get a coloring that uses 1+max(connections) at most.
    #
    # Welsh, "An upper bound for the chromatic number of a graph and
    # its application to timetabling problems", The Computer Journal, 10(1)
    # p 85 (1967)
    #
    sort_order = np.lexsort([-v_count])
    v_count = v_count[sort_order]
    v_index = v_index[sort_order]
    v_label = v_label[sort_order]
    for i in range(len(v_count)):
        # Colors already taken by this object's neighbors.
        neighbors = v_neighbor[v_index[i]:v_index[i]+v_count[i]]
        colors = np.unique(v_color[neighbors])
        if colors[0] == 0:
            if len(colors) == 1:
                # only one color and it's zero. All neighbors are unlabeled
                v_color[v_label[i]] = 1
                continue
            else:
                # Drop the "uncolored" sentinel before gap-searching.
                colors = colors[1:]
        # The colors of neighbors will be ordered, so there are two cases:
        # * all colors up to X appear - colors == np.arange(1,len(colors)+1)
        # * some color is missing - the color after the first missing will
        #   be mislabeled: colors[i] != np.arange(1, len(colors)+1)
        crange = np.arange(1,len(colors)+1)
        misses = crange[colors != crange]
        if len(misses):
            # Reuse the smallest unused color.
            color = misses[0]
        else:
            # All colors 1..n are taken; open a new one.
            color = len(colors)+1
        v_color[v_label[i]] = color
    return v_color[labels]
Color a labels matrix so that no adjacent labels have the same color distance_transform - if true, distance transform the labels to find out which objects are closest to each other. Create a label coloring matrix which assigns a color (1-n) to each pixel in the labels matrix such that all pixels similarly labeled are similarly colored and so that no similiarly colored, 8-connected pixels have different labels. You can use this function to partition the labels matrix into groups of objects that are not touching; you can then operate on masks and be assured that the pixels from one object won't interfere with pixels in another. returns the color matrix
5.445582
3.451466
1.577759
def skeletonize(image, mask=None, ordering = None):
    '''Skeletonize the image

    Take the distance transform.
    Order the 1 points by the distance transform.
    Remove a point if it has more than 1 neighbor and if removing it
    does not change the Euler number.

    image - the binary image to be skeletonized
    mask - only skeletonize pixels within the mask
    ordering - a matrix of the same dimensions as the image. The matrix
               provides the ordering of the erosion with the lowest values
               being eroded first. The default is to use the distance
               transform.
    '''
    global eight_connect
    if mask is None:
        masked_image = image
    else:
        masked_image = image.astype(bool).copy()
        masked_image[~mask] = False
    #
    # Lookup table - start with only positive pixels.
    # Keep if # pixels in neighborhood is 2 or less
    # Keep if removing the pixel results in a different connectivity
    #
    table = (make_table(True,np.array([[0,0,0],[0,1,0],[0,0,0]],bool),
                        np.array([[0,0,0],[0,1,0],[0,0,0]],bool)) &
             (np.array([scind.label(pattern_of(index), eight_connect)[1] !=
                        scind.label(pattern_of(index & ~ 2**4),
                                    eight_connect)[1]
                        for index in range(512) ]) |
              np.array([np.sum(pattern_of(index))<3 for index in range(512)])))
    if ordering is None:
        distance = scind.distance_transform_edt(masked_image)
    else:
        distance = ordering
    #
    # The processing order along the edge is critical to the shape of the
    # resulting skeleton: if you process a corner first, that corner will
    # be eroded and the skeleton will miss the arm from that corner. Pixels
    # with fewer neighbors are more "cornery" and should be processed last.
    #
    cornerness_table = np.array([9-np.sum(pattern_of(index))
                                 for index in range(512)])
    corner_score = table_lookup(masked_image, cornerness_table, False,1)
    i,j = np.mgrid[0:image.shape[0],0:image.shape[1]]
    result = masked_image.copy()
    # Keep only the ordering data and coordinates of the foreground pixels.
    distance = distance[result]
    i = np.ascontiguousarray(i[result],np.int32)
    j = np.ascontiguousarray(j[result],np.int32)
    result = np.ascontiguousarray(result,np.uint8)
    #
    # We use a random # for tiebreaking. Assign each pixel in the image a
    # predictable, random # so that masking doesn't affect arbitrary choices
    # of skeletons
    #
    np.random.seed(0)
    # np.prod replaces np.product, which was removed in NumPy 2.0.
    tiebreaker = np.random.permutation(np.arange(np.prod(masked_image.shape)))
    tiebreaker.shape = masked_image.shape
    order = np.lexsort((tiebreaker[masked_image],
                        corner_score[masked_image],
                        distance))
    order = np.ascontiguousarray(order, np.int32)
    table = np.ascontiguousarray(table, np.uint8)
    # Compiled helper: erode pixels in order while preserving connectivity.
    skeletonize_loop(result, i, j, order, table)
    result = result.astype(bool)
    if mask is not None:
        result[~mask] = image[~mask]
    return result
Skeletonize the image Take the distance transform. Order the 1 points by the distance transform. Remove a point if it has more than 1 neighbor and if removing it does not change the Euler number. image - the binary image to be skeletonized mask - only skeletonize pixels within the mask ordering - a matrix of the same dimensions as the image. The matrix provides the ordering of the erosion with the lowest values being eroded first. The default is to use the distance transform.
4.907971
3.70509
1.324657
def skeletonize_labels(labels):
    '''Skeletonize a labels matrix'''
    # Touching labels would merge if skeletonized together, so color the
    # labels matrix first and skeletonize one color at a time.
    colors = color_labels(labels)
    ncolors = np.max(colors)
    if ncolors == 0:
        return labels
    result = np.zeros(labels.shape, labels.dtype)
    for color in range(1, ncolors + 1):
        skeleton_mask = skeletonize(colors == color)
        result[skeleton_mask] = labels[skeleton_mask]
    return result
def skeleton_length(labels, indices=None):
    '''Compute the length of all skeleton branches for labeled skeletons

    labels - a labels matrix
    indices - the indexes of the labels to be measured. Default is all

    returns an array of one skeleton length per label.
    '''
    # Lazily build (and memoize in a module global) a 512-entry table
    # mapping each 3x3 neighborhood configuration to the length
    # contribution of the center pixel.
    global __skel_length_table
    if __skel_length_table is None:
        tbl = np.zeros(512, np.float32)
        for ii in range(-1, 2):
            for jj in range(-1, 2):
                if ii == 0 and jj == 0:
                    continue
                #
                # Set the bit to search for and the center bit
                #
                idx = 2 ** (ii + 1 + (jj + 1) * 3) | 16
                mask = (np.arange(512) & idx) == idx
                #
                # If we are four-connected to another pixel that is
                # connected to this one, they are 8-connected and that
                # is the distance.
                #
                # bad      good
                # x 1      0 0 0
                # x 1 1    0 1 0
                # x x x    0 x 1
                if ii != 0 and jj != 0:
                    for adjacent_i, adjacent_j in (
                            (ii - 1, jj), (ii, jj - 1),
                            (ii + 1, jj), (ii, jj + 1)):
                        if any([_ < -1 or _ > 1
                                for _ in (adjacent_i, adjacent_j)]):
                            continue
                        aidx = 2 ** (adjacent_i + 1 + (adjacent_j + 1) * 3)
                        mask = mask & ((np.arange(512) & aidx) != aidx)
                # Each segment is shared by two endpoints, hence / 2
                tbl[mask] += np.sqrt(ii * ii + jj * jj) / 2
        __skel_length_table = tbl
    if indices is None:
        indices = np.arange(1, np.max(labels) + 1)
    else:
        indices = np.asanyarray(indices)
    if len(indices) == 0:
        return np.zeros(0)
    # Per-pixel length contributions, summed per label via bincount
    score = __skel_length_table[table_idx_from_labels(labels)]
    result = np.bincount(labels.ravel(),
                         weights=score.ravel(),
                         minlength=np.max(indices) + 1)
    return result[indices]
def distance_to_edge(labels):
    '''Compute the distance of a pixel to the edge of its object

    labels - a labels matrix

    returns a matrix of distances
    '''
    # Color the labels so touching objects don't share one distance field
    colors = color_labels(labels)
    ncolors = np.max(colors)
    result = np.zeros(labels.shape)
    if ncolors == 0:
        return result
    for color in range(1, ncolors + 1):
        in_color = colors == color
        result[in_color] = scind.distance_transform_edt(in_color)[in_color]
    return result
def all_connected_components(i, j):
    '''Associate each label in i with a component #

    This function finds all connected components given an array of
    associations between labels i and j using a depth-first search.

    i & j give the edges of the graph. The first step of the algorithm
    makes bidirectional edges, (i->j and j<-i), so it's best to only send
    the edges in one direction (although the algorithm can withstand
    duplicates).

    returns a label for each vertex up to the maximum named vertex in i.
    '''
    if len(i) == 0:
        return i
    # Make the graph bidirectional, then sort the edge list by
    # (source, destination) so each vertex's edges are contiguous.
    sources = np.hstack((i, j))
    destinations = np.hstack((j, i))
    order = np.lexsort((destinations, sources))
    i = np.ascontiguousarray(sources[order], np.uint32)
    j = np.ascontiguousarray(destinations[order], np.uint32)
    # Per-vertex edge counts and the offset of each vertex's first edge
    counts = np.ascontiguousarray(np.bincount(i.astype(int)), np.uint32)
    indexes = np.ascontiguousarray(np.cumsum(counts) - counts, np.uint32)
    # labels[v] ends up holding the first vertex labeled in v's component
    labels = np.zeros(len(counts), np.uint32)
    _all_connected_components(i, j, indexes, counts, labels)
    return labels
def is_local_maximum(image, labels, footprint):
    '''Return a boolean array of points that are local maxima

    image - intensity image
    labels - find maxima only within labels. Zero is reserved for
             background.
    footprint - binary mask indicating the neighborhood to be examined;
                must be a matrix with odd dimensions, center is taken to
                be the point in question.
    '''
    # The original assert, np.all(footprint.shape) & 1, only checked that
    # no dimension was zero; actually verify every dimension is odd.
    assert np.all(np.array(footprint.shape) % 2 == 1)
    footprint = (footprint != 0)
    footprint_extent = (np.array(footprint.shape) - 1) // 2
    if np.all(footprint_extent == 0):
        # A 1x1 footprint: every labeled pixel is trivially a maximum
        return labels > 0
    result = (labels > 0).copy()
    #
    # Create a labels matrix with zeros at the borders that might be
    # hit by the footprint.  (Index with a tuple of slices: lists of
    # slices are no longer accepted by NumPy.)
    #
    big_labels = np.zeros(np.array(labels.shape) + footprint_extent * 2,
                          labels.dtype)
    big_labels[tuple(slice(fe, -fe) for fe in footprint_extent)] = labels
    #
    # Find the relative indexes of each footprint element
    #
    image_strides = np.array(image.strides) // image.dtype.itemsize
    big_strides = np.array(big_labels.strides) // big_labels.dtype.itemsize
    result_strides = np.array(result.strides) // result.dtype.itemsize
    footprint_offsets = np.mgrid[tuple(slice(-fe, fe + 1)
                                       for fe in footprint_extent)]
    footprint_offsets = footprint_offsets[:, footprint]
    #
    # Order by distance, low to high, and get rid of the center point
    #
    d = np.sum(footprint_offsets ** 2, 0)
    footprint_offsets, d = footprint_offsets[:, d > 0], d[d > 0]
    footprint_offsets = footprint_offsets[:, np.lexsort([d])]
    fp_image_offsets = np.sum(image_strides[:, np.newaxis] *
                              footprint_offsets, 0)
    fp_big_offsets = np.sum(big_strides[:, np.newaxis] *
                            footprint_offsets, 0)
    #
    # Get the index of each labeled pixel in the image and big_labels
    # arrays
    #
    indexes = np.mgrid[tuple(slice(0, x)
                             for x in labels.shape)][:, labels > 0]
    image_indexes = np.sum(image_strides[:, np.newaxis] * indexes, 0)
    big_indexes = np.sum(big_strides[:, np.newaxis] *
                         (indexes + footprint_extent[:, np.newaxis]), 0)
    result_indexes = np.sum(result_strides[:, np.newaxis] * indexes, 0)
    #
    # Now operate on the raveled images
    #
    big_labels_raveled = big_labels.ravel()
    image_raveled = image.ravel()
    result_raveled = result.ravel()
    #
    # A hit is a hit if the label at the offset matches the label at the
    # pixel and if the intensity at the pixel is greater or equal to the
    # intensity at the offset.
    #
    for fp_image_offset, fp_big_offset in zip(fp_image_offsets,
                                              fp_big_offsets):
        same_label = (big_labels_raveled[big_indexes + fp_big_offset] ==
                      big_labels_raveled[big_indexes])
        less_than = (image_raveled[image_indexes[same_label]] <
                     image_raveled[image_indexes[same_label] +
                                   fp_image_offset])
        mask = ~same_label
        mask[same_label] = ~less_than
        # Pixels strictly below a same-label neighbor are not maxima;
        # survivors are carried into the next (farther) offset pass.
        result_raveled[result_indexes[~mask]] = False
        result_indexes = result_indexes[mask]
        big_indexes = big_indexes[mask]
        image_indexes = image_indexes[mask]
    return result
def angular_distribution(labels, resolution=100, weights=None):
    '''For each object in labels, compute the angular distribution around
    the centers of mass.

    Returns an i x j matrix, where i is the number of objects in the
    label matrix, and j is the resolution of the distribution (default
    100), mapped from -pi to pi.

    Optionally, the distributions can be weighted by pixel.

    The algorithm approximates the angular width of pixels relative to
    the object centers, in an attempt to be accurate for small objects.

    The ChordRatio of an object can be approximated by
    >>> angdist = angular_distribution(labels, resolution)
    >>> angdist2 = angdist[:, :resolution//2] + angdist[:, resolution//2]  # map to widths, rather than radii
    >>> chord_ratio = np.sqrt(angdist2.max(axis=1) / angdist2.min(axis=1))  # sqrt because these are sectors, not triangles
    '''
    if weights is None:
        weights = np.ones(labels.shape)
    maxlabel = labels.max()
    ci, cj = centers_of_labels(labels)
    # NOTE(review): meshgrid(arange(shape[0]), arange(shape[1])) yields
    # arrays of shape (shape[1], shape[0]); the boolean indexing below
    # looks like it only lines up for square label matrices - verify.
    j, i = np.meshgrid(np.arange(labels.shape[0]),
                       np.arange(labels.shape[1]))
    # compute deltas from pixels to object centroids, and mask to labels
    di = i[labels > 0] - ci[labels[labels > 0] - 1]
    dj = j[labels > 0] - cj[labels[labels > 0] - 1]
    weights = weights[labels > 0]
    labels = labels[labels > 0]
    # find angles, and angular width of pixels
    angle = np.arctan2(di, dj)
    # Use pixels of width 2 to get some smoothing
    width = np.arctan(1.0 / np.sqrt(di ** 2 + dj ** 2 +
                                    np.finfo(float).eps))
    # create an onset/offset array of size 3 * resolution; each pixel
    # contributes over [lo, hi) of angular bins
    lo = np.clip((angle - width) * resolution / (2 * np.pi),
                 -resolution, 2 * resolution).astype(int) + resolution
    hi = np.clip((angle + width) * resolution / (2 * np.pi),
                 -resolution, 2 * resolution).astype(int) + resolution
    # make sure every pixel counts at least once
    hi[lo == hi] += 1
    # normalize weights by their angular width (adding a bit to avoid 0/0)
    weights /= (hi - lo)
    # Accumulate +weight at each onset bin and -weight at each offset bin
    onset = scipy.sparse.coo_matrix((weights, (labels - 1, lo)),
                                    (maxlabel, 3 * resolution)).toarray()
    offset = scipy.sparse.coo_matrix((weights, (labels - 1, hi)),
                                     (maxlabel, 3 * resolution)).toarray()
    # sum onset/offset to get actual distribution
    onoff = onset - offset
    dist = np.cumsum(onoff, axis=1)
    # Fold the triple-width accumulator back into [0, resolution)
    dist = dist[:, :resolution] + dist[:, resolution:2 * resolution] + \
        dist[:, 2 * resolution:]
    return dist
def is_obtuse(p1, v, p2):
    '''Determine whether the angle, p1 - v - p2 is obtuse

    p1 - N x 2 array of coordinates of first point on edge
    v - N x 2 array of vertex coordinates
    p2 - N x 2 array of coordinates of second point on edge

    returns vector of booleans
    '''
    # The angle at v is obtuse exactly when (p1 - v) . (p2 - v) < 0,
    # i.e. when (p1 - v) . (v - p2) > 0.  Both coordinate columns take
    # part in the dot product, so sum over axis 1.
    return np.sum((p1 - v) * (v - p2), 1) > 0
def princomp(x):
    '''Determine the principal components of a vector of measurements

    x should be a M x N numpy array composed of M observations of N
    variables.

    Returns coeffs - the N x N matrix whose columns are the principal
    component directions ordered by decreasing explained variance; it can
    be used to transform x into its components.

    The code for this function is based on "A Tutorial on Principal
    Component Analysis", Shlens, 2005
    http://www.snl.salk.edu/~shlens/pub/notes/pca.pdf (unpublished)
    '''
    # Use the file-wide np alias (the original mixed in bare `numpy`)
    M = x.shape[0]
    # Center the observations, then form the sample covariance matrix
    y = x - x.mean(0)
    cov = np.dot(y.transpose(), y) / (M - 1)
    V, PC = np.linalg.eig(cov)
    # Sort eigenvectors by decreasing eigenvalue
    order = (-V).argsort()
    coeff = PC[:, order]
    return coeff
def all_pairs(n):
    '''Return an (n*(n - 1)) x 2 array of all non-identity pairs of n things

    n - # of things

    The array is (cleverly) ordered so that the first m * (m - 1) elements
    can be used for m < n things:

    n = 3
    [[0, 1], # n = 2
     [1, 0], # n = 2
     [0, 2],
     [1, 2],
     [2, 0],
     [2, 1]]
    '''
    # Full Cartesian grid of indices
    grid_i, grid_j = np.mgrid[0:n, 0:n]
    flat_i = grid_i.ravel()
    flat_j = grid_j.ravel()
    # Drop the diagonal (i == j)
    off_diagonal = flat_i != flat_j
    flat_i = flat_i[off_diagonal]
    flat_j = flat_j[off_diagonal]
    # Sort by max(i, j) first so prefixes cover smaller m, then by i, j
    order = np.lexsort((flat_j, flat_i, np.maximum(flat_i, flat_j)))
    return np.column_stack((flat_i[order], flat_j[order]))
def stretch(image, mask=None):
    '''Normalize an image to make the minimum zero and maximum one

    image - pixel data to be normalized
    mask - optional mask of relevant pixels. None = don't mask

    returns the stretched image
    '''
    image = np.array(image, float)
    # .size replaces np.product(shape): np.product was removed in NumPy 2.0
    if image.size == 0:
        return image
    if mask is None:
        minval = np.min(image)
        maxval = np.max(image)
        if minval == maxval:
            # Flat image: clamp to [0, 1], otherwise pass through
            if minval < 0:
                return np.zeros_like(image)
            elif minval > 1:
                return np.ones_like(image)
            return image
        return (image - minval) / (maxval - minval)
    significant_pixels = image[mask]
    if significant_pixels.size == 0:
        return image
    minval = np.min(significant_pixels)
    maxval = np.max(significant_pixels)
    if minval == maxval:
        transformed_image = minval
    else:
        transformed_image = ((significant_pixels - minval) /
                             (maxval - minval))
    # Write the stretched values into a copy (the original assigned to an
    # unused `result` and then mutated `image` instead)
    result = image.copy()
    result[mask] = transformed_image
    return result
def median_filter(data, mask, radius, percent=50):
    '''Masked median filter with octagonal shape

    data - array of data to be median filtered.
    mask - mask of significant pixels in data
    radius - the radius of a circle inscribed into the filtering octagon
    percent - conceptually, order the significant pixels in the octagon,
              count them and choose the pixel indexed by the percent
              times the count divided by 100. More simply, 50 = median

    returns a filtered array. In areas where the median filter does not
    overlap the mask, the filtered result is undefined, but in practice,
    it will be the lowest value in the valid area.
    '''
    if mask is None:
        mask = np.ones(data.shape, dtype=bool)
    if np.all(~mask):
        return data.copy()
    #
    # Normalize the ranked data to 0-255.  np.integer replaces np.int
    # (removed from NumPy): it matches any integer dtype, which is what
    # the 0-255 range check below requires.
    #
    if (not np.issubdtype(data.dtype, np.integer) or
            np.min(data) < 0 or np.max(data) > 255):
        ranked_data, translation = rank_order(data[mask], nbins=255)
        was_ranked = True
    else:
        ranked_data = data[mask]
        was_ranked = False
    # `ranked_image` (was `input`, which shadowed the builtin)
    ranked_image = np.zeros(data.shape, np.uint8)
    ranked_image[mask] = ranked_data
    mmask = np.ascontiguousarray(mask, np.uint8)
    output = np.zeros(data.shape, np.uint8)
    _filter.median_filter(ranked_image, mmask, output, radius, percent)
    if was_ranked:
        # Map the 0-255 ranks back to the original data values
        result = translation[output]
    else:
        result = output
    return result
def laplacian_of_gaussian(image, mask, size, sigma):
    '''Perform the Laplacian of Gaussian transform on the image

    image - 2-d image array
    mask - binary mask of significant pixels
    size - length of side of square kernel to use
    sigma - standard deviation of the Gaussian
    '''
    half_size = size // 2
    # Kernel coordinates in units of sigma
    ki, kj = np.mgrid[-half_size:half_size + 1,
                      -half_size:half_size + 1].astype(float) / float(sigma)
    distance = (ki ** 2 + kj ** 2) / 2
    gaussian = np.exp(-distance)
    # Normalize the Gaussian to unit sum
    gaussian = gaussian / np.sum(gaussian)
    log = (distance - 1) * gaussian
    # Normalize the kernel to have a sum of zero
    log = log - np.mean(log)
    if mask is None:
        mask = np.ones(image.shape[:2], bool)
    masked_image = image.copy()
    masked_image[~mask] = 0
    output = convolve(masked_image, log, mode='constant', cval=0)
    #
    # Do the LoG of the inverse of the mask. This finds the magnitude of
    # the contribution of the masked pixels. We then fudge by multiplying
    # by the value at the pixel of interest - this effectively sets the
    # value at a masked pixel to that of the pixel of interest.
    #
    # It underestimates the LoG; that's not a terrible thing.
    #
    correction = convolve((~mask).astype(float), log,
                          mode='constant', cval=1)
    output += correction * image
    output[~mask] = image[~mask]
    return output
def roberts(image, mask=None):
    '''Find edges using the Roberts algorithm

    image - the image to process
    mask - mask of relevant points

    The algorithm returns the magnitude of the output of the two Roberts
    convolution kernels.

    The following is the canonical citation for the algorithm:
    L. Roberts Machine Perception of 3-D Solids, Optical and
    Electro-optical Information Processing, MIT Press 1965.

    The following website has a tutorial on the algorithm:
    http://homepages.inf.ed.ac.uk/rbf/HIPR2/roberts.htm
    '''
    if mask is None:
        mask = np.ones(image.shape, bool)
    result = np.zeros(image.shape)
    # Points near the mask edges and image edges are computed unreliably,
    # so restrict the computation to an 8-connected erosion of the mask
    # and leave everything else zero (no edge).
    interior = binary_erosion(mask, generate_binary_structure(2, 2),
                              border_value=0)
    result[interior == False] = 0
    #
    # Two 2x2 kernels:
    #   anti-diagonal   1  0      diagonal   0  1
    #                   0 -1                -1  0
    #
    center = image[:, :][interior]
    down_right = image[1:, 1:][interior[:-1, :-1]]
    up_right = image[:-1, 1:][interior[1:, :-1]]
    diagonal = center - up_right
    anti_diagonal = center - down_right
    result[interior] = np.sqrt(diagonal * diagonal +
                               anti_diagonal * anti_diagonal)
    return result
def sobel(image, mask=None):
    '''Calculate the absolute magnitude Sobel to find the edges

    image - image to process
    mask - mask of relevant points

    Take the square root of the sum of the squares of the horizontal and
    vertical Sobels to get a magnitude that's somewhat insensitive to
    direction.

    Note that scipy's Sobel returns a directional Sobel which isn't
    useful for edge detection in its raw form.
    '''
    horizontal = hsobel(image, mask)
    vertical = vsobel(image, mask)
    return np.sqrt(horizontal ** 2 + vertical ** 2)
def prewitt(image, mask=None):
    '''Find the edge magnitude using the Prewitt transform

    image - image to process
    mask - mask of relevant points

    Return the square root of the sum of squares of the horizontal and
    vertical Prewitt transforms.
    '''
    horizontal = hprewitt(image, mask)
    vertical = vprewitt(image, mask)
    return np.sqrt(horizontal ** 2 + vertical ** 2)
def hprewitt(image, mask=None):
    '''Find the horizontal edges of an image using the Prewitt transform

    image - image to process
    mask - mask of relevant points

    We use the following kernel and return the absolute value of the
    result at each point:

     1  1  1
     0  0  0
    -1 -1 -1
    '''
    if mask is None:
        mask = np.ones(image.shape, bool)
    # Pixels whose kernel overlaps the border/mask edge are unreliable
    interior = binary_erosion(mask, generate_binary_structure(2, 2),
                              border_value=0)
    kernel = np.array([[1, 1, 1],
                       [0, 0, 0],
                       [-1, -1, -1]], float) / 3.0
    result = np.abs(convolve(image, kernel))
    result[interior == False] = 0
    return result
def gabor(image, labels, frequency, theta):
    '''Gabor-filter the objects in an image

    image - 2-d grayscale image to filter
    labels - a similarly shaped labels matrix
    frequency - cycles per trip around the circle
    theta - angle of the filter. 0 to 2 pi

    Calculate the Gabor filter centered on the centroids of each object
    in the image. Summing the resulting image over the labels matrix will
    yield a texture measure per object.
    '''
    #
    # The code inscribes the X and Y position of each pixel relative to
    # the centroid of that pixel's object. After that, the Gabor filter
    # for the image can be calculated per-pixel and the image can be
    # multiplied by the filter to get the filtered image.
    #
    nobjects = np.max(labels)
    if nobjects == 0:
        return image
    centers = centers_of_labels(labels)
    areas = fix(scind.sum(np.ones(image.shape), labels,
                          np.arange(nobjects, dtype=np.int32) + 1))
    mask = labels > 0
    i, j = np.mgrid[0:image.shape[0], 0:image.shape[1]].astype(float)
    i = i[mask]
    j = j[mask]
    image = image[mask]
    lm = labels[mask] - 1
    # Coordinates relative to each pixel's object centroid
    i -= centers[0, lm]
    j -= centers[1, lm]
    # Scale the Gaussian envelope to the object's size
    sigma = np.sqrt(areas / np.pi) / 3.0
    sigma = sigma[lm]
    g_exp = 1000.0 / (2.0 * np.pi * sigma ** 2) * \
        np.exp(-(i ** 2 + j ** 2) / (2 * sigma ** 2))
    g_angle = 2 * np.pi / frequency * (i * np.cos(theta) +
                                       j * np.sin(theta))
    g_cos = g_exp * np.cos(g_angle)
    g_sin = g_exp * np.sin(g_angle)
    #
    # Normalize so that the sum of the filter over each object is zero
    # and so that there is no bias-value within each object.
    #
    g_cos_mean = fix(scind.mean(g_cos, lm, np.arange(nobjects)))
    i_mean = fix(scind.mean(image, lm, np.arange(nobjects)))
    i_norm = image - i_mean[lm]
    g_sin_mean = fix(scind.mean(g_sin, lm, np.arange(nobjects)))
    g_cos -= g_cos_mean[lm]
    g_sin -= g_sin_mean[lm]
    # builtin complex replaces np.complex (alias removed from NumPy)
    g = np.zeros(mask.shape, dtype=complex)
    g[mask] = i_norm * g_cos + i_norm * g_sin * 1j
    return g
def enhance_dark_holes(image, min_radius, max_radius, mask=None):
    '''Enhance dark holes using a rolling ball filter

    image - grayscale 2-d image
    min_radius, max_radius - range of hole radii to enhance
    '''
    # 4-connected structuring element
    se = np.array([[False, True, False],
                   [True, True, True],
                   [False, True, False]])
    # Work on the inverted image so dark holes become bright peaks
    inverted_image = image.max() - image
    previous_reconstructed = inverted_image
    eroded = inverted_image
    smoothed_image = np.zeros(image.shape)
    for radius in range(max_radius + 1):
        eroded = grey_erosion(eroded, mask=mask, footprint=se)
        reconstructed = grey_reconstruction(eroded, inverted_image,
                                            footprint=se)
        # Structures removed at exactly this radius
        output_image = previous_reconstructed - reconstructed
        if radius >= min_radius:
            smoothed_image = np.maximum(smoothed_image, output_image)
        previous_reconstructed = reconstructed
    return smoothed_image
def granulometry_filter(image, min_radius, max_radius, mask=None):
    '''Enhances bright structures within a min and max radius using a
    rolling ball filter

    image - grayscale 2-d image
    min_radius, max_radius - range of structure radii to keep
    mask - passed through to grey_erosion
    '''
    #
    # Do 4-connected erosion
    #
    se = np.array([[False, True, False],
                   [True, True, True],
                   [False, True, False]])
    #
    # Initialize
    #
    inverted_image = image.max() - image
    previous_opened_image = image
    eroded_image = image
    selected_granules_image = np.zeros(image.shape)
    #
    # Select granules by successive morphological openings
    #
    for i in range(max_radius + 1):
        eroded_image = grey_erosion(eroded_image, mask=mask, footprint=se)
        # NOTE(review): inverted_image is passed as the second positional
        # argument of grey_dilation; if that parameter is a mask (as in
        # the grey_erosion call above), this looks unintended - compare
        # enhance_dark_holes, which passes inverted_image to
        # grey_reconstruction. Verify against the helper's signature.
        opened_image = grey_dilation(eroded_image, inverted_image,
                                     footprint=se)
        # Structures removed by the opening at exactly this radius
        output_image = previous_opened_image - opened_image
        if i >= min_radius:
            selected_granules_image = np.maximum(selected_granules_image,
                                                 output_image)
        previous_opened_image = opened_image
    return selected_granules_image
def velocity_kalman_model():
    '''Return a KalmanState set up to model objects with constant velocity

    The observation and measurement vectors are i,j.
    The state vector is i,j,vi,vj
    '''
    # Observation picks position (i, j) out of the state
    observation_matrix = np.array([[1, 0, 0, 0],
                                   [0, 1, 0, 0]])
    # Transition adds velocity to position each step, velocity constant
    transition_matrix = np.array([[1, 0, 1, 0],
                                  [0, 1, 0, 1],
                                  [0, 0, 1, 0],
                                  [0, 0, 0, 1]])
    return KalmanState(observation_matrix, transition_matrix)
def reverse_velocity_kalman_model():
    '''Return a KalmanState set up to model going backwards in time'''
    # Observation picks position (i, j) out of the state
    observation_matrix = np.array([[1, 0, 0, 0],
                                   [0, 1, 0, 0]])
    # Transition subtracts velocity from position (time runs backwards)
    transition_matrix = np.array([[1, 0, -1, 0],
                                  [0, 1, 0, -1],
                                  [0, 0, 1, 0],
                                  [0, 0, 0, 1]])
    return KalmanState(observation_matrix, transition_matrix)
def line_integration(image, angle, decay, sigma):
    '''Integrate the image along the given angle

    DIC images are the directional derivative of the underlying image.
    This filter reconstructs the original image by integrating along that
    direction.

    image - a 2-dimensional array
    angle - shear angle in radians. We integrate perpendicular to this
            angle
    decay - an exponential decay applied to the integration
    sigma - the standard deviation of a Gaussian which is used to smooth
            the image in the direction parallel to the shear angle.
    '''
    #
    # Normalize the image so that the mean is zero
    #
    normalized = image - np.mean(image)
    #
    # Rotate the image so the J direction is perpendicular to the shear
    # angle.
    #
    rotated = scind.rotate(normalized, -angle)
    #
    # Smooth in only the i direction
    #
    smoothed = scind.gaussian_filter1d(rotated, sigma) if sigma > 0 else rotated
    #
    # Accumulate the exponentially-decayed running sum in both directions.
    # NOTE(review): the loops run over axis 0 (rows, i) although the
    # original comment described img_out[:, j+1] = img_out[:, j] * decay +
    # img[j+1] (columns); presumably the rotation above makes axis 0 the
    # integration direction - verify.
    #
    result_fwd = smoothed.copy()
    for i in range(1, result_fwd.shape[0]):
        result_fwd[i] += result_fwd[i - 1] * decay
    result_rev = smoothed.copy()
    for i in reversed(range(result_rev.shape[0] - 1)):
        result_rev[i] += result_rev[i + 1] * decay
    # Difference of the two passes gives a symmetric integration
    result = (result_fwd - result_rev) / 2
    #
    # Rotate back and chop the padding the rotations introduced
    #
    result = scind.rotate(result, angle)
    ipad = int((result.shape[0] - image.shape[0]) / 2)
    jpad = int((result.shape[1] - image.shape[1]) / 2)
    result = result[ipad:(ipad + image.shape[0]),
                    jpad:(jpad + image.shape[1])]
    #
    # Scale the resultant image similarly to the output.
    #
    img_min, img_max = np.min(image), np.max(image)
    result_min, result_max = np.min(result), np.max(result)
    if (img_min == img_max) or (result_min == result_max):
        # Degenerate (flat) input or output: nothing to scale
        return np.zeros(result.shape)
    result = (result - result_min) / (result_max - result_min)
    result = img_min + result * (img_max - img_min)
    return result
def variance_transform(img, sigma, mask=None):
    '''Calculate a weighted variance of the image

    This function calculates the variance of an image, weighting the
    local contributions by a Gaussian.

    img - image to be transformed
    sigma - standard deviation of the Gaussian
    mask - mask of relevant pixels in the image
    '''
    if mask is None:
        mask = np.ones(img.shape, bool)
    else:
        img = img.copy()
        img[~mask] = 0
    # Gaussian of the mask normalizes for pixels near the mask edge
    norm = scind.gaussian_filter(mask.astype(float), sigma,
                                 mode='constant')
    local_mean = scind.gaussian_filter(img, sigma, mode='constant') / norm
    local_mean_sq = scind.gaussian_filter(img ** 2, sigma,
                                          mode='constant') / norm
    # Var[X] = E[X^2] - E[X]^2, computed with the Gaussian weighting
    return local_mean_sq - local_mean ** 2
def inv_n(x):
    '''given N matrices, return N inverses'''
    #
    # The inverse of a small matrix (e.g. 3x3) is
    #
    #   1
    # ----- C(j,i)
    # det(A)
    #
    # where C(j,i) is the cofactor of matrix A at position j,i
    #
    assert x.ndim == 3
    assert x.shape[1] == x.shape[2]
    m = x.shape[1]
    # Build the adjugate: entry (i, j) is the signed cofactor of (j, i)
    adjugate = np.array(
        [[cofactor_n(x, j, i) * (-1) ** (i + j) for j in range(m)]
         for i in range(m)]).transpose(2, 0, 1)
    return adjugate / det_n(x)[:, np.newaxis, np.newaxis]
def det_n(x):
    '''given N matrices, return N determinants

    x - an N x m x m array of N stacked m x m matrices

    Uses the Leibniz formula: the sum over all permutations of the signed
    product of one entry per row.
    '''
    assert x.ndim == 3
    assert x.shape[1] == x.shape[2]
    if x.shape[1] == 1:
        # 1x1 matrices: the determinant is the single entry
        return x[:, 0, 0]
    result = np.zeros(x.shape[0])
    for permutation in permutations(np.arange(x.shape[1])):
        # parity() gives the +1/-1 sign of the permutation; the original
        # also executed a dead `sign = -sign` here, removed because sign
        # is recomputed from parity() on every iteration.
        sign = parity(permutation)
        result += sign * np.prod(
            [x[:, i, permutation[i]] for i in range(x.shape[1])], 0)
    return result
def parity(x):
    '''The parity of a permutation

    The parity of a permutation is even if the permutation can be formed
    by an even number of transpositions and is odd otherwise.

    Equivalently, it is even when there are an even number of cycles of
    even length. For instance in (1, 2, 0, 3) there is the cycle
    (0->1, 1->2, 2->0) and the cycle (3->3). Both cycles are odd-length,
    so the parity is even: you can exchange 0 and 1 giving (0, 2, 1, 3)
    and 2 and 1 to get (0, 1, 2, 3)

    returns 1 for an even permutation, -1 for odd.
    '''
    order = np.lexsort((x,))
    visited = np.zeros(len(x), bool)
    transpositions = 0
    for start in range(len(x)):
        if visited[start]:
            continue
        # Walk the cycle containing `start`; a cycle of length k is
        # equivalent to k - 1 transpositions.
        node = order[start]
        cycle_length = 1
        while node != start:
            visited[node] = True
            node = order[node]
            cycle_length += 1
        transpositions += cycle_length - 1
    return -1 if transpositions % 2 else 1
def cofactor_n(x, i, j):
    '''Return the cofactor of n matrices x[n,i,j] at position i,j

    The value returned is the determinant of the submatrix formed by
    removing row i and column j (the sign factor is applied by the
    caller).
    '''
    # Delete row i and column j from every stacked matrix, then take the
    # determinant of each reduced matrix.
    submatrices = np.delete(np.delete(x, i, axis=1), j, axis=2)
    return det_n(submatrices)