Dataset schema:

_id: string (length 2 to 7)
title: string (length 1 to 88)
partition: string (3 classes)
text: string (length 75 to 19.8k)
language: string (1 class)
meta_information: dict
q17700
ParallelRunner.launch_simulation
train
def launch_simulation(self, parameter):
    """
    Launch a single simulation, using SimulationRunner's facilities.

    This function is used by ParallelRunner's run_simulations to map
    simulation running over the parameter list.

    Args:
        parameter (dict): the parameter combination to simulate.
    """
    return next(SimulationRunner.run_simulations(self, [parameter],
                                                 self.data_folder))
python
{ "resource": "" }
q17701
JsonHelper.stringify
train
def stringify(self, obj, beautify=False, raise_exception=False):
    """Alias of helper.string.serialization.json.stringify"""
    return self.helper.string.serialization.json.stringify(
        obj=obj, beautify=beautify, raise_exception=raise_exception)
python
{ "resource": "" }
q17702
JsonHelper.parse
train
def parse(self, text, encoding='utf8', raise_exception=False):
    """Alias of helper.string.serialization.json.parse"""
    return self.helper.string.serialization.json.parse(
        text=text, encoding=encoding, raise_exception=raise_exception)
python
{ "resource": "" }
q17703
DatabaseManager.new
train
def new(cls, script, commit, params, campaign_dir, overwrite=False):
    """
    Initialize a new class instance with a set configuration and filename.

    The created database has the same name as the campaign directory.

    Args:
        script (str): the ns-3 name of the script that will be used in
            this campaign;
        commit (str): the commit of the ns-3 installation that is used
            to run the simulations.
        params (list): a list of the parameters that can be used on the
            script.
        campaign_dir (str): The path of the file where to save the DB.
        overwrite (bool): Whether or not existing directories should be
            overwritten.
    """
    # We only accept absolute paths
    if not Path(campaign_dir).is_absolute():
        raise ValueError("Path is not absolute")

    # Make sure the directory does not exist already
    if Path(campaign_dir).exists() and not overwrite:
        raise FileExistsError("The specified directory already exists")
    elif Path(campaign_dir).exists() and overwrite:
        # Verify we are not deleting files belonging to the user
        campaign_dir_name = os.path.basename(campaign_dir)
        folder_contents = set(os.listdir(campaign_dir))
        allowed_files = set(
            ['data', '%s.json' % campaign_dir_name] +
            # Allow hidden files (like .DS_Store on macOS)
            [os.path.basename(os.path.normpath(f)) for f in
             glob.glob(os.path.join(campaign_dir, ".*"))])
        if not folder_contents.issubset(allowed_files):
            raise ValueError("The specified directory cannot be overwritten"
                             " because it contains user files.")
        # This operation destroys data.
        shutil.rmtree(campaign_dir)

    # Create the directory and database file in it
    # The indent and separators ensure the database is human readable.
    os.makedirs(campaign_dir)
    tinydb = TinyDB(os.path.join(
        campaign_dir,
        "%s.json" % os.path.basename(campaign_dir)))

    # Save the configuration in the database
    config = {
        'script': script,
        'commit': commit,
        'params': sorted(params)
    }
    tinydb.table('config').insert(config)

    return cls(tinydb, campaign_dir)
python
{ "resource": "" }
q17704
DatabaseManager.load
train
def load(cls, campaign_dir):
    """
    Initialize from an existing database.

    It is assumed that the database json file has the same name as its
    containing folder.

    Args:
        campaign_dir (str): The path to the campaign directory.
    """
    # We only accept absolute paths
    if not Path(campaign_dir).is_absolute():
        raise ValueError("Path is not absolute")

    # Verify file exists
    if not Path(campaign_dir).exists():
        raise ValueError("Directory does not exist")

    # Extract filename from campaign dir
    filename = "%s.json" % os.path.split(campaign_dir)[1]
    filepath = os.path.join(campaign_dir, filename)

    try:
        # Read TinyDB instance from file
        tinydb = TinyDB(filepath)

        # Make sure the configuration is a valid dictionary
        assert set(tinydb.table('config').all()[0].keys()) == set(
            ['script', 'params', 'commit'])
    except Exception:
        # Remove the database instance created by tinydb
        os.remove(filepath)
        raise ValueError("Specified campaign directory seems corrupt")

    return cls(tinydb, campaign_dir)
python
{ "resource": "" }
q17705
DatabaseManager.get_next_rngruns
train
def get_next_rngruns(self):
    """
    Yield the next RngRun values that can be used in this campaign.
    """
    available_runs = [result['params']['RngRun']
                      for result in self.get_results()]
    yield from DatabaseManager.get_next_values(available_runs)
python
{ "resource": "" }
q17706
DatabaseManager.insert_result
train
def insert_result(self, result):
    """
    Insert a new result in the database.

    This function also verifies that the result dictionaries saved in
    the database have the following structure (with {'a': 1}
    representing a dictionary, 'a' a key and 1 its value)::

        {
            'params': {
                'param1': value1,
                'param2': value2,
                ...
                'RngRun': value3
            },
            'meta': {
                'elapsed_time': value4,
                'id': value5
            }
        }

    Where elapsed_time is a float representing the seconds the
    simulation execution took, and id is a UUID that uniquely
    identifies the result and is used to locate the output files in
    the campaign_dir/data folder.
    """
    # This dictionary serves as a model for how the keys in the newly
    # inserted result should be structured.
    example_result = {
        'params': {k: ['...'] for k in self.get_params() + ['RngRun']},
        'meta': {k: ['...'] for k in ['elapsed_time', 'id']},
    }

    # Verify result format is correct
    if not DatabaseManager.have_same_structure(result, example_result):
        raise ValueError(
            '%s:\nExpected: %s\nGot: %s' % (
                "Result dictionary does not correspond to database format",
                pformat(example_result, depth=1),
                pformat(result, depth=1)))

    # Insert result
    self.db.table('results').insert(result)
python
{ "resource": "" }
q17707
DatabaseManager.get_results
train
def get_results(self, params=None, result_id=None):
    """
    Return all the results available from the database that fulfill
    some parameter combinations.

    If params is None (or not specified), return all results.

    If params is specified, it must be a dictionary specifying the
    result values we are interested in, with multiple values specified
    as lists.

    For example, if the following params value is used::

        params = {
            'param1': 'value1',
            'param2': ['value2', 'value3']
        }

    the database will be queried for results having param1 equal to
    value1, and param2 equal to value2 or value3.

    Not specifying a value for all the available parameters is allowed:
    unspecified parameters are assumed to be 'free', and can take any
    value.

    Returns:
        A list of results matching the query. Returned results have the
        same structure as results inserted with the insert_result
        method.
    """
    # A cast to dict is necessary, since self.db.table() contains
    # TinyDB's Document object (which is simply a wrapper for a
    # dictionary, thus the simple cast).
    if result_id is not None:
        return [dict(i) for i in self.db.table('results').all()
                if i['meta']['id'] == result_id]

    # In this case, return all results
    if params is None:
        return [dict(i) for i in self.db.table('results').all()]

    # Verify parameter format is correct
    all_params = set(['RngRun'] + self.get_params())
    param_subset = set(params.keys())
    if not all_params.issuperset(param_subset):
        raise ValueError(
            '%s:\nParameters: %s\nQuery: %s' % (
                'Specified parameter keys do not match database format',
                all_params,
                param_subset))

    # Convert values that are not lists into lists to later perform
    # iteration over values more naturally. Perform this on a new
    # dictionary not to modify the original copy.
    query_params = {}
    for key in params:
        if not isinstance(params[key], list):
            query_params[key] = [params[key]]
        else:
            query_params[key] = params[key]

    # Handle the case where query_params has no keys
    if not query_params.keys():
        return [dict(i) for i in self.db.table('results').all()]

    # Create the TinyDB query
    # In the docstring example above, this is equivalent to:
    # AND(OR(param1 == value1), OR(param2 == value2, param2 == value3))
    query = reduce(and_, [
        reduce(or_, [where('params')[key] == v for v in value])
        for key, value in query_params.items()])

    return [dict(i) for i in self.db.table('results').search(query)]
python
{ "resource": "" }
q17708
DatabaseManager.wipe_results
train
def wipe_results(self):
    """
    Remove all results from the database.

    This also removes all output files, and cannot be undone.
    """
    # Clean results table
    self.db.purge_table('results')

    # Get rid of contents of data dir
    for path in glob.glob(os.path.join(self.get_data_dir(), '*.*')):
        shutil.rmtree(path)
python
{ "resource": "" }
q17709
DatabaseManager.get_all_values_of_all_params
train
def get_all_values_of_all_params(self):
    """
    Return a dictionary containing all values that are taken by all
    available parameters. Always returns the parameter list in
    alphabetical order.
    """
    values = collections.OrderedDict(
        [[p, []] for p in sorted(self.get_params())])
    for result in self.get_results():
        for param in self.get_params():
            values[param] += [result['params'][param]]
    sorted_values = collections.OrderedDict(
        [[k, sorted(list(set(values[k])))] for k in values.keys()])
    for k in sorted_values.keys():
        if sorted_values[k] == []:
            sorted_values[k] = None
    return sorted_values
python
{ "resource": "" }
q17710
SerializationHelper.serialize
train
def serialize(self, obj, method='json', beautify=False,
              raise_exception=False):
    """Alias of helper.string.serialization.serialize"""
    return self.helper.string.serialization.serialize(
        obj=obj, method=method, beautify=beautify,
        raise_exception=raise_exception)
python
{ "resource": "" }
q17711
SerializationHelper.deserialize
train
def deserialize(self, text, method='json', encoding='utf8',
                raise_exception=False):
    """Alias of helper.string.serialization.deserialize"""
    return self.helper.string.serialization.deserialize(
        text, method=method, encoding=encoding,
        raise_exception=raise_exception)
python
{ "resource": "" }
q17712
GridRunner.run_program
train
def run_program(self, command, working_directory=os.getcwd(),
                environment=None, cleanup_files=True,
                native_spec="-l cputype=intel"):
    """
    Run a program through the grid, capturing the standard output.
    """
    try:
        s = drmaa.Session()
        s.initialize()
        jt = s.createJobTemplate()
        jt.remoteCommand = os.path.dirname(
            os.path.abspath(__file__)) + '/run_program.sh'
        jt.args = [command]
        if environment is not None:
            jt.jobEnvironment = environment
        jt.workingDirectory = working_directory
        jt.nativeSpecification = native_spec
        output_filename = os.path.join(working_directory, 'output.txt')
        jt.outputPath = ':' + output_filename
        jt.joinFiles = True

        jobid = s.runJob(jt)
        s.wait(jobid, drmaa.Session.TIMEOUT_WAIT_FOREVER)

        with open(output_filename, 'r') as output:
            stdout = output.read()

        # Clean up
        if cleanup_files:
            os.remove(output_filename)
    finally:
        try:
            s.control(drmaa.JOB_IDS_SESSION_ALL,
                      drmaa.JobControlAction.TERMINATE)
            s.synchronize([drmaa.JOB_IDS_SESSION_ALL], dispose=True)
            s.exit()
        except drmaa.errors.NoActiveSessionException:
            pass
    return stdout
python
{ "resource": "" }
q17713
adjacent
train
def adjacent(labels):
    '''Return a binary mask of all pixels which are adjacent to a pixel
    of a different label.
    '''
    high = labels.max() + 1
    if high > np.iinfo(labels.dtype).max:
        labels = labels.astype(int)
    image_with_high_background = labels.copy()
    image_with_high_background[labels == 0] = high

    min_label = scind.minimum_filter(image_with_high_background,
                                     footprint=np.ones((3, 3), bool),
                                     mode='constant', cval=high)
    max_label = scind.maximum_filter(labels,
                                     footprint=np.ones((3, 3), bool),
                                     mode='constant', cval=0)
    return (min_label != max_label) & (labels > 0)
python
{ "resource": "" }
q17714
binary_thin
train
def binary_thin(image, strel1, strel2):
    """Morphologically thin an image

    strel1 - the required values of the pixels in order to survive
    strel2 - at each pixel, the complement of strel1 if we care about
             the value
    """
    hit_or_miss = scind.binary_hit_or_miss(image, strel1, strel2)
    return np.logical_and(image, np.logical_not(hit_or_miss))
python
{ "resource": "" }
q17715
strel_disk
train
def strel_disk(radius):
    """Create a disk structuring element for morphological operations

    radius - radius of the disk
    """
    iradius = int(radius)
    x, y = np.mgrid[-iradius:iradius + 1, -iradius:iradius + 1]
    radius2 = radius * radius
    strel = np.zeros(x.shape)
    strel[x * x + y * y <= radius2] = 1
    return strel
python
{ "resource": "" }
q17716
strel_octagon
train
def strel_octagon(radius):
    """Create an octagonal structuring element for morphological operations

    radius - the distance from the origin to each edge of the octagon
    """
    #
    # Inscribe a diamond in a square to get an octagon.
    #
    iradius = int(radius)
    i, j = np.mgrid[-iradius:(iradius + 1), -iradius:(iradius + 1)]
    #
    # The distance to the diagonal side is also iradius:
    #
    # iradius ** 2 = i**2 + j**2 and i = j
    # iradius ** 2 = 2 * i ** 2
    # i = iradius / sqrt(2)
    # i + j = iradius * sqrt(2)
    #
    dradius = float(iradius) * np.sqrt(2)
    strel = (((i + j) <= dradius) & ((i + j) >= -dradius) &
             ((i - j) <= dradius) & ((i - j) >= -dradius))
    return strel
python
{ "resource": "" }
q17717
strel_pair
train
def strel_pair(x, y):
    """Create a structuring element composed of the origin and another pixel

    x, y - x and y offsets of the other pixel

    returns a structuring element
    """
    x_center = int(np.abs(x))
    y_center = int(np.abs(y))
    result = np.zeros((y_center * 2 + 1, x_center * 2 + 1), bool)
    result[y_center, x_center] = True
    result[y_center + int(y), x_center + int(x)] = True
    return result
python
{ "resource": "" }
q17718
cpmaximum
train
def cpmaximum(image, structure=np.ones((3, 3), dtype=bool), offset=None):
    """Find the local maximum at each point in the image, using the given
    structuring element

    image - a 2-d array of doubles
    structure - a boolean structuring element indicating which local
                elements should be sampled
    offset - the offset to the center of the structuring element
    """
    center = np.array(structure.shape) // 2
    if offset is None:
        offset = center
    origin = np.array(offset) - center
    return scind.maximum_filter(image, footprint=structure, origin=origin,
                                mode='constant', cval=np.min(image))
python
{ "resource": "" }
q17719
convex_hull_image
train
def convex_hull_image(image):
    '''Given a binary image, return an image of the convex hull'''
    labels = image.astype(int)
    points, counts = convex_hull(labels, np.array([1]))
    output = np.zeros(image.shape, int)
    for i in range(counts[0]):
        inext = (i + 1) % counts[0]
        draw_line(output, points[i, 1:], points[inext, 1:], 1)
    output = fill_labeled_holes(output)
    return output == 1
python
{ "resource": "" }
q17720
triangle_areas
train
def triangle_areas(p1, p2, p3):
    """Compute an array of triangle areas given three arrays of triangle pts

    p1, p2, p3 - three Nx2 arrays of points
    """
    v1 = (p2 - p1).astype(float)
    v2 = (p3 - p1).astype(float)
    # Original:
    #   cross1 = v1[:,1] * v2[:,0]
    #   cross2 = v2[:,1] * v1[:,0]
    #   a = (cross1-cross2) / 2
    # Memory reduced:
    cross1 = v1[:, 1]
    cross1 *= v2[:, 0]
    cross2 = v2[:, 1]
    cross2 *= v1[:, 0]
    a = cross1
    a -= cross2
    a /= 2.0
    del v1, v2, cross1, cross2
    a = a.copy()  # a is a view on v1; copying detaches it so the base array can be freed
    a = np.abs(a)
    #
    # Handle small round-off errors
    #
    a[a < np.finfo(np.float32).eps] = 0
    return a
python
{ "resource": "" }
q17721
minimum_distance2
train
def minimum_distance2(hull_a, center_a, hull_b, center_b):
    '''Return the minimum distance or 0 if overlap between 2 convex hulls

    hull_a - list of points in clockwise direction
    center_a - a point within the hull
    hull_b - list of points in clockwise direction
    center_b - a point within the hull
    '''
    if hull_a.shape[0] < 3 or hull_b.shape[0] < 3:
        return slow_minimum_distance2(hull_a, hull_b)
    else:
        return faster_minimum_distance2(hull_a, center_a,
                                        hull_b, center_b)
python
{ "resource": "" }
q17722
slow_minimum_distance2
train
def slow_minimum_distance2(hull_a, hull_b):
    '''Do the minimum distance by exhaustive examination of all points'''
    d2_min = np.iinfo(int).max
    for a in hull_a:
        if within_hull(a, hull_b):
            return 0
    for b in hull_b:
        if within_hull(b, hull_a):
            return 0
    for pt_a in hull_a:
        for pt_b in hull_b:
            d2_min = min(d2_min, np.sum((pt_a - pt_b) ** 2))
    for h1, h2 in ((hull_a, hull_b), (hull_b, hull_a)):
        # Find the distance from a vertex in h1 to an edge in h2
        for pt1 in h1:
            prev_pt2 = h2[-1, :]
            for pt2 in h2:
                if (np.dot(pt2 - prev_pt2, pt1 - prev_pt2) > 0 and
                        np.dot(prev_pt2 - pt2, pt1 - pt2) > 0):
                    # points form an acute triangle, so edge is closer
                    # than vertices
                    d2_min = min(d2_min,
                                 distance2_to_line(pt1, prev_pt2, pt2))
                prev_pt2 = pt2
    return d2_min
python
{ "resource": "" }
q17723
lines_intersect
train
def lines_intersect(pt1_p, pt2_p, pt1_q, pt2_q):
    '''Return true if two line segments intersect

    pt1_p, pt2_p - endpoints of first line segment
    pt1_q, pt2_q - endpoints of second line segment
    '''
    #
    # The idea here is to do the cross-product of the vector from
    # point 1 to point 2 of one segment against the cross products from
    # both points of the other segment. If any of the cross products are
    # zero, the point is collinear with the line. If the cross products
    # differ in sign, then one point is on one side of the line and the
    # other is on the other. If that happens for both, then the lines
    # must cross.
    #
    for pt1_a, pt2_a, pt1_b, pt2_b in ((pt1_p, pt2_p, pt1_q, pt2_q),
                                       (pt1_q, pt2_q, pt1_p, pt2_p)):
        v_a = pt2_a - pt1_a
        cross_a_1b = np.cross(v_a, pt1_b - pt2_a)
        if cross_a_1b == 0 and colinear_intersection_test(pt1_a, pt2_a,
                                                          pt1_b):
            return True
        cross_a_2b = np.cross(v_a, pt2_b - pt2_a)
        if cross_a_2b == 0 and colinear_intersection_test(pt1_a, pt2_a,
                                                          pt2_b):
            return True
        if (cross_a_1b < 0) == (cross_a_2b < 0):
            return False
    return True
python
{ "resource": "" }
q17724
find_farthest
train
def find_farthest(point, hull):
    '''Find the vertex in hull farthest away from a point'''
    d_start = np.sum((point - hull[0, :]) ** 2)
    d_end = np.sum((point - hull[-1, :]) ** 2)
    if d_start > d_end:
        # Go in the forward direction
        i = 1
        inc = 1
        term = hull.shape[0]
        d2_max = d_start
    else:
        # Go in the reverse direction
        i = hull.shape[0] - 2
        inc = -1
        term = -1
        d2_max = d_end
    while i != term:
        d2 = np.sum((point - hull[i, :]) ** 2)
        if d2 < d2_max:
            break
        i += inc
        d2_max = d2
    return i - inc
python
{ "resource": "" }
q17725
find_visible
train
def find_visible(hull, observer, background):
    '''Given an observer location, find the first and last visible
    points in the hull

    The observer at "observer" is looking at the hull whose most distant
    vertex from the observer is "background". Find the vertices that are
    the furthest distance from the line between observer and background.
    These will be the start and end in the vertex chain of vertices
    visible by the observer.
    '''
    pt_background = hull[background, :]
    vector = pt_background - observer
    i = background
    dmax = 0
    while True:
        i_next = (i + 1) % hull.shape[0]
        pt_next = hull[i_next, :]
        d = -np.cross(vector, pt_next - pt_background)
        if d < dmax or i_next == background:
            i_min = i
            break
        dmax = d
        i = i_next
    dmax = 0
    i = background
    while True:
        i_next = (i + hull.shape[0] - 1) % hull.shape[0]
        pt_next = hull[i_next, :]
        d = np.cross(vector, pt_next - pt_background)
        if d < dmax or i_next == background:
            i_max = i
            break
        dmax = d
        i = i_next
    return (i_min, i_max)
python
{ "resource": "" }
q17726
distance2_to_line
train
def distance2_to_line(pt, l0, l1):
    '''The perpendicular distance squared from a point to a line

    pt - point in question
    l0 - one point on the line
    l1 - another point on the line
    '''
    pt = np.atleast_1d(pt)
    l0 = np.atleast_1d(l0)
    l1 = np.atleast_1d(l1)
    reshape = pt.ndim == 1
    if reshape:
        pt.shape = l0.shape = l1.shape = (1, pt.shape[0])
    result = (((l0[:, 0] - l1[:, 0]) * (l0[:, 1] - pt[:, 1]) -
               (l0[:, 0] - pt[:, 0]) * (l0[:, 1] - l1[:, 1])) ** 2 /
              np.sum((l1 - l0) ** 2, 1))
    if reshape:
        result = result[0]
    return result
python
{ "resource": "" }
q17727
within_hull
train
def within_hull(point, hull):
    '''Return true if the point is within the convex hull'''
    h_prev_pt = hull[-1, :]
    for h_pt in hull:
        if np.cross(h_pt - h_prev_pt, point - h_pt) >= 0:
            return False
        h_prev_pt = h_pt
    return True
python
{ "resource": "" }
q17728
calculate_extents
train
def calculate_extents(labels, indexes):
    """Return the area of each object divided by the area of its
    bounding box"""
    fix = fixup_scipy_ndimage_result
    areas = fix(scind.sum(np.ones(labels.shape), labels,
                          np.array(indexes, dtype=np.int32)))
    y, x = np.mgrid[0:labels.shape[0], 0:labels.shape[1]]
    xmin = fix(scind.minimum(x, labels, indexes))
    xmax = fix(scind.maximum(x, labels, indexes))
    ymin = fix(scind.minimum(y, labels, indexes))
    ymax = fix(scind.maximum(y, labels, indexes))
    bbareas = (xmax - xmin + 1) * (ymax - ymin + 1)
    return areas / bbareas
python
{ "resource": "" }
q17729
calculate_perimeters
train
def calculate_perimeters(labels, indexes):
    """Count the distances between adjacent pixels in the perimeters of
    the labels"""
    #
    # Create arrays that tell whether a pixel is like its neighbors.
    # index = 0 is the pixel -1,-1 from the pixel of interest, 1 is
    # -1,0, etc.
    #
    m = table_idx_from_labels(labels)
    pixel_score = __perimeter_scoring[m]
    return fixup_scipy_ndimage_result(
        scind.sum(pixel_score, labels, np.array(indexes, dtype=np.int32)))
python
{ "resource": "" }
q17730
calculate_solidity
train
def calculate_solidity(labels, indexes=None):
    """Calculate the area of each label divided by the area of its convex hull

    labels - a label matrix
    indexes - the indexes of the labels to measure
    """
    if indexes is not None:
        # Convert to a compatible 32-bit integer type
        indexes = np.array(indexes, dtype=np.int32)
    areas = scind.sum(np.ones(labels.shape), labels, indexes)
    convex_hull_areas = calculate_convex_hull_areas(labels, indexes)
    return areas / convex_hull_areas
python
{ "resource": "" }
q17731
white_tophat
train
def white_tophat(image, radius=None, mask=None, footprint=None):
    '''White tophat filter an image using a circular structuring element

    image - image in question
    radius - radius of the circular structuring element. If no radius,
             use an 8-connected structuring element.
    mask - mask of significant pixels in the image. Points outside of
           the mask will not participate in the morphological operations
    '''
    #
    # Subtract the opening to get the tophat
    #
    final_image = image - opening(image, radius, mask, footprint)
    #
    # Paint the masked pixels into the final image
    #
    if mask is not None:
        not_mask = np.logical_not(mask)
        final_image[not_mask] = image[not_mask]
    return final_image
python
{ "resource": "" }
q17732
black_tophat
train
def black_tophat(image, radius=None, mask=None, footprint=None):
    '''Black tophat filter an image using a circular structuring element

    image - image in question
    radius - radius of the circular structuring element. If no radius,
             use an 8-connected structuring element.
    mask - mask of significant pixels in the image. Points outside of
           the mask will not participate in the morphological operations
    '''
    #
    # Subtract the image from the closing to get the bothat
    #
    final_image = closing(image, radius, mask, footprint) - image
    #
    # Paint the masked pixels into the final image
    #
    if mask is not None:
        not_mask = np.logical_not(mask)
        final_image[not_mask] = image[not_mask]
    return final_image
python
{ "resource": "" }
q17733
grey_erosion
train
def grey_erosion(image, radius=None, mask=None, footprint=None):
    '''Perform a grey erosion with masking'''
    if footprint is None:
        if radius is None:
            footprint = np.ones((3, 3), bool)
            radius = 1
        else:
            footprint = strel_disk(radius) == 1
    else:
        radius = max(1, np.max(np.array(footprint.shape) // 2))
    iradius = int(np.ceil(radius))
    #
    # Do a grey_erosion with masked pixels = 1 so they don't participate
    #
    big_image = np.ones(np.array(image.shape) + iradius * 2)
    big_image[iradius:-iradius, iradius:-iradius] = image
    if mask is not None:
        not_mask = np.logical_not(mask)
        big_image[iradius:-iradius, iradius:-iradius][not_mask] = 1
    processed_image = scind.grey_erosion(big_image, footprint=footprint)
    final_image = processed_image[iradius:-iradius, iradius:-iradius]
    if mask is not None:
        final_image[not_mask] = image[not_mask]
    return final_image
python
{ "resource": "" }
q17734
opening
train
def opening(image, radius=None, mask=None, footprint=None):
    '''Do a morphological opening

    image - pixel image to operate on
    radius - use a structuring element with the given radius. If no
             radius, use an 8-connected structuring element.
    mask - if present, only use unmasked pixels for operations
    '''
    eroded_image = grey_erosion(image, radius, mask, footprint)
    return grey_dilation(eroded_image, radius, mask, footprint)
python
{ "resource": "" }
q17735
closing
train
def closing(image, radius=None, mask=None, footprint=None):
    '''Do a morphological closing

    image - pixel image to operate on
    radius - use a structuring element with the given radius. If no
             radius, use an 8-connected structuring element.
    mask - if present, only use unmasked pixels for operations
    '''
    dilated_image = grey_dilation(image, radius, mask, footprint)
    return grey_erosion(dilated_image, radius, mask, footprint)
python
{ "resource": "" }
q17736
openlines
train
def openlines(image, linelength=10, dAngle=10, mask=None):
    """
    Do a morphological opening along lines of different angles.
    Return difference between max and min response to different angles
    for each pixel. This effectively removes dots and only keeps lines.

    image - pixel image to operate on
    linelength - length of the structuring element
    dAngle - angle step for the rotating lines
    mask - if present, only use unmasked pixels for operations
    """
    nAngles = 180 // dAngle
    openingstack = np.zeros((nAngles, image.shape[0], image.shape[1]),
                            image.dtype)
    for iAngle in range(nAngles):
        angle = dAngle * iAngle
        se = strel_line(linelength, angle)
        openingstack[iAngle, :, :] = opening(image, mask=mask,
                                             footprint=se)
    imLines = np.max(openingstack, axis=0) - np.min(openingstack, axis=0)
    return imLines
python
{ "resource": "" }
q17737
pattern_of
train
def pattern_of(index):
    '''Return the pattern represented by an index value'''
    return np.array([[index & 2**0, index & 2**1, index & 2**2],
                     [index & 2**3, index & 2**4, index & 2**5],
                     [index & 2**6, index & 2**7, index & 2**8]], bool)
python
{ "resource": "" }
q17738
index_of
train
def index_of(pattern):
    '''Return the index of a given pattern'''
    return (pattern[0, 0] * 2**0 + pattern[0, 1] * 2**1 +
            pattern[0, 2] * 2**2 + pattern[1, 0] * 2**3 +
            pattern[1, 1] * 2**4 + pattern[1, 2] * 2**5 +
            pattern[2, 0] * 2**6 + pattern[2, 1] * 2**7 +
            pattern[2, 2] * 2**8)
python
{ "resource": "" }
q17739
make_table
train
def make_table(value, pattern, care=np.ones((3, 3), bool)):
    '''Return a table suitable for table_lookup

    value - set all table entries matching "pattern" to "value", all
            others to not "value"
    pattern - a 3x3 boolean array with the pattern to match
    care - a 3x3 boolean array where each value is true if the pattern
           must match at that position and false if we don't care if
           the pattern matches at that position.
    '''
    def fn(index, p, i, j):
        '''Return true if bit position "p" in index matches pattern'''
        return ((((index & 2**p) > 0) == pattern[i, j]) or not care[i, j])

    return np.array([value
                     if (fn(i, 0, 0, 0) and fn(i, 1, 0, 1) and
                         fn(i, 2, 0, 2) and fn(i, 3, 1, 0) and
                         fn(i, 4, 1, 1) and fn(i, 5, 1, 2) and
                         fn(i, 6, 2, 0) and fn(i, 7, 2, 1) and
                         fn(i, 8, 2, 2))
                     else not value
                     for i in range(512)], bool)
python
{ "resource": "" }
q17740
branchpoints
train
def branchpoints(image, mask=None):
    '''Remove all pixels from an image except for branchpoints

    image - a skeletonized image
    mask -  a mask of pixels excluded from consideration

    1 0 1    ? 0 ?
    0 1 0 -> 0 1 0
    0 1 0    0 ? 0
    '''
    global branchpoints_table
    if mask is None:
        masked_image = image
    else:
        masked_image = image.astype(bool).copy()
        masked_image[~mask] = False
    result = table_lookup(masked_image, branchpoints_table, False, 1)
    if mask is not None:
        result[~mask] = image[~mask]
    return result
python
{ "resource": "" }
q17741
branchings
train
def branchings(image, mask=None):
    '''Count the number of branches emanating from each pixel

    image - a binary image
    mask - optional mask of pixels not to consider

    This is the count of the number of branches that emanate from a
    pixel. A pixel with neighbors fore and aft has branches fore and
    aft = 2. An endpoint has one branch. A fork has 3. Finally, there's
    the quadrabranch which has 4:

    1 0 1
    0 1 0 -> 4
    1 0 1
    '''
    global branchings_table
    if mask is None:
        masked_image = image
    else:
        masked_image = image.astype(bool).copy()
        masked_image[~mask] = False
    #
    # Not a binary operation, so we do a convolution with the following
    # kernel to get the indices into the table.
    #
    kernel = np.array([[1, 2, 4],
                       [8, 16, 32],
                       [64, 128, 256]])
    indexer = scind.convolve(masked_image.astype(int), kernel,
                             mode='constant').astype(int)
    result = branchings_table[indexer]
    return result
python
{ "resource": "" }
q17742
bridge
train
def bridge(image, mask=None, iterations=1):
    '''Fill in pixels that bridge gaps.

    1 0 0    1 0 0
    0 0 0 -> 0 1 0
    0 0 1    0 0 1
    '''
    global bridge_table
    if mask is None:
        masked_image = image
    else:
        masked_image = image.astype(bool).copy()
        masked_image[~mask] = False
    result = table_lookup(masked_image, bridge_table, False, iterations)
    if mask is not None:
        result[~mask] = image[~mask]
    return result
python
{ "resource": "" }
q17743
clean
train
def clean(image, mask=None, iterations=1):
    '''Remove isolated pixels

    0 0 0    0 0 0
    0 1 0 -> 0 0 0
    0 0 0    0 0 0

    Border pixels and pixels adjoining masks are removed unless one
    valid neighbor is true.
    '''
    global clean_table
    if mask is None:
        masked_image = image
    else:
        masked_image = image.astype(bool).copy()
        masked_image[~mask] = False
    result = table_lookup(masked_image, clean_table, False, iterations)
    if mask is not None:
        result[~mask] = image[~mask]
    return result
python
{ "resource": "" }
q17744
diag
train
def diag(image, mask=None, iterations=1):
    '''4-connect pixels that are 8-connected

    0 0 0    0 0 ?
    0 0 1 -> 0 1 1
    0 1 0    ? 1 ?
    '''
    global diag_table
    if mask is None:
        masked_image = image
    else:
        masked_image = image.astype(bool).copy()
        masked_image[~mask] = False
    result = table_lookup(masked_image, diag_table, False, iterations)
    if mask is not None:
        result[~mask] = image[~mask]
    return result
python
{ "resource": "" }
q17745
endpoints
train
def endpoints(image, mask=None):
    '''Remove all pixels from an image except for endpoints

    image - a skeletonized image
    mask -  a mask of pixels excluded from consideration

    1 0 0    ? 0 0
    0 1 0 -> 0 1 0
    0 0 0    0 0 0
    '''
    global endpoints_table
    if mask is None:
        masked_image = image
    else:
        masked_image = image.astype(bool).copy()
        masked_image[~mask] = False
    result = table_lookup(masked_image, endpoints_table, False, 1)
    if mask is not None:
        result[~mask] = image[~mask]
    return result
python
{ "resource": "" }
q17746
fill
train
def fill(image, mask=None, iterations=1):
    '''Fill isolated black pixels

    1 1 1    1 1 1
    1 0 1 -> 1 1 1
    1 1 1    1 1 1
    '''
    global fill_table
    if mask is None:
        masked_image = image
    else:
        masked_image = image.astype(bool).copy()
        masked_image[~mask] = True
    result = table_lookup(masked_image, fill_table, True, iterations)
    if mask is not None:
        result[~mask] = image[~mask]
    return result
python
{ "resource": "" }
q17747
fill4
train
def fill4(image, mask=None, iterations=1):
    '''Fill 4-connected black pixels

    x 1 x    x 1 x
    1 0 1 -> 1 1 1
    x 1 x    x 1 x
    '''
    global fill4_table
    if mask is None:
        masked_image = image
    else:
        masked_image = image.astype(bool).copy()
        masked_image[~mask] = True
    result = table_lookup(masked_image, fill4_table, True, iterations)
    if mask is not None:
        result[~mask] = image[~mask]
    return result
python
{ "resource": "" }
q17748
majority
train
def majority(image, mask=None, iterations=1):
    '''A pixel takes the value of the majority of its neighbors'''
    global majority_table
    if mask is None:
        masked_image = image
    else:
        masked_image = image.astype(bool).copy()
        masked_image[~mask] = False
    result = table_lookup(masked_image, majority_table, False, iterations)
    if mask is not None:
        result[~mask] = image[~mask]
    return result
python
{ "resource": "" }
q17749
remove
train
def remove(image, mask=None, iterations=1):
    '''Turn 1 pixels to 0 if their 4-connected neighbors are all 0

    ? 1 ?    ? 1 ?
    1 1 1 -> 1 0 1
    ? 1 ?    ? 1 ?
    '''
    global remove_table
    if mask is None:
        masked_image = image
    else:
        masked_image = image.astype(bool).copy()
        masked_image[~mask] = False
    result = table_lookup(masked_image, remove_table, False)
    if mask is not None:
        result[~mask] = image[~mask]
    return result
python
{ "resource": "" }
q17750
spur
train
def spur(image, mask=None, iterations=1):
    '''Remove spur pixels from an image

    0 0 0    0 0 0
    0 1 0 -> 0 0 0
    0 0 1    0 0 ?
    '''
    global spur_table_1, spur_table_2
    if mask is None:
        masked_image = image
    else:
        masked_image = image.astype(bool).copy()
        masked_image[~mask] = False
    index_i, index_j, masked_image = prepare_for_index_lookup(masked_image,
                                                              False)
    if iterations is None:
        iterations = len(index_i)
    for i in range(iterations):
        for table in (spur_table_1, spur_table_2):
            index_i, index_j = index_lookup(index_i, index_j,
                                            masked_image, table, 1)
    masked_image = extract_from_image_lookup(image, index_i, index_j)
    if mask is not None:
        masked_image[~mask] = image[~mask]
    return masked_image
python
{ "resource": "" }
q17751
thicken
train
def thicken(image, mask=None, iterations=1):
    '''Thicken the objects in an image where doing so does not connect them

    0 0 0    ? ? ?
    0 0 0 -> ? 1 ?
    0 0 1    ? ? ?

    1 0 0    ? ? ?
    0 0 0 -> ? 0 ?
    0 0 1    ? ? ?
    '''
    global thicken_table
    if mask is None:
        masked_image = image
    else:
        masked_image = image.astype(bool).copy()
        masked_image[~mask] = False
    result = table_lookup(masked_image, thicken_table, False, iterations)
    if mask is not None:
        result[~mask] = image[~mask]
    return result
python
{ "resource": "" }
q17752
distance_color_labels
train
def distance_color_labels(labels):
    '''Recolor a labels matrix so that adjacent labels have distant numbers'''
    #
    # Color labels so adjacent ones are most distant
    #
    colors = color_labels(labels, True)
    #
    # Order pixels by color, then label #
    #
    rlabels = labels.ravel()
    order = np.lexsort((rlabels, colors.ravel()))
    #
    # Construct color indices with the cumsum trick:
    # cumsum([0,0,1,0,1]) = [0,0,1,1,2]
    # and copy back into the color array, using the order.
    #
    different = np.hstack([[rlabels[order[0]] > 0],
                           rlabels[order[1:]] != rlabels[order[:-1]]])
    # We need to be careful about ravel() returning a new object, but in
    # the usual case of colors having order='C', this won't create any
    # copies.
    rcolor = colors.ravel()
    rcolor[order] = np.cumsum(different).astype(colors.dtype)
    return rcolor.reshape(colors.shape).astype(labels.dtype)
python
{ "resource": "" }
q17753
skeletonize
train
def skeletonize(image, mask=None, ordering=None):
    '''Skeletonize the image

    Take the distance transform.
    Order the 1 points by the distance transform.
    Remove a point if it has more than 1 neighbor and if removing it
    does not change the Euler number.

    image - the binary image to be skeletonized
    mask - only skeletonize pixels within the mask
    ordering - a matrix of the same dimensions as the image. The matrix
               provides the ordering of the erosion with the lowest
               values being eroded first. The default is to use the
               distance transform.
    '''
    global eight_connect
    if mask is None:
        masked_image = image
    else:
        masked_image = image.astype(bool).copy()
        masked_image[~mask] = False
    #
    # Lookup table - start with only positive pixels.
    # Keep if # pixels in neighborhood is 2 or less
    # Keep if removing the pixel results in a different connectivity
    #
    table = (make_table(True,
                        np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]], bool),
                        np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]], bool)) &
             (np.array([scind.label(pattern_of(index), eight_connect)[1] !=
                        scind.label(pattern_of(index & ~2**4),
                                    eight_connect)[1]
                        for index in range(512)]) |
              np.array([np.sum(pattern_of(index)) < 3
                        for index in range(512)])))

    if ordering is None:
        distance = scind.distance_transform_edt(masked_image)
    else:
        distance = ordering
    #
    # The processing order along the edge is critical to the shape of
    # the resulting skeleton: if you process a corner first, that corner
    # will be eroded and the skeleton will miss the arm from that
    # corner. Pixels with fewer neighbors are more "cornery" and should
    # be processed last.
    #
    cornerness_table = np.array([9 - np.sum(pattern_of(index))
                                 for index in range(512)])
    corner_score = table_lookup(masked_image, cornerness_table, False, 1)
    i, j = np.mgrid[0:image.shape[0], 0:image.shape[1]]
    result = masked_image.copy()
    distance = distance[result]
    i = np.ascontiguousarray(i[result], np.int32)
    j = np.ascontiguousarray(j[result], np.int32)
    result = np.ascontiguousarray(result, np.uint8)
    #
    # We use a random # for tiebreaking. Assign each pixel in the image
    # a predictable, random # so that masking doesn't affect arbitrary
    # choices of skeletons
    #
    np.random.seed(0)
    tiebreaker = np.random.permutation(
        np.arange(np.prod(masked_image.shape)))
    tiebreaker.shape = masked_image.shape
    order = np.lexsort((tiebreaker[masked_image],
                        corner_score[masked_image],
                        distance))
    order = np.ascontiguousarray(order, np.int32)
    table = np.ascontiguousarray(table, np.uint8)
    skeletonize_loop(result, i, j, order, table)

    result = result.astype(bool)
    if mask is not None:
        result[~mask] = image[~mask]
    return result
python
{ "resource": "" }
q17754
skeletonize_labels
train
def skeletonize_labels(labels):
    '''Skeletonize a labels matrix'''
    #
    # The trick here is to separate touching labels by coloring the
    # labels matrix and then processing each color separately
    #
    colors = color_labels(labels)
    max_color = np.max(colors)
    if max_color == 0:
        return labels
    result = np.zeros(labels.shape, labels.dtype)
    for i in range(1, max_color + 1):
        mask = skeletonize(colors == i)
        result[mask] = labels[mask]
    return result
python
{ "resource": "" }
q17755
skeleton_length
train
def skeleton_length(labels, indices=None):
    '''Compute the length of all skeleton branches for labeled skeletons

    labels - a labels matrix
    indices - the indexes of the labels to be measured. Default is all

    returns an array of one skeleton length per label.
    '''
    global __skel_length_table
    if __skel_length_table is None:
        tbl = np.zeros(512, np.float32)
        for ii in range(-1, 2):
            for jj in range(-1, 2):
                if ii == 0 and jj == 0:
                    continue
                #
                # Set the bit to search for and the center bit
                #
                idx = 2 ** (ii + 1 + (jj + 1) * 3) | 16
                mask = (np.arange(512) & idx) == idx
                #
                # If we are four-connected to another pixel that is
                # connected to this one, they are 8-connected and that
                # is the distance.
                #
                # bad      good
                # x 1 0    0 0 0
                # x 1 1    0 1 0
                # x x x    0 x 1
                #
                if ii != 0 and jj != 0:
                    for adjacent_i, adjacent_j in (
                            (ii - 1, jj), (ii, jj - 1),
                            (ii + 1, jj), (ii, jj + 1)):
                        if any([_ < -1 or _ > 1
                                for _ in (adjacent_i, adjacent_j)]):
                            continue
                        aidx = 2 ** (adjacent_i + 1 +
                                     (adjacent_j + 1) * 3)
                        mask = mask & ((np.arange(512) & aidx) != aidx)
                tbl[mask] += np.sqrt(ii * ii + jj * jj) / 2
        __skel_length_table = tbl
    if indices is None:
        indices = np.arange(1, np.max(labels) + 1)
    else:
        indices = np.asanyarray(indices)
    if len(indices) == 0:
        return np.zeros(0)
    score = __skel_length_table[table_idx_from_labels(labels)]
    result = np.bincount(labels.ravel(),
                         weights=score.ravel(),
                         minlength=np.max(indices) + 1)
    return result[indices]
python
{ "resource": "" }
q17756
distance_to_edge
train
def distance_to_edge(labels):
    '''Compute the distance of a pixel to the edge of its object

    labels - a labels matrix

    returns a matrix of distances
    '''
    colors = color_labels(labels)
    max_color = np.max(colors)
    result = np.zeros(labels.shape)
    if max_color == 0:
        return result
    for i in range(1, max_color + 1):
        mask = (colors == i)
        result[mask] = scind.distance_transform_edt(mask)[mask]
    return result
python
{ "resource": "" }
q17757
is_local_maximum
train
def is_local_maximum(image, labels, footprint):
    '''Return a boolean array of points that are local maxima

    image - intensity image
    labels - find maxima only within labels. Zero is reserved for
             background.
    footprint - binary mask indicating the neighborhood to be examined
                must be a matrix with odd dimensions, center is taken to
                be the point in question.
    '''
    assert all((s % 2) == 1 for s in footprint.shape)
    footprint = (footprint != 0)
    footprint_extent = (np.array(footprint.shape) - 1) // 2
    if np.all(footprint_extent == 0):
        return labels > 0
    result = (labels > 0).copy()
    #
    # Create a labels matrix with zeros at the borders that might be
    # hit by the footprint.
    #
    big_labels = np.zeros(np.array(labels.shape) + footprint_extent * 2,
                          labels.dtype)
    big_labels[tuple(slice(fe, -fe) for fe in footprint_extent)] = labels
    #
    # Find the relative indexes of each footprint element
    #
    image_strides = np.array(image.strides) // image.dtype.itemsize
    big_strides = np.array(big_labels.strides) // big_labels.dtype.itemsize
    result_strides = np.array(result.strides) // result.dtype.itemsize
    footprint_offsets = np.mgrid[tuple(slice(-fe, fe + 1)
                                       for fe in footprint_extent)]
    footprint_offsets = footprint_offsets[:, footprint]
    #
    # Order by distance, low to high and get rid of center pt.
    #
    d = np.sum(footprint_offsets ** 2, 0)
    footprint_offsets, d = footprint_offsets[:, d > 0], d[d > 0]
    footprint_offsets = footprint_offsets[:, np.lexsort([d])]

    fp_image_offsets = np.sum(image_strides[:, np.newaxis] *
                              footprint_offsets, 0)
    fp_big_offsets = np.sum(big_strides[:, np.newaxis] *
                            footprint_offsets, 0)
    #
    # Get the index of each labeled pixel in the image and big_labels
    # arrays
    #
    indexes = np.mgrid[tuple(slice(0, x)
                             for x in labels.shape)][:, labels > 0]
    image_indexes = np.sum(image_strides[:, np.newaxis] * indexes, 0)
    big_indexes = np.sum(big_strides[:, np.newaxis] *
                         (indexes + footprint_extent[:, np.newaxis]), 0)
    result_indexes = np.sum(result_strides[:, np.newaxis] * indexes, 0)
    #
    # Now operate on the raveled images
    #
    big_labels_raveled = big_labels.ravel()
    image_raveled = image.ravel()
    result_raveled = result.ravel()
    #
    # A hit is a hit if the label at the offset matches the label at the
    # pixel and if the intensity at the pixel is greater or equal to the
    # intensity at the offset.
    #
    for fp_image_offset, fp_big_offset in zip(fp_image_offsets,
                                              fp_big_offsets):
        same_label = (big_labels_raveled[big_indexes + fp_big_offset] ==
                      big_labels_raveled[big_indexes])
        less_than = (image_raveled[image_indexes[same_label]] <
                     image_raveled[image_indexes[same_label] +
                                   fp_image_offset])
        mask = ~same_label
        mask[same_label] = ~less_than
        result_raveled[result_indexes[~mask]] = False
        result_indexes = result_indexes[mask]
        big_indexes = big_indexes[mask]
        image_indexes = image_indexes[mask]
    return result
python
{ "resource": "" }
q17758
is_obtuse
train
def is_obtuse(p1, v, p2):
    '''Determine whether the angle, p1 - v - p2 is obtuse

    p1 - N x 2 array of coordinates of first point on edge
    v  - N x 2 array of vertex coordinates
    p2 - N x 2 array of coordinates of second point on edge

    returns vector of booleans
    '''
    p1x = p1[:, 1]
    p1y = p1[:, 0]
    p2x = p2[:, 1]
    p2y = p2[:, 0]
    vx = v[:, 1]
    vy = v[:, 0]
    Dx = vx - p2x
    Dy = vy - p2y
    Dvp1x = p1x - vx
    Dvp1y = p1y - vy
    return Dvp1x * Dx + Dvp1y * Dy > 0
python
{ "resource": "" }
q17759
stretch
train
def stretch(image, mask=None):
    '''Normalize an image to make the minimum zero and maximum one

    image - pixel data to be normalized
    mask  - optional mask of relevant pixels. None = don't mask

    returns the stretched image
    '''
    image = np.array(image, float)
    if np.prod(image.shape) == 0:
        return image
    if mask is None:
        minval = np.min(image)
        maxval = np.max(image)
        if minval == maxval:
            if minval < 0:
                return np.zeros_like(image)
            elif minval > 1:
                return np.ones_like(image)
            return image
        else:
            return (image - minval) / (maxval - minval)
    else:
        significant_pixels = image[mask]
        if significant_pixels.size == 0:
            return image
        minval = np.min(significant_pixels)
        maxval = np.max(significant_pixels)
        if minval == maxval:
            transformed_image = minval
        else:
            transformed_image = ((significant_pixels - minval) /
                                 (maxval - minval))
        result = image.copy()
        result[mask] = transformed_image
        return result
python
{ "resource": "" }
q17760
median_filter
train
def median_filter(data, mask, radius, percent=50):
    '''Masked median filter with octagonal shape

    data - array of data to be median filtered.
    mask - mask of significant pixels in data
    radius - the radius of a circle inscribed into the filtering octagon
    percent - conceptually, order the significant pixels in the octagon,
              count them and choose the pixel indexed by the percent
              times the count divided by 100. More simply, 50 = median

    returns a filtered array. In areas where the median filter does not
    overlap the mask, the filtered result is undefined, but in practice,
    it will be the lowest value in the valid area.
    '''
    if mask is None:
        mask = np.ones(data.shape, dtype=bool)
    if np.all(~mask):
        return data.copy()
    #
    # Normalize the ranked data to 0-255
    #
    if (not np.issubdtype(data.dtype, np.integer) or
            np.min(data) < 0 or np.max(data) > 255):
        ranked_data, translation = rank_order(data[mask], nbins=255)
        was_ranked = True
    else:
        ranked_data = data[mask]
        was_ranked = False
    input = np.zeros(data.shape, np.uint8)
    input[mask] = ranked_data

    mmask = np.ascontiguousarray(mask, np.uint8)
    output = np.zeros(data.shape, np.uint8)

    _filter.median_filter(input, mmask, output, radius, percent)
    if was_ranked:
        result = translation[output]
    else:
        result = output
    return result
python
{ "resource": "" }
q17761
bilateral_filter
train
def bilateral_filter(image, mask, sigma_spatial, sigma_range,
                     sampling_spatial=None, sampling_range=None):
    """Bilateral filter of an image

    image - image to be bilaterally filtered
    mask  - mask of significant points in image
    sigma_spatial - standard deviation of the spatial Gaussian
    sigma_range   - standard deviation of the range Gaussian
    sampling_spatial - amount to reduce image array extents when
                       sampling; default is 1/2 sigma_spatial
    sampling_range - amount to reduce the range of values when sampling;
                     default is 1/2 sigma_range

    The bilateral filter is described by the following equation:

    sum(Fs(||p - q||)Fr(|Ip - Iq|)Iq) / sum(Fs(||p-q||)Fr(|Ip - Iq|))

    where the sum is over all points in the kernel
    p is all coordinates in the image
    q is the coordinates as perturbed by the mask
    Ip is the intensity at p
    Iq is the intensity at q
    Fs is the spatial convolution function, for us a Gaussian that
    falls off as the distance between falls off
    Fr is the "range" distance which falls off as the difference
    in intensity increases.

    1 / sum(Fs(||p-q||)Fr(|Ip - Iq|)) is the weighting for point p
    """
    # The algorithm is taken largely from code by Jiawen Chen which
    # miraculously extends to the masked case:
    # http://groups.csail.mit.edu/graphics/bilagrid/bilagrid_web.pdf
    #
    # Form a 3-d array whose extent is reduced in the i,j directions
    # by the spatial sampling parameter and whose extent is reduced in
    # the z (image intensity) direction by the range sampling parameter.
    #
    # Scatter each significant pixel in the image into the nearest
    # downsampled array address where the pixel's i,j coordinate gives
    # the corresponding i and j in the matrix and the intensity value
    # gives the corresponding z in the array.
    #
    # Count the # of values entered into each 3-d array element to form
    # a weight.
    #
    # Similarly convolve the downsampled value and weight arrays with a
    # 3-d Gaussian kernel whose i and j Gaussian is the sigma_spatial
    # and whose z is the sigma_range.
    #
    # Divide the value by the weight to scale each z value appropriately
    #
    # Linearly interpolate using an i x j x 3 array where [:,:,0] is the
    # i coordinate in the downsampled array, [:,:,1] is the j coordinate
    # and [:,:,2] is the unrounded index of the z-slot
    #
    # One difference is that I don't pad the intermediate arrays. The
    # weights bleed off the edges of the intermediate arrays and this
    # accounts for the ring of zero values used at the border bleeding
    # back into the intermediate arrays during convolution
    #
    if sampling_spatial is None:
        sampling_spatial = sigma_spatial / 2.0
    if sampling_range is None:
        sampling_range = sigma_range / 2.0

    if np.all(np.logical_not(mask)):
        return image

    masked_image = image[mask]
    image_min = np.min(masked_image)
    image_max = np.max(masked_image)
    image_delta = image_max - image_min
    if image_delta == 0:
        return image
    #
    # ds = downsampled. Calculate the ds array sizes and sigmas.
    #
    ds_sigma_spatial = sigma_spatial / sampling_spatial
    ds_sigma_range = sigma_range / sampling_range
    ds_i_limit = int(image.shape[0] / sampling_spatial) + 2
    ds_j_limit = int(image.shape[1] / sampling_spatial) + 2
    ds_z_limit = int(image_delta / sampling_range) + 2

    grid_data = np.zeros((ds_i_limit, ds_j_limit, ds_z_limit))
    grid_weights = np.zeros((ds_i_limit, ds_j_limit, ds_z_limit))
    #
    # Compute the downsampled i, j and z coordinates at each point
    #
    di, dj = (np.mgrid[0:image.shape[0],
                       0:image.shape[1]].astype(float) / sampling_spatial)
    dz = (masked_image - image_min) / sampling_range
    #
    # Treat this as a list of 3-d coordinates from now on
    #
    di = di[mask]
    dj = dj[mask]
    #
    # scatter the unmasked image points into the data array and
    # scatter a value of 1 per point into the weights
    #
    grid_data[(di + .5).astype(int),
              (dj + .5).astype(int),
              (dz + .5).astype(int)] += masked_image
    grid_weights[(di + .5).astype(int),
                 (dj + .5).astype(int),
                 (dz + .5).astype(int)] += 1
    #
    # Make a Gaussian kernel
    #
    kernel_spatial_limit = int(2 * ds_sigma_spatial) + 1
    kernel_range_limit = int(2 * ds_sigma_range) + 1
    ki, kj, kz = np.mgrid[-kernel_spatial_limit:kernel_spatial_limit + 1,
                          -kernel_spatial_limit:kernel_spatial_limit + 1,
                          -kernel_range_limit:kernel_range_limit + 1]
    kernel = np.exp(-.5 * ((ki ** 2 + kj ** 2) / ds_sigma_spatial ** 2 +
                           kz ** 2 / ds_sigma_range ** 2))

    blurred_grid_data = convolve(grid_data, kernel, mode='constant')
    blurred_weights = convolve(grid_weights, kernel, mode='constant')
    weight_mask = blurred_weights > 0
    normalized_blurred_grid = np.zeros(grid_data.shape)
    normalized_blurred_grid[weight_mask] = (
        blurred_grid_data[weight_mask] / blurred_weights[weight_mask])
    #
    # Now use di, dj and dz to find the coordinate of the point within
    # the blurred grid to use. We actually interpolate between points
    # here (both in the i,j direction to get intermediate z values and
    # in the z direction to get the slot, roughly where we put our
    # original value)
    #
    dijz = np.vstack((di, dj, dz))

    image_copy = image.copy()
    image_copy[mask] = map_coordinates(normalized_blurred_grid, dijz,
                                       order=1)
    return image_copy
python
{ "resource": "" }
q17762
laplacian_of_gaussian
train
def laplacian_of_gaussian(image, mask, size, sigma):
    '''Perform the Laplacian of Gaussian transform on the image

    image - 2-d image array
    mask  - binary mask of significant pixels
    size  - length of side of square kernel to use
    sigma - standard deviation of the Gaussian
    '''
    half_size = size // 2
    i, j = np.mgrid[-half_size:half_size + 1,
                    -half_size:half_size + 1].astype(float) / float(sigma)
    distance = (i ** 2 + j ** 2) / 2
    gaussian = np.exp(-distance)
    #
    # Normalize the Gaussian
    #
    gaussian = gaussian / np.sum(gaussian)

    log = (distance - 1) * gaussian
    #
    # Normalize the kernel to have a sum of zero
    #
    log = log - np.mean(log)
    if mask is None:
        mask = np.ones(image.shape[:2], bool)
    masked_image = image.copy()
    masked_image[~mask] = 0
    output = convolve(masked_image, log, mode='constant', cval=0)
    #
    # Do the LoG of the inverse of the mask. This finds the magnitude of
    # the contribution of the masked pixels. We then fudge by
    # multiplying by the value at the pixel of interest - this
    # effectively sets the value at a masked pixel to that of the pixel
    # of interest.
    #
    # It underestimates the LoG, but that's not a terrible thing.
    #
    correction = convolve((~mask).astype(float), log, mode='constant',
                          cval=1)
    output += correction * image
    output[~mask] = image[~mask]
    return output
python
{ "resource": "" }
q17763
roberts
train
def roberts(image, mask=None):
    '''Find edges using the Roberts algorithm

    image - the image to process
    mask  - mask of relevant points

    The algorithm returns the magnitude of the output of the two Roberts
    convolution kernels.

    The following is the canonical citation for the algorithm:
    L. Roberts Machine Perception of 3-D Solids, Optical and
    Electro-optical Information Processing, MIT Press 1965.

    The following website has a tutorial on the algorithm:
    http://homepages.inf.ed.ac.uk/rbf/HIPR2/roberts.htm
    '''
    result = np.zeros(image.shape)
    #
    # Four quadrants and two convolutions:
    #
    # q0,0  | q0,1     1 |  0   anti-diagonal
    # q1,0  | q1,1     0 | -1
    #
    # q-1,0 | q0,0     0 |  1   diagonal
    # q-1,1 | q0,1    -1 |  0
    #
    # Points near the mask edges and image edges are computed unreliably
    # so make them zero (no edge) in the result
    #
    if mask is None:
        mask = np.ones(image.shape, bool)
    big_mask = binary_erosion(mask,
                              generate_binary_structure(2, 2),
                              border_value=0)
    result[~big_mask] = 0
    q00 = image[:, :][big_mask]
    q11 = image[1:, 1:][big_mask[:-1, :-1]]
    qm11 = image[:-1, 1:][big_mask[1:, :-1]]
    diagonal = q00 - qm11
    anti_diagonal = q00 - q11
    result[big_mask] = np.sqrt(diagonal * diagonal +
                               anti_diagonal * anti_diagonal)
    return result
python
{ "resource": "" }
q17764
sobel
train
def sobel(image, mask=None):
    '''Calculate the absolute magnitude Sobel to find the edges

    image - image to process
    mask - mask of relevant points

    Take the square root of the sum of the squares of the horizontal
    and vertical Sobels to get a magnitude that's somewhat insensitive
    to direction.

    Note that scipy's Sobel returns a directional Sobel which isn't
    useful for edge detection in its raw form.
    '''
    return np.sqrt(hsobel(image, mask) ** 2 + vsobel(image, mask) ** 2)
python
{ "resource": "" }
q17765
prewitt
train
def prewitt(image, mask=None):
    '''Find the edge magnitude using the Prewitt transform

    image - image to process
    mask - mask of relevant points

    Return the square root of the sum of squares of the horizontal and
    vertical Prewitt transforms.
    '''
    return np.sqrt(hprewitt(image, mask) ** 2 + vprewitt(image, mask) ** 2)
python
{ "resource": "" }
q17766
hprewitt
train
def hprewitt(image, mask=None):
    '''Find the horizontal edges of an image using the Prewitt transform

    image - image to process
    mask - mask of relevant points

    We use the following kernel and return the absolute value of the
    result at each point:

     1  1  1
     0  0  0
    -1 -1 -1
    '''
    if mask is None:
        mask = np.ones(image.shape, bool)
    big_mask = binary_erosion(mask,
                              generate_binary_structure(2, 2),
                              border_value=0)
    result = np.abs(convolve(image,
                             np.array([[1, 1, 1],
                                       [0, 0, 0],
                                       [-1, -1, -1]]).astype(float) / 3.0))
    result[~big_mask] = 0
    return result
python
{ "resource": "" }
q17767
gabor
train
def gabor(image, labels, frequency, theta):
    '''Gabor-filter the objects in an image

    image - 2-d grayscale image to filter
    labels - a similarly shaped labels matrix
    frequency - cycles per trip around the circle
    theta - angle of the filter. 0 to 2 pi

    Calculate the Gabor filter centered on the centroids of each object
    in the image. Summing the resulting image over the labels matrix
    will yield a texture measure per object.
    '''
    #
    # The code inscribes the X and Y position of each pixel relative to
    # the centroid of that pixel's object. After that, the Gabor filter
    # for the image can be calculated per-pixel and the image can be
    # multiplied by the filter to get the filtered image.
    #
    nobjects = np.max(labels)
    if nobjects == 0:
        return image
    centers = centers_of_labels(labels)
    areas = fix(scind.sum(np.ones(image.shape), labels,
                          np.arange(nobjects, dtype=np.int32) + 1))
    mask = labels > 0
    i, j = np.mgrid[0:image.shape[0], 0:image.shape[1]].astype(float)
    i = i[mask]
    j = j[mask]
    image = image[mask]
    lm = labels[mask] - 1
    i -= centers[0, lm]
    j -= centers[1, lm]
    sigma = np.sqrt(areas / np.pi) / 3.0
    sigma = sigma[lm]
    g_exp = (1000.0 / (2.0 * np.pi * sigma ** 2) *
             np.exp(-(i ** 2 + j ** 2) / (2 * sigma ** 2)))
    g_angle = 2 * np.pi / frequency * (i * np.cos(theta) +
                                       j * np.sin(theta))
    g_cos = g_exp * np.cos(g_angle)
    g_sin = g_exp * np.sin(g_angle)
    #
    # Normalize so that the sum of the filter over each object is zero
    # and so that there is no bias-value within each object.
    #
    g_cos_mean = fix(scind.mean(g_cos, lm, np.arange(nobjects)))
    i_mean = fix(scind.mean(image, lm, np.arange(nobjects)))
    i_norm = image - i_mean[lm]
    g_sin_mean = fix(scind.mean(g_sin, lm, np.arange(nobjects)))
    g_cos -= g_cos_mean[lm]
    g_sin -= g_sin_mean[lm]
    g = np.zeros(mask.shape, dtype=complex)
    g[mask] = i_norm * g_cos + i_norm * g_sin * 1j
    return g
python
{ "resource": "" }
q17768
enhance_dark_holes
train
def enhance_dark_holes(image, min_radius, max_radius, mask=None):
    '''Enhance dark holes using a rolling ball filter

    image - grayscale 2-d image
    min_radius, max_radius - range of radii at which to enhance holes
    '''
    #
    # Do 4-connected erosion
    #
    se = np.array([[False, True, False],
                   [True, True, True],
                   [False, True, False]])
    #
    # Invert the intensities
    #
    inverted_image = image.max() - image
    previous_reconstructed_image = inverted_image
    eroded_image = inverted_image
    smoothed_image = np.zeros(image.shape)
    for i in range(max_radius + 1):
        eroded_image = grey_erosion(eroded_image, mask=mask, footprint=se)
        reconstructed_image = grey_reconstruction(eroded_image,
                                                  inverted_image,
                                                  footprint=se)
        output_image = previous_reconstructed_image - reconstructed_image
        if i >= min_radius:
            smoothed_image = np.maximum(smoothed_image, output_image)
        previous_reconstructed_image = reconstructed_image
    return smoothed_image
python
{ "resource": "" }
q17769
granulometry_filter
train
def granulometry_filter(image, min_radius, max_radius, mask=None):
    '''Enhance bright structures within a min and max radius using a
    rolling ball filter

    image - grayscale 2-d image
    min_radius, max_radius - range of radii of the structures to keep
    '''
    #
    # Do 4-connected erosion
    #
    se = np.array([[False, True, False],
                   [True, True, True],
                   [False, True, False]])
    #
    # Initialize
    #
    previous_opened_image = image
    eroded_image = image
    selected_granules_image = np.zeros(image.shape)
    #
    # Select granules by successive morphological openings
    #
    for i in range(max_radius + 1):
        eroded_image = grey_erosion(eroded_image, mask=mask, footprint=se)
        # Opening by reconstruction under the original image, mirroring
        # the dark-holes variant above (which reconstructs under its
        # inverted input)
        opened_image = grey_reconstruction(eroded_image, image,
                                           footprint=se)
        output_image = previous_opened_image - opened_image
        if i >= min_radius:
            selected_granules_image = np.maximum(selected_granules_image,
                                                 output_image)
        previous_opened_image = opened_image
    return selected_granules_image
python
{ "resource": "" }
q17770
velocity_kalman_model
train
def velocity_kalman_model(): '''Return a KalmanState set up to model objects with constant velocity The observation and measurement vectors are i,j. The state vector is i,j,vi,vj ''' om = np.array([[1,0,0,0], [0, 1, 0, 0]]) tm = np.array([[1,0,1,0], [0,1,0,1], [0,0,1,0], [0,0,0,1]]) return KalmanState(om, tm)
python
{ "resource": "" }
q17771
reverse_velocity_kalman_model
train
def reverse_velocity_kalman_model(): '''Return a KalmanState set up to model going backwards in time''' om = np.array([[1,0,0,0], [0, 1, 0, 0]]) tm = np.array([[1,0,-1,0], [0,1,0,-1], [0,0,1,0], [0,0,0,1]]) return KalmanState(om, tm)
python
{ "resource": "" }
q17772
line_integration
train
def line_integration(image, angle, decay, sigma):
    '''Integrate the image along the given angle

    DIC images are the directional derivative of the underlying image. This
    filter reconstructs the original image by integrating along that
    direction.

    image - a 2-dimensional array
    angle - shear angle; note that it is passed directly to
            scipy.ndimage.rotate, which interprets it in degrees. We
            integrate perpendicular to this angle.
    decay - an exponential decay applied to the integration
    sigma - the standard deviation of a Gaussian which is used to smooth the
            image in the direction parallel to the shear angle.
    '''
    #
    # Normalize the image so that the mean is zero
    #
    normalized = image - np.mean(image)
    #
    # Rotate the image so the I direction is perpendicular to the shear angle.
    #
    rotated = scind.rotate(normalized, -angle)
    #
    # Smooth only in the j direction (parallel to the shear angle)
    #
    smoothed = scind.gaussian_filter1d(rotated, sigma) if sigma > 0 else rotated
    #
    # We want result[i+1, :] to be result[i, :] * decay + img[i+1, :].
    # Could be done by convolution with a ramp, maybe in FFT domain,
    # but we just do a bunch of steps here.
    #
    result_fwd = smoothed.copy()
    for i in range(1, result_fwd.shape[0]):
        result_fwd[i] += result_fwd[i - 1] * decay
    result_rev = smoothed.copy()
    for i in reversed(range(result_rev.shape[0] - 1)):
        result_rev[i] += result_rev[i + 1] * decay
    result = (result_fwd - result_rev) / 2
    #
    # Rotate back and chop the result to the original size
    #
    result = scind.rotate(result, angle)
    ipad = int((result.shape[0] - image.shape[0]) / 2)
    jpad = int((result.shape[1] - image.shape[1]) / 2)
    result = result[ipad:(ipad + image.shape[0]),
                    jpad:(jpad + image.shape[1])]
    #
    # Scale the result to the input image's intensity range.
    #
    img_min, img_max = np.min(image), np.max(image)
    result_min, result_max = np.min(result), np.max(result)
    if (img_min == img_max) or (result_min == result_max):
        return np.zeros(result.shape)
    result = (result - result_min) / (result_max - result_min)
    result = img_min + result * (img_max - img_min)
    return result
python
{ "resource": "" }
q17773
variance_transform
train
def variance_transform(img, sigma, mask=None):
    '''Calculate a weighted variance of the image

    This function calculates the variance of an image, weighting the
    local contributions by a Gaussian.

    img - image to be transformed
    sigma - standard deviation of the Gaussian
    mask - mask of relevant pixels in the image
    '''
    if mask is None:
        mask = np.ones(img.shape, bool)
    else:
        img = img.copy()
        img[~mask] = 0
    #
    # This is the Gaussian of the mask... so we can normalize for
    # pixels near the edge of the mask
    #
    gmask = scind.gaussian_filter(mask.astype(float), sigma, mode='constant')
    img_mean = scind.gaussian_filter(img, sigma, mode='constant') / gmask
    img_squared = scind.gaussian_filter(img ** 2, sigma, mode='constant') / gmask
    var = img_squared - img_mean ** 2
    return var
python
{ "resource": "" }
q17774
inv_n
train
def inv_n(x): '''given N matrices, return N inverses''' # # The inverse of a small matrix (e.g. 3x3) is # # 1 # ----- C(j,i) # det(A) # # where C(j,i) is the cofactor of matrix A at position j,i # assert x.ndim == 3 assert x.shape[1] == x.shape[2] c = np.array([ [cofactor_n(x, j, i) * (1 - ((i+j) % 2)*2) for j in range(x.shape[1])] for i in range(x.shape[1])]).transpose(2,0,1) return c / det_n(x)[:, np.newaxis, np.newaxis]
python
{ "resource": "" }
q17775
det_n
train
def det_n(x):
    '''given N matrices, return N determinants'''
    assert x.ndim == 3
    assert x.shape[1] == x.shape[2]
    if x.shape[1] == 1:
        return x[:, 0, 0]
    result = np.zeros(x.shape[0])
    for permutation in permutations(np.arange(x.shape[1])):
        # the sign of each term is the parity of its permutation
        sign = parity(permutation)
        result += np.prod([x[:, i, permutation[i]]
                           for i in range(x.shape[1])], 0) * sign
    return result
python
{ "resource": "" }
q17776
parity
train
def parity(x):
    '''The parity of a permutation

    The parity of a permutation is even if the permutation can be formed
    by an even number of transpositions and is odd otherwise.

    Equivalently, the parity is even if there is an even number of cycles
    of even length, and odd otherwise. For instance, in (1, 2, 0, 3) there
    is the cycle (0->1, 1->2, 2->0) and the cycle (3->3). Both cycles have
    odd length, so the parity is even: you can exchange 0 and 1, giving
    (0, 2, 1, 3), then 2 and 1 to get (0, 1, 2, 3) - two transpositions.

    Returns 1 for an even permutation and -1 for an odd one.
    '''
    order = np.lexsort((x,))
    hit = np.zeros(len(x), bool)
    p = 0
    for j in range(len(x)):
        if not hit[j]:
            cycle = 1
            i = order[j]
            # mark every node in a cycle
            while i != j:
                hit[i] = True
                i = order[i]
                cycle += 1
            # a cycle of length k accounts for k - 1 transpositions
            p += cycle - 1
    return 1 if p % 2 == 0 else -1
python
{ "resource": "" }
q17777
dot_n
train
def dot_n(x, y):
    '''given two tensors N x I x K and N x K x J return N dot products

    If either x or y is 2-dimensional, broadcast it over all N.

    Dot products are size N x I x J.

    Example:
    >>> x = np.array([[[1,2], [3,4], [5,6]],[[7,8], [9,10],[11,12]]])
    >>> y = np.array([[[1,2,3], [4,5,6]],[[7,8,9],[10,11,12]]])
    >>> print(dot_n(x, y))
    array([[[  9,  12,  15],
            [ 19,  26,  33],
            [ 29,  40,  51]],

           [[129, 144, 159],
            [163, 182, 201],
            [197, 220, 243]]])
    '''
    if x.ndim == 2:
        if y.ndim == 2:
            return np.dot(x, y)
        x3 = False
        y3 = True
        nlen = y.shape[0]
    elif y.ndim == 2:
        nlen = x.shape[0]
        x3 = True
        y3 = False
    else:
        assert x.shape[0] == y.shape[0]
        nlen = x.shape[0]
        x3 = True
        y3 = True
    assert x.shape[1 + x3] == y.shape[0 + y3]
    n, i, j, k = np.mgrid[0:nlen,
                          0:x.shape[0 + x3],
                          0:y.shape[1 + y3],
                          0:y.shape[0 + y3]]
    return np.sum((x[n, i, k] if x3 else x[i, k]) *
                  (y[n, k, j] if y3 else y[k, j]), 3)
python
{ "resource": "" }
q17778
permutations
train
def permutations(x):
    '''Given a listlike, x, return all permutations of x

    Returns the permutations of x in the lexical order of their indices:
    e.g.
    >>> x = [ 1, 2, 3, 4 ]
    >>> for p in permutations(x):
    >>>     print(p)
    [ 1, 2, 3, 4 ]
    [ 1, 2, 4, 3 ]
    [ 1, 3, 2, 4 ]
    [ 1, 3, 4, 2 ]
    [ 1, 4, 2, 3 ]
    [ 1, 4, 3, 2 ]
    [ 2, 1, 3, 4 ]
    ...
    [ 4, 3, 2, 1 ]
    '''
    #
    # The algorithm is attributed to Narayana Pandit from his
    # Ganita Kaumudi (1356). The following is from
    #
    # http://en.wikipedia.org/wiki/Permutation#Systematic_generation_of_all_permutations
    #
    # 1. Find the largest index k such that a[k] < a[k + 1].
    #    If no such index exists, the permutation is the last permutation.
    # 2. Find the largest index l such that a[k] < a[l].
    #    Since k + 1 is such an index, l is well defined and satisfies k < l.
    # 3. Swap a[k] with a[l].
    # 4. Reverse the sequence from a[k + 1] up to and including the final
    #    element a[n].
    #
    yield list(x)  # don't forget to do the first one
    x = np.array(x)
    a = np.arange(len(x))
    while True:
        # 1 - find largest k or stop
        ak_lt_ak_next = np.argwhere(a[:-1] < a[1:])
        if len(ak_lt_ak_next) == 0:
            # PEP 479: raising StopIteration inside a generator is an error
            # in Python 3.7+; a plain return ends the generator
            return
        k = ak_lt_ak_next[-1, 0]
        # 2 - find largest l such that a[k] < a[l]
        ak_lt_al = np.argwhere(a[k] < a)
        l = ak_lt_al[-1, 0]
        # 3 - swap
        a[k], a[l] = (a[l], a[k])
        # 4 - reverse
        if k < len(x) - 1:
            a[k + 1:] = a[:k:-1].copy()
        yield x[a].tolist()
python
{ "resource": "" }
q17779
circular_hough
train
def circular_hough(img, radius, nangles=None, mask=None):
    '''Circular Hough transform of an image

    img - image to be transformed.
    radius - radius of circle
    nangles - # of angles to measure, e.g. nangles = 4 means accumulate at
              0, 90, 180 and 270 degrees.

    Return the Hough transform of the image which is the accumulators
    for the transform x + r cos t, y + r sin t.
    '''
    a = np.zeros(img.shape)
    m = np.zeros(img.shape)
    if nangles is None:
        # If no angle count is specified, use a number proportional to the
        # circumference, rounded down to a multiple of 4 so the sampling is
        # bilaterally symmetric
        nangles = int(np.pi * radius + 3.5) & (~ 3)
    for i in range(nangles):
        theta = 2 * np.pi * float(i) / float(nangles)
        x = int(np.round(radius * np.cos(theta)))
        y = int(np.round(radius * np.sin(theta)))
        xmin = max(0, -x)
        xmax = min(img.shape[1] - x, img.shape[1])
        ymin = max(0, -y)
        ymax = min(img.shape[0] - y, img.shape[0])
        dest = (slice(ymin, ymax), slice(xmin, xmax))
        src = (slice(ymin + y, ymax + y), slice(xmin + x, xmax + x))
        if mask is not None:
            a[dest][mask[src]] += img[src][mask[src]]
            m[dest][mask[src]] += 1
        else:
            a[dest] += img[src]
            m[dest] += 1
    a[m > 0] /= m[m > 0]
    return a
python
{ "resource": "" }
q17780
poisson_equation
train
def poisson_equation(image, gradient=1, max_iter=100, convergence=.01,
                     percentile=90.0):
    '''Estimate the solution to the Poisson Equation

    The Poisson Equation here is Laplacian(x) = -h^2/4 and, in this context,
    we use a boundary condition where x is zero for background pixels. Also,
    we set h^2/4 = 1 to indicate that each pixel is a distance of 1 from its
    neighbors.

    The estimation exits after max_iter iterations or if the given percentile
    of foreground pixels differ by less than the convergence fraction
    from one pass to the next.

    Some ideas taken from Gorelick, "Shape representation and classification
    using the Poisson Equation", IEEE Transactions on Pattern Analysis and
    Machine Intelligence V28, # 12, 2006

    image - binary image with foreground as True
    gradient - the target gradient between 4-adjacent pixels
    max_iter - maximum # of iterations at a given level
    convergence - target fractional difference between values from previous
                  and next pass
    percentile - measure convergence at this percentile
    '''
    # Evaluate the poisson equation with zero-padded boundaries
    pe = np.zeros((image.shape[0] + 2, image.shape[1] + 2))
    if image.shape[0] > 64 and image.shape[1] > 64:
        #
        # Sub-sample to get seed values
        #
        sub_image = image[::2, ::2]
        sub_pe = poisson_equation(sub_image,
                                  gradient=gradient * 2,
                                  max_iter=max_iter,
                                  convergence=convergence)
        coordinates = np.mgrid[0:(sub_pe.shape[0] * 2),
                               0:(sub_pe.shape[1] * 2)].astype(float) / 2
        pe[1:(sub_image.shape[0] * 2 + 1),
           1:(sub_image.shape[1] * 2 + 1)] = \
            scind.map_coordinates(sub_pe, coordinates, order=1)
        # Zero the background pixels of the upsampled seed. Note the
        # one-pixel padding: the interior of pe is pe[1:-1, 1:-1].
        pe[1:image.shape[0] + 1, 1:image.shape[1] + 1][~image] = 0
    else:
        pe[1:-1, 1:-1] = image
    #
    # evaluate only at i and j within the foreground
    #
    i, j = np.mgrid[0:pe.shape[0], 0:pe.shape[1]]
    mask = (i > 0) & (i < pe.shape[0] - 1) & (j > 0) & (j < pe.shape[1] - 1)
    mask[mask] = image[i[mask] - 1, j[mask] - 1]
    i = i[mask]
    j = j[mask]
    if len(i) == 0:
        return pe[1:-1, 1:-1]
    if len(i) == 1:
        # Just in case "percentile" can't work when unable to interpolate
        # between a single value... Isolated pixels have value = 1
        #
        pe[mask] = 1
        return pe[1:-1, 1:-1]
    for itr in range(max_iter):
        next_pe = (pe[i + 1, j] + pe[i - 1, j] +
                   pe[i, j + 1] + pe[i, j - 1]) / 4 + 1
        difference = np.abs((pe[mask] - next_pe) / next_pe)
        pe[mask] = next_pe
        if np.percentile(difference, percentile) <= convergence:
            break
    return pe[1:-1, 1:-1]
python
{ "resource": "" }
q17781
KalmanState.predicted_state_vec
train
def predicted_state_vec(self): '''The predicted state vector for the next time point From Welch eqn 1.9 ''' if not self.has_cached_predicted_state_vec: self.p_state_vec = dot_n( self.translation_matrix, self.state_vec[:, :, np.newaxis])[:,:,0] return self.p_state_vec
python
{ "resource": "" }
q17782
KalmanState.predicted_obs_vec
train
def predicted_obs_vec(self): '''The predicted observation vector The observation vector for the next step in the filter. ''' if not self.has_cached_obs_vec: self.obs_vec = dot_n( self.observation_matrix, self.predicted_state_vec[:,:,np.newaxis])[:,:,0] return self.obs_vec
python
{ "resource": "" }
q17783
KalmanState.map_frames
train
def map_frames(self, old_indices): '''Rewrite the feature indexes based on the next frame's identities old_indices - for each feature in the new frame, the index of the old feature ''' nfeatures = len(old_indices) noldfeatures = len(self.state_vec) if nfeatures > 0: self.state_vec = self.state_vec[old_indices] self.state_cov = self.state_cov[old_indices] self.noise_var = self.noise_var[old_indices] if self.has_cached_obs_vec: self.obs_vec = self.obs_vec[old_indices] if self.has_cached_predicted_state_vec: self.p_state_vec = self.p_state_vec[old_indices] if len(self.state_noise_idx) > 0: # # We have to renumber the new_state_noise indices and get rid # of those that don't map to numbers. Typical index trick here: # * create an array for each legal old element: -1 = no match # * give each old element in the array the new number # * Filter out the "no match" elements. # reverse_indices = -np.ones(noldfeatures, int) reverse_indices[old_indices] = np.arange(nfeatures) self.state_noise_idx = reverse_indices[self.state_noise_idx] self.state_noise = self.state_noise[self.state_noise_idx != -1,:] self.state_noise_idx = self.state_noise_idx[self.state_noise_idx != -1]
python
{ "resource": "" }
q17784
KalmanState.add_features
train
def add_features(self, kept_indices, new_indices,
                 new_state_vec, new_state_cov, new_noise_var):
    '''Add new features to the state

    kept_indices - the mapping from all indices in the state to new
                   indices in the new version
    new_indices - the indices of the new features in the new version
    new_state_vec - the state vectors for the new indices
    new_state_cov - the covariance matrices for the new indices
    new_noise_var - the noise variances for the new indices
    '''
    assert len(kept_indices) == len(self.state_vec)
    assert len(new_indices) == len(new_state_vec)
    assert len(new_indices) == len(new_state_cov)
    assert len(new_indices) == len(new_noise_var)
    if self.has_cached_obs_vec:
        del self.obs_vec
    if self.has_cached_predicted_state_vec:
        # clear the cached attribute behind the predicted_state_vec
        # property (p_state_vec), not the property itself
        del self.p_state_vec
    nfeatures = len(kept_indices) + len(new_indices)
    next_state_vec = np.zeros((nfeatures, self.state_len))
    next_state_cov = np.zeros((nfeatures, self.state_len, self.state_len))
    next_noise_var = np.zeros((nfeatures, self.state_len))
    if len(kept_indices) > 0:
        next_state_vec[kept_indices] = self.state_vec
        next_state_cov[kept_indices] = self.state_cov
        next_noise_var[kept_indices] = self.noise_var
        if len(self.state_noise_idx) > 0:
            self.state_noise_idx = kept_indices[self.state_noise_idx]
    if len(new_indices) > 0:
        next_state_vec[new_indices] = new_state_vec
        next_state_cov[new_indices] = new_state_cov
        next_noise_var[new_indices] = new_noise_var
    self.state_vec = next_state_vec
    self.state_cov = next_state_cov
    self.noise_var = next_noise_var
python
{ "resource": "" }
q17785
KalmanState.deep_copy
train
def deep_copy(self): '''Return a deep copy of the state''' c = KalmanState(self.observation_matrix, self.translation_matrix) c.state_vec = self.state_vec.copy() c.state_cov = self.state_cov.copy() c.noise_var = self.noise_var.copy() c.state_noise = self.state_noise.copy() c.state_noise_idx = self.state_noise_idx.copy() return c
python
{ "resource": "" }
q17786
spline_factors
train
def spline_factors(u):
    '''Return the four uniform cubic B-spline blending factors evaluated at u

    u - np.array of parameter values
    '''
    X = np.array([(1. - u) ** 3,
                  4 - (6. * (u ** 2)) + (3. * (u ** 3)),
                  1. + (3. * u) + (3. * (u ** 2)) - (3. * (u ** 3)),
                  u ** 3]) * (1. / 6)
    return X
python
{ "resource": "" }
q17787
gauss
train
def gauss(x, m_y, sigma):
    '''returns the gaussian with mean m_y and std. dev. sigma, calculated
    at the points of x.'''
    x = np.asarray(x, dtype=float)
    return np.exp(-(x - m_y) ** 2 / (2.0 * float(sigma) ** 2)) / \
        (float(sigma) * np.sqrt(2 * np.pi))
python
{ "resource": "" }
q17788
d2gauss
train
def d2gauss(x, m_y, sigma):
    '''returns the second derivative of the gaussian with mean m_y, and
    standard deviation sigma, calculated at the points of x.'''
    x = np.asarray(x, dtype=float)
    return gauss(x, m_y, sigma) * ((x - m_y) ** 2 / sigma ** 4 -
                                   1.0 / sigma ** 2)
python
{ "resource": "" }
q17789
spline_matrix2d
train
def spline_matrix2d(x, y, px, py, mask=None):
    '''Build a 2-d spline basis matrix as the Kronecker product of the 1-d ones

    For boundary constraints, the first two and last two spline pieces are
    constrained to be part of the same cubic curve.
    '''
    V = np.kron(spline_matrix(x, px), spline_matrix(y, py))
    if mask is not None:
        indices = np.nonzero(mask.T.flatten())
        # np.matrix inputs flatten to 2-d, so nonzero returns both row and
        # column indices; pick out the column indices in that case
        if len(indices) > 1:
            indices = np.nonzero(mask.T.flatten())[1][0]
        newV = V.T[indices]
        V = newV.T
        V = V.reshape((V.shape[0], V.shape[1]))
    return V
python
{ "resource": "" }
q17790
otsu
train
def otsu(data, min_threshold=None, max_threshold=None, bins=256):
    """Compute a threshold using Otsu's method

    data - an array of intensity values between zero and one
    min_threshold - only consider thresholds above this minimum value
    max_threshold - only consider thresholds below this maximum value
    bins - we bin the data into this many equally-spaced bins, then pick
           the bin index that optimizes the metric
    """
    assert min_threshold is None or max_threshold is None or min_threshold < max_threshold
    def constrain(threshold):
        if min_threshold is not None and threshold < min_threshold:
            threshold = min_threshold
        if max_threshold is not None and threshold > max_threshold:
            threshold = max_threshold
        return threshold

    data = np.atleast_1d(data)
    data = data[~ np.isnan(data)]
    if len(data) == 0:
        return (min_threshold if min_threshold is not None
                else max_threshold if max_threshold is not None
                else 0)
    elif len(data) == 1:
        return constrain(data[0])
    if bins > len(data):
        bins = len(data)
    data.sort()
    var = running_variance(data)
    rvar = np.flipud(running_variance(np.flipud(data)))
    thresholds = data[1:len(data):len(data) // bins]
    score_low = (var[0:len(data) - 1:len(data) // bins] *
                 np.arange(0, len(data) - 1, len(data) // bins))
    score_high = (rvar[1:len(data):len(data) // bins] *
                  (len(data) - np.arange(1, len(data), len(data) // bins)))
    scores = score_low + score_high
    if len(scores) == 0:
        return constrain(thresholds[0])
    index = np.argwhere(scores == scores.min()).flatten()
    if len(index) == 0:
        return constrain(thresholds[0])
    #
    # Take the average of the thresholds to either side of
    # the chosen value to get an intermediate in cases where there is
    # a steep step between the background and foreground
    #
    index = index[0]
    if index == 0:
        index_low = 0
    else:
        index_low = index - 1
    if index == len(thresholds) - 1:
        index_high = len(thresholds) - 1
    else:
        index_high = index + 1
    return constrain((thresholds[index_low] + thresholds[index_high]) / 2)
python
{ "resource": "" }
q17791
entropy
train
def entropy(data, bins=256): """Compute a threshold using Ray's entropy measurement data - an array of intensity values between zero and one bins - we bin the data into this many equally-spaced bins, then pick the bin index that optimizes the metric """ data = np.atleast_1d(data) data = data[~ np.isnan(data)] if len(data) == 0: return 0 elif len(data) == 1: return data[0] if bins > len(data): bins = len(data) data.sort() var = running_variance(data)+1.0/512.0 rvar = np.flipud(running_variance(np.flipud(data)))+1.0/512.0 thresholds = data[1:len(data):len(data)//bins] w = np.arange(0,len(data)-1,len(data)//bins) score_low = w * np.log(var[0:len(data)-1:len(data)//bins] * w * np.sqrt(2*np.pi*np.exp(1))) score_low[np.isnan(score_low)]=0 w = len(data) - np.arange(1,len(data),len(data)//bins) score_high = w * np.log(rvar[1:len(data):len(data)//bins] * w * np.sqrt(2*np.pi*np.exp(1))) score_high[np.isnan(score_high)]=0 scores = score_low + score_high index = np.argwhere(scores == scores.min()).flatten() if len(index)==0: return thresholds[0] # # Take the average of the thresholds to either side of # the chosen value to get an intermediate in cases where there is # a steep step between the background and foreground index = index[0] if index == 0: index_low = 0 else: index_low = index-1 if index == len(thresholds)-1: index_high = len(thresholds)-1 else: index_high = index+1 return (thresholds[index_low]+thresholds[index_high]) / 2
python
{ "resource": "" }
q17792
otsu3
train
def otsu3(data, min_threshold=None, max_threshold=None, bins=128):
    """Compute a threshold using a 3-category Otsu-like method

    data - an array of intensity values between zero and one
    min_threshold - only consider thresholds above this minimum value
    max_threshold - only consider thresholds below this maximum value
    bins - we bin the data into this many equally-spaced bins, then pick
           the bin index that optimizes the metric

    We find the maximum weighted variance, breaking the histogram into
    three pieces.
    Returns the lower and upper thresholds
    """
    assert min_threshold is None or max_threshold is None or min_threshold < max_threshold
    #
    # Compute the running variance and reverse running variance.
    #
    data = np.atleast_1d(data)
    data = data[~ np.isnan(data)]
    data.sort()
    if len(data) == 0:
        return 0
    var = running_variance(data)
    rvar = np.flipud(running_variance(np.flipud(data)))
    if bins > len(data):
        bins = len(data)
    bin_len = int(len(data) // bins)
    thresholds = data[0:len(data):bin_len]
    score_low = (var[0:len(data):bin_len] *
                 np.arange(0, len(data), bin_len))
    score_high = (rvar[0:len(data):bin_len] *
                  (len(data) - np.arange(0, len(data), bin_len)))
    #
    # Compute the middles
    #
    cs = data.cumsum()
    cs2 = (data ** 2).cumsum()
    i, j = np.mgrid[0:score_low.shape[0], 0:score_high.shape[0]] * bin_len
    diff = (j - i).astype(float)
    w = diff
    mean = (cs[j] - cs[i]) / diff
    mean2 = (cs2[j] - cs2[i]) / diff
    score_middle = w * (mean2 - mean ** 2)
    # np.Inf was removed from NumPy; np.inf is the canonical spelling
    score_middle[i >= j] = np.inf
    score = (score_low[i * bins // len(data)] + score_middle +
             score_high[j * bins // len(data)])
    best_score = np.min(score)
    best_i_j = np.argwhere(score == best_score)
    return (thresholds[best_i_j[0, 0]], thresholds[best_i_j[0, 1]])
python
{ "resource": "" }
q17793
outline
train
def outline(labels): """Given a label matrix, return a matrix of the outlines of the labeled objects If a pixel is not zero and has at least one neighbor with a different value, then it is part of the outline. """ output = numpy.zeros(labels.shape, labels.dtype) lr_different = labels[1:,:]!=labels[:-1,:] ud_different = labels[:,1:]!=labels[:,:-1] d1_different = labels[1:,1:]!=labels[:-1,:-1] d2_different = labels[1:,:-1]!=labels[:-1,1:] different = numpy.zeros(labels.shape, bool) different[1:,:][lr_different] = True different[:-1,:][lr_different] = True different[:,1:][ud_different] = True different[:,:-1][ud_different] = True different[1:,1:][d1_different] = True different[:-1,:-1][d1_different] = True different[1:,:-1][d2_different] = True different[:-1,1:][d2_different] = True # # Labels on edges need outlines # different[0,:] = True different[:,0] = True different[-1,:] = True different[:,-1] = True output[different] = labels[different] return output
python
{ "resource": "" }
q17794
euclidean_dist
train
def euclidean_dist(point1, point2): """Compute the Euclidean distance between two points. Parameters ---------- point1, point2 : 2-tuples of float The input points. Returns ------- d : float The distance between the input points. Examples -------- >>> point1 = (1.0, 2.0) >>> point2 = (4.0, 6.0) # (3., 4.) away, simplest Pythagorean triangle >>> euclidean_dist(point1, point2) 5.0 """ (x1, y1) = point1 (x2, y2) = point2 return math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
python
{ "resource": "" }
q17795
Trace.from_detections_assignment
train
def from_detections_assignment(detections_1, detections_2, assignments):
    """ Creates traces out of the given assignment and cell data.

    detections_1, detections_2 - detections in the first and second frame
    assignments - dict mapping indices in detections_1 to indices in
                  detections_2; out-of-range indices (padding entries)
                  are skipped
    """
    traces = []
    for d1n, d2n in six.iteritems(assignments):
        # check if the match is between existing cells
        if d1n < len(detections_1) and d2n < len(detections_2):
            traces.append(Trace(detections_1[d1n], detections_2[d2n]))
    return traces
python
{ "resource": "" }
q17796
NeighbourMovementTracking.run_tracking
train
def run_tracking(self, label_image_1, label_image_2):
    """
    Tracks cells between input label images.
    @returns: injective function from old objects to new objects (pairs of
    [old, new]). Numbers are compatible with labels.
    """
    self.scale = self.parameters_tracking["avgCellDiameter"] / 35.0

    detections_1 = self.derive_detections(label_image_1)
    detections_2 = self.derive_detections(label_image_2)

    # Calculate tracking based on cell features and position.
    traces = self.find_initials_traces(detections_1, detections_2)

    # Use neighbourhoods to improve tracking.
    for _ in range(int(self.parameters_tracking["iterations"])):
        traces = self.improve_traces(detections_1, detections_2, traces)

    # Convert traces to (old label, new label) pairs.
    return [(trace.previous_cell.number, trace.current_cell.number)
            for trace in traces]
python
{ "resource": "" }
q17797
NeighbourMovementTracking.is_cell_big
train
def is_cell_big(self, cell_detection):
    """ Check if the cell is considered big.
    @param CellFeature cell_detection: detection whose area is tested
    @return: True if the cell area exceeds the scaled big_size threshold
    """
    return cell_detection.area > self.parameters_tracking["big_size"] * self.scale * self.scale
python
{ "resource": "" }
q17798
NeighbourMovementTracking.calculate_basic_cost
train
def calculate_basic_cost(self, d1, d2):
    """ Calculates the assignment cost between two cells: the scaled
    centroid distance plus a weighted relative change in area.
    """
    distance = euclidean_dist(d1.center, d2.center) / self.scale
    area_change = 1 - min(d1.area, d2.area) / max(d1.area, d2.area)
    return distance + self.parameters_cost_initial["area_weight"] * area_change
python
{ "resource": "" }
q17799
NeighbourMovementTracking.calculate_localised_cost
train
def calculate_localised_cost(self, d1, d2, neighbours, motions):
    """ Calculates the assignment cost between two cells taking into account
    the movement of the cell's neighbours.
    :param CellFeatures d1: detection in first frame
    :param CellFeatures d2: detection in second frame
    :param dict neighbours: maps each first-frame detection to its
        neighbouring first-frame detections
    :param dict motions: maps first-frame detections to their displacement
        vectors; a detection is absent if it has no trace (the cell is
        considered to vanish)
    """
    my_nbrs_with_motion = [n for n in neighbours[d1] if n in motions]
    my_motion = (d1.center[0] - d2.center[0], d1.center[1] - d2.center[1])

    if my_nbrs_with_motion == []:
        distance = euclidean_dist(d1.center, d2.center) / self.scale
    else:
        # compare this candidate's motion with the closest neighbour motion
        distance = min([euclidean_dist(my_motion, motions[n])
                        for n in my_nbrs_with_motion]) / self.scale

    area_change = 1 - min(d1.area, d2.area) / max(d1.area, d2.area)
    return distance + self.parameters_cost_iteration["area_weight"] * area_change
python
{ "resource": "" }