def url(self, suffix=""):
    """
    Return a constructed URL, appending an optional suffix (URI path).

    Arguments:
        suffix (str : ""): The suffix to append to the end of the URL

    Returns:
        str: The complete URL
    """
    return super(neuroRemote, self).url('{}/'.format(self._ext) + suffix)
def reserve_ids(self, token, channel, quantity):
    """
    Requests a list of next-available-IDs from the server.

    Arguments:
        token (str): The token to reserve IDs in
        channel (str): The channel to reserve IDs in
        quantity (int): The number of IDs to reserve

    Returns:
        int[quantity]: List of IDs you've been granted
    """
    quantity = str(quantity)
    url = self.url("{}/{}/reserve/{}/".format(token, channel, quantity))
    req = self.remote_utils.get_url(url)
    if req.status_code != 200:
        raise RemoteDataNotFoundError(
            'Invalid req: {}'.format(req.status_code))
    out = req.json()
    return [out[0] + i for i in range(out[1])]
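A usage sketch for reserve_ids above (and merge_ids below); the `nd` Remote instance, the token/channel names, and `new_annotations` are placeholders, not part of the library:

ids = nd.reserve_ids('my_token', 'my_annotation_channel',
                     quantity=len(new_annotations))
for ramon, new_id in zip(new_annotations, ids):
    ramon.id = new_id   # server guarantees these IDs are not yet claimed
# Later, redundant objects can be collapsed into the first reserved ID:
# nd.merge_ids('my_token', 'my_annotation_channel', ids, delete=True)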
def merge_ids(self, token, channel, ids, delete=False):
    """
    Call the restful endpoint to merge two RAMON objects into one.

    Arguments:
        token (str): The token to inspect
        channel (str): The channel to inspect
        ids (int[]): The list of IDs to merge
        delete (bool : False): Whether to delete after merging

    Returns:
        json: The ID as returned by ndstore
    """
    url = self.url() + "/merge/{}/".format(','.join([str(i) for i in ids]))
    req = self.remote_utils.get_url(url)
    if req.status_code != 200:
        raise RemoteDataUploadError('Could not merge ids {}'.format(
            ','.join([str(i) for i in ids])))
    if delete:
        self.delete_ramon(token, channel, ids[1:])
    return True
def create_channels(self, dataset, token, new_channels_data):
    """
    Creates channels given a dictionary in 'new_channels_data', a
    'dataset' name, and a 'token' (project) name.

    Arguments:
        token (str): Token to identify project
        dataset (str): Dataset name to identify dataset to download from
        new_channels_data (dict): New channel data to upload into new
            channels

    Returns:
        bool: True if the process completed successfully
    """
    channels = {}
    for channel_new in new_channels_data:
        self._check_channel(channel_new.name)
        if channel_new.channel_type not in ['image', 'annotation']:
            raise ValueError('Channel type must be '
                             'neuroRemote.IMAGE or '
                             'neuroRemote.ANNOTATION.')
        if channel_new.readonly * 1 not in [0, 1]:
            raise ValueError("readonly must be 0 (False) or 1 (True).")
        channels[channel_new.name] = {
            "channel_name": channel_new.name,
            "channel_type": channel_new.channel_type,
            "datatype": channel_new.dtype,
            "readonly": channel_new.readonly * 1
        }
    req = requests.post(self.url("/{}/project/".format(dataset) +
                                 "{}".format(token)),
                        json={"channels": channels}, verify=False)
    if req.status_code != 201:
        raise RemoteDataUploadError('Could not upload {}'.format(req.text))
    return True
def propagate(self, token, channel):
    """
    Kick off the propagate function on the remote server.

    Arguments:
        token (str): The token to propagate
        channel (str): The channel to propagate

    Returns:
        boolean: Success
    """
    if self.get_propagate_status(token, channel) != u'0':
        return
    url = self.url('sd/{}/{}/setPropagate/1/'.format(token, channel))
    req = self.remote_utils.get_url(url)
    if req.status_code != 200:
        raise RemoteDataUploadError('Propagate fail: {}'.format(req.text))
    return True
def get_propagate_status(self, token, channel):
    """
    Get the propagate status for a token/channel pair.

    Arguments:
        token (str): The token to check
        channel (str): The channel to check

    Returns:
        str: The status code
    """
    url = self.url('sd/{}/{}/getPropagate/'.format(token, channel))
    req = self.remote_utils.get_url(url)
    if req.status_code != 200:
        raise ValueError('Bad pair: {}/{}'.format(token, channel))
    return req.text
def create_project(self, project_name, dataset_name, hostname, is_public, s3backend=0, kvserver='localhost', kvengine='MySQL', mdengine='MySQL', description=''): """ Creates a project with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on hostname (str): Hostname s3backend (str): S3 region to save the data in is_public (int): 1 is public. 0 is not public. kvserver (str): Server to store key value pairs in kvengine (str): Database to store key value pairs in mdengine (str): ??? description (str): Description for your project Returns: bool: True if project created, false if not created. """ url = self.url() + "/resource/dataset/{}".format( dataset_name) + "/project/{}/".format(project_name) json = { "project_name": project_name, "host": hostname, "s3backend": s3backend, "public": is_public, "kvserver": kvserver, "kvengine": kvengine, "mdengine": mdengine, "project_description": description } req = self.remote_utils.post_url(url, json=json) if req.status_code is not 201: raise RemoteDataUploadError('Could not upload {}'.format(req)) if req.content == "" or req.content == b'': return True else: return False
def list_projects(self, dataset_name):
    """
    Lists a set of projects related to a dataset.

    Arguments:
        dataset_name (str): Dataset name to search projects for

    Returns:
        dict: Projects found based on dataset query
    """
    url = self.url() + "/nd/resource/dataset/{}".format(dataset_name) \
        + "/project/"
    req = self.remote_utils.get_url(url)
    if req.status_code != 200:
        raise RemoteDataNotFoundError('Could not find {}'.format(req.text))
    return req.json()
def create_token(self, token_name, project_name, dataset_name, is_public):
    """
    Creates a token with the given parameters.

    Arguments:
        project_name (str): Project name
        dataset_name (str): Dataset name the project is based on
        token_name (str): Token name
        is_public (int): 1 is public, 0 is not public

    Returns:
        bool: True if the token was created, False if not.
    """
    url = self.url() + '/nd/resource/dataset/{}'.format(
        dataset_name) + '/project/{}'.format(project_name) + \
        '/token/{}/'.format(token_name)
    json = {
        "token_name": token_name,
        "public": is_public
    }
    req = self.remote_utils.post_url(url, json=json)
    if req.status_code != 201:
        raise RemoteDataUploadError('Could not upload {}'.format(req.text))
    return req.content == "" or req.content == b''
def get_token(self, token_name, project_name, dataset_name):
    """
    Get a token with the given parameters.

    Arguments:
        project_name (str): Project name
        dataset_name (str): Dataset name the project is based on
        token_name (str): Token name

    Returns:
        dict: Token info
    """
    url = self.url() + "/nd/resource/dataset/{}".format(dataset_name) \
        + "/project/{}".format(project_name) \
        + "/token/{}/".format(token_name)
    req = self.remote_utils.get_url(url)
    if req.status_code != 200:
        raise RemoteDataUploadError('Could not find {}'.format(req.text))
    return req.json()
def delete_token(self, token_name, project_name, dataset_name):
    """
    Delete a token with the given parameters.

    Arguments:
        project_name (str): Project name
        dataset_name (str): Dataset name the project is based on
        token_name (str): Token name

    Returns:
        bool: True if the token was deleted, False if not.
    """
    url = self.url() + "/nd/resource/dataset/{}".format(dataset_name) \
        + "/project/{}".format(project_name) \
        + "/token/{}/".format(token_name)
    req = self.remote_utils.delete_url(url)
    if req.status_code != 204:
        raise RemoteDataUploadError("Could not delete {}".format(req.text))
    return req.content == "" or req.content == b''
def list_tokens(self):
    """
    Lists the set of public tokens available in Neurodata.

    Arguments:
        None

    Returns:
        dict: Public tokens found in Neurodata
    """
    url = self.url() + "/nd/resource/public/token/"
    req = self.remote_utils.get_url(url)
    if req.status_code != 200:
        raise RemoteDataNotFoundError('Could not find {}'.format(req.text))
    return req.json()
def create_dataset(self, name, x_img_size, y_img_size, z_img_size, x_vox_res, y_vox_res, z_vox_res, x_offset=0, y_offset=0, z_offset=0, scaling_levels=0, scaling_option=0, dataset_description="", is_public=0): """ Creates a dataset. Arguments: name (str): Name of dataset x_img_size (int): max x coordinate of image size y_img_size (int): max y coordinate of image size z_img_size (int): max z coordinate of image size x_vox_res (float): x voxel resolution y_vox_res (float): y voxel resolution z_vox_res (float): z voxel resolution x_offset (int): x offset amount y_offset (int): y offset amount z_offset (int): z offset amount scaling_levels (int): Level of resolution scaling scaling_option (int): Z slices is 0 or Isotropic is 1 dataset_description (str): Your description of the dataset is_public (int): 1 'true' or 0 'false' for viewability of data set in public Returns: bool: True if dataset created, False if not """ url = self.url() + "/resource/dataset/{}".format(name) json = { "dataset_name": name, "ximagesize": x_img_size, "yimagesize": y_img_size, "zimagesize": z_img_size, "xvoxelres": x_vox_res, "yvoxelres": y_vox_res, "zvoxelres": z_vox_res, "xoffset": x_offset, "yoffset": y_offset, "zoffset": z_offset, "scalinglevels": scaling_levels, "scalingoption": scaling_option, "dataset_description": dataset_description, "public": is_public } req = self.remote_utils.post_url(url, json=json) if req.status_code is not 201: raise RemoteDataUploadError('Could not upload {}'.format(req.text)) if req.content == "" or req.content == b'': return True else: return False
def get_dataset(self, name):
    """
    Returns info regarding a particular dataset.

    Arguments:
        name (str): Dataset name

    Returns:
        dict: Dataset information
    """
    url = self.url() + "/resource/dataset/{}".format(name)
    req = self.remote_utils.get_url(url)
    if req.status_code != 200:
        raise RemoteDataNotFoundError('Could not find {}'.format(req.text))
    return req.json()
def list_datasets(self, get_global_public):
    """
    Lists datasets in resources. Setting 'get_global_public' to 'True'
    retrieves all public datasets in the cloud; 'False' retrieves only the
    user's public datasets.

    Arguments:
        get_global_public (bool): True if the user wants all public
            datasets in the cloud. False for only their public datasets.

    Returns:
        dict: Returns datasets in JSON format
    """
    appending = ""
    if get_global_public:
        appending = "public"
    url = self.url() + "/resource/{}dataset/".format(appending)
    req = self.remote_utils.get_url(url)
    if req.status_code != 200:
        raise RemoteDataNotFoundError('Could not find {}'.format(req.text))
    return req.json()
def delete_dataset(self, name):
    """
    Deletes a dataset.

    Arguments:
        name (str): Name of dataset to delete

    Returns:
        bool: True if dataset deleted, False if not
    """
    url = self.url() + "/resource/dataset/{}".format(name)
    req = self.remote_utils.delete_url(url)
    if req.status_code != 204:
        raise RemoteDataUploadError('Could not delete {}'.format(req.text))
    return req.content == "" or req.content == b''
def create_channel(self, channel_name, project_name, dataset_name, channel_type, dtype, startwindow, endwindow, readonly=0, start_time=0, end_time=0, propagate=0, resolution=0, channel_description=''): """ Create a new channel on the Remote, using channel_data. Arguments: channel_name (str): Channel name project_name (str): Project name dataset_name (str): Dataset name channel_type (str): Type of the channel (e.g. `neurodata.IMAGE`) dtype (str): The datatype of the channel's data (e.g. `uint8`) startwindow (int): Window to start in endwindow (int): Window to end in readonly (int): Can others write to this channel? propagate (int): Allow propogation? 1 is True, 0 is False resolution (int): Resolution scaling channel_description (str): Your description of the channel Returns: bool: `True` if successful, `False` otherwise. Raises: ValueError: If your args were bad :( RemoteDataUploadError: If the channel data is valid but upload fails for some other reason. """ self._check_channel(channel_name) if channel_type not in ['image', 'annotation', 'timeseries']: raise ValueError('Channel type must be ' + 'neurodata.IMAGE or neurodata.ANNOTATION.') if readonly * 1 not in [0, 1]: raise ValueError("readonly must be 0 (False) or 1 (True).") # Good job! You supplied very nice arguments. url = self.url() + "/nd/resource/dataset/{}".format(dataset_name)\ + "/project/{}".format(project_name) + \ "/channel/{}/".format(channel_name) json = { "channel_name": channel_name, "channel_type": channel_type, "channel_datatype": dtype, "startwindow": startwindow, "endwindow": endwindow, 'starttime': start_time, 'endtime': end_time, 'readonly': readonly, 'propagate': propagate, 'resolution': resolution, 'channel_description': channel_description } req = self.remote_utils.post_url(url, json=json) if req.status_code is not 201: raise RemoteDataUploadError('Could not upload {}'.format(req.text)) if req.content == "" or req.content == b'': return True else: return False
def get_channel(self, channel_name, project_name, dataset_name):
    """
    Gets info about a channel given its name, the name of its project,
    and the name of its dataset.

    Arguments:
        channel_name (str): Channel name
        project_name (str): Project name
        dataset_name (str): Dataset name

    Returns:
        dict: Channel info
    """
    url = self.url() + "/nd/resource/dataset/{}".format(dataset_name) \
        + "/project/{}".format(project_name) + \
        "/channel/{}/".format(channel_name)
    req = self.remote_utils.get_url(url)
    if req.status_code != 200:
        raise RemoteDataNotFoundError('Could not find {}'.format(req.text))
    return req.json()
def parse(self): """Parse show subcommand.""" parser = self.subparser.add_parser( "show", help="Show workspace details", description="Show workspace details.") group = parser.add_mutually_exclusive_group(required=True) group.add_argument('--all', action='store_true', help="All workspaces") group.add_argument('name', type=str, help="Workspace name", nargs='?')
def execute(self, args):
    """Execute show subcommand."""
    if args.name is not None:
        self.show_workspace(slashes2dash(args.name))
    elif args.all:
        self.show_all()
def show_workspace(self, name): """Show specific workspace.""" if not self.workspace.exists(name): raise ValueError("Workspace `%s` doesn't exists." % name) color = Color() workspaces = self.workspace.list() self.logger.info("<== %s workspace ==>" % color.colored(name, "green")) self.logger.info("\tPath: %s" % workspaces[name]["path"]) self.logger.info("\tNumber of repositories: %s" % color.colored( len(workspaces[name]["repositories"]), "yellow")) repo_colored = color.colored("Repositories", "blue") path_colored = color.colored("Path", "blue") trepositories = PrettyTable( [repo_colored, path_colored, color.colored("+", "blue")]) trepositories.align[repo_colored] = "l" trepositories.align[path_colored] = "l" for repo_name in workspaces[name]["repositories"]: fullname = "%s/%s" % (name, repo_name) fullpath = find_path(fullname, self.config)[fullname] try: repo = Repository(fullpath) repo_scm = repo.get_scm() except RepositoryAdapterNotFound: repo_scm = None trepositories.add_row( [color.colored(repo_name, "cyan"), fullpath, repo_scm]) self.logger.info(trepositories)
def show_all(self): """Show details for all workspaces.""" for ws in self.workspace.list().keys(): self.show_workspace(ws) print("\n\n")
def url(self, endpoint=''):
    """
    Get the base URL of the Remote, optionally joined with an endpoint.

    Arguments:
        endpoint (str : ''): A path to append to the base URL

    Returns:
        `str` base URL
    """
    if not endpoint.startswith('/'):
        endpoint = "/" + endpoint
    return self.protocol + "://" + self.hostname + endpoint
def ping(self, endpoint=''):
    """
    Ping the server to make sure that you can access the base URL.

    Arguments:
        endpoint (str : ''): The endpoint to append to the base URL

    Returns:
        `int`: The HTTP status code of the response (200 on success)
    """
    r = requests.get(self.url() + "/" + endpoint)
    return r.status_code
def export_dae(filename, cutout, level=0): """ Converts a dense annotation to a DAE, using Marching Cubes (PyMCubes). Arguments: filename (str): The filename to write out to cutout (numpy.ndarray): The dense annotation level (int): The level at which to run mcubes Returns: boolean success """ if ".dae" not in filename: filename = filename + ".dae" vs, fs = mcubes.marching_cubes(cutout, level) mcubes.export_mesh(vs, fs, filename, "ndioexport")
def export_obj(filename, cutout, level=0): """ Converts a dense annotation to a obj, using Marching Cubes (PyMCubes). Arguments: filename (str): The filename to write out to cutout (numpy.ndarray): The dense annotation level (int): The level at which to run mcubes Returns: boolean success """ if ".obj" not in filename: filename = filename + ".obj" vs, fs = mcubes.marching_cubes(cutout, level) mcubes.export_obj(vs, fs, filename)
def export_ply(filename, cutout, level=0):
    """
    Converts a dense annotation to a .PLY, using Marching Cubes (PyMCubes).

    Arguments:
        filename (str): The filename to write out to
        cutout (numpy.ndarray): The dense annotation
        level (int): The level at which to run mcubes

    Returns:
        boolean success
    """
    if ".ply" not in filename:
        filename = filename + ".ply"

    vs, fs = mcubes.marching_cubes(cutout, level)

    with open(filename, 'w') as fh:
        lines = [
            "ply",
            "format ascii 1.0",
            "comment generated by ndio",
            "element vertex " + str(len(vs)),
            "property float32 x",
            "property float32 y",
            "property float32 z",
            "element face " + str(len(fs)),
            "property list uint8 int32 vertex_index",
            "end_header"
        ]
        # Each header line, vertex, and face must end in a newline for the
        # PLY file to be readable.
        fh.write("\n".join(lines) + "\n")

        for v in vs:
            fh.write("{} {} {}\n".format(v[0], v[1], v[2]))

        for f in fs:
            fh.write("3 {} {} {}\n".format(f[0], f[1], f[2]))
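The three exporters above are driven the same way; in this sketch the filename and the `annotation_volume` array are placeholders, and PyMCubes must be importable as `mcubes`:

vol = annotation_volume           # a dense numpy.ndarray of labels (placeholder)
export_obj('neuron_42', vol)      # writes neuron_42.obj
export_ply('neuron_42', vol)      # writes neuron_42.ply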
def _guess_format_from_extension(ext): """ Guess the appropriate data type from file extension. Arguments: ext: The file extension (period optional) Returns: String. The format (without leading period), or False if none was found or couldn't be guessed """ ext = ext.strip('.') # We look through FILE_FORMATS for this extension. # - If it appears zero times, return False. We can't guess. # - If it appears once, we can simply return that format. # - If it appears more than once, we can't guess (it's ambiguous, # e.g .m = RAMON or MATLAB) formats = [] for fmt in FILE_FORMATS: if ext in FILE_FORMATS[fmt]: formats.append(fmt) if formats == [] or len(formats) > 1: return False return formats[0]
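The lookup above relies on a module-level FILE_FORMATS table mapping format names to extensions; the shape sketched below is an assumption for illustration only, as is the ambiguous '.m' entry:

FILE_FORMATS = {
    'hdf5': ['hdf5', 'h5'],
    'tiff': ['tif', 'tiff'],
    'matlab': ['m', 'mat'],
    'ramon': ['m'],
}

_guess_format_from_extension('.tif')   # 'tiff'  (exactly one match)
_guess_format_from_extension('.m')     # False   (ambiguous: matlab or ramon)
_guess_format_from_extension('.xyz')   # False   (no match)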
def open(in_file, in_fmt=None):
    """
    Reads in a file from disk.

    Arguments:
        in_file: The name of the file to read in
        in_fmt: The format of in_file, if you want to be explicit

    Returns:
        PIL.Image: The opened image
    """
    fmt = in_file.split('.')[-1]
    if in_fmt:
        fmt = in_fmt
    fmt = fmt.lower()
    if fmt in ['png', 'jpg', 'tiff', 'tif', 'jpeg']:
        return Image.open(in_file)
    else:
        raise NotImplementedError("Cannot open file of type {}".format(fmt))
def convert(in_file, out_file, in_fmt="", out_fmt=""):
    """
    Converts in_file to out_file, guessing datatype in the absence of
    in_fmt and out_fmt.

    Arguments:
        in_file: The name of the (existing) datafile to read
        out_file: The name of the file to create with converted data
        in_fmt: Optional. The format of incoming data, if not guessable
        out_fmt: Optional. The format of outgoing data, if not guessable

    Returns:
        String. Output filename
    """
    # First verify that in_file exists.
    in_file = os.path.expanduser(in_file)
    out_file = os.path.expanduser(out_file)

    if not os.path.exists(in_file):
        raise IOError("Input file {0} does not exist, stopping..."
                      .format(in_file))

    # Get formats, either by explicitly naming them or by guessing.
    # TODO: It'd be neat to check here if an explicit fmt matches the guess.
    in_fmt = in_fmt.lower() or _guess_format_from_extension(
        in_file.split('.')[-1].lower())
    out_fmt = out_fmt.lower() or _guess_format_from_extension(
        out_file.split('.')[-1].lower())

    if not in_fmt or not out_fmt:
        raise ValueError("Cannot determine conversion formats.")

    if in_fmt == out_fmt:
        # This is the case when this module (intended for LONI) is used
        # indiscriminately to 'funnel' data into one format.
        shutil.copyfile(in_file, out_file)
        return out_file

    # Import
    if in_fmt == 'hdf5':
        from . import hdf5
        data = hdf5.load(in_file)
    elif in_fmt == 'tiff':
        from . import tiff
        data = tiff.load(in_file)
    elif in_fmt == 'png':
        from . import png
        data = png.load(in_file)
    else:
        return _fail_pair_conversion(in_fmt, out_fmt)

    # Export
    if out_fmt == 'hdf5':
        from . import hdf5
        return hdf5.save(out_file, data)
    elif out_fmt == 'tiff':
        from . import tiff
        return tiff.save(out_file, data)
    elif out_fmt == 'png':
        from . import png
        return png.export_png(out_file, data)
    else:
        return _fail_pair_conversion(in_fmt, out_fmt)
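A minimal call pattern for the converter above; the filenames are placeholders and the hdf5/tiff/png helper modules it imports must be present in the package:

convert('~/data/stack.tiff', '~/data/stack.hdf5')            # tiff -> hdf5, both guessed
convert('~/data/slice_0000', '~/data/slice.png', in_fmt='png')  # explicit input format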
def build_graph(self, project, site, subject, session, scan, size, email=None, invariants=Invariants.ALL, fiber_file=DEFAULT_FIBER_FILE, atlas_file=None, use_threads=False, callback=None): """ Builds a graph using the graph-services endpoint. Arguments: project (str): The project to use site (str): The site in question subject (str): The subject's identifier session (str): The session (per subject) scan (str): The scan identifier size (str): Whether to return a big (grute.BIG) or small (grute.SMALL) graph. For a better explanation, see m2g.io. email (str : self.email)*: An email to notify invariants (str[]: Invariants.ALL)*: An array of invariants to compute. You can use the grute.Invariants class to construct a list, or simply pass grute.Invariants.ALL to compute them all. fiber_file (str: DEFAULT_FIBER_FILE)*: A local filename of an MRI Studio .dat file atlas_file (str: None)*: A local atlas file, in NIFTI .nii format. If none is specified, the Desikan atlas is used by default. use_threads (bool: False)*: Whether to run the download in a Python thread. If set to True, the call to `build_graph` will end quickly, and the `callback` will be called with the returned status-code of the restful call as its only argument. callback (function: None)*: The function to run upon completion of the call, if using threads. (Will not be called if use_threads is set to False.) Returns: HTTP Response if use_threads is False. Otherwise, None Raises: ValueError: When the supplied values are invalid (contain invalid characters, bad email address supplied, etc.) RemoteDataNotFoundError: When the data cannot be processed due to a server error. """ if email is None: email = self.email if not set(invariants) <= set(Invariants.ALL): raise ValueError("Invariants must be a subset of Invariants.ALL.") if use_threads and callback is not None: if not hasattr(callback, '__call__'): raise ValueError("callback must be a function.") if len(inspect.getargspec(callback).args) != 1: raise ValueError("callback must take exactly 1 argument.") # Once we get here, we know the callback is if size not in [self.BIG, self.SMALL]: raise ValueError("size must be either grute.BIG or grute.SMALL.") url = "buildgraph/{}/{}/{}/{}/{}/{}/{}/{}/".format( project, site, subject, session, scan, size, email, "/".join(invariants) ) if " " in url: raise ValueError("Arguments must not contain spaces.") if use_threads: # Run in the background. download_thread = threading.Thread( target=self._run_build_graph, args=[url, fiber_file, atlas_file, callback] ) download_thread.start() else: # Run in the foreground. return self._run_build_graph(url, fiber_file, atlas_file) return
def compute_invariants(self, graph_file, input_format, invariants=Invariants.ALL, email=None, use_threads=False, callback=None): """ Compute invariants from an existing GraphML file using the remote grute graph services. Arguments: graph_file (str): The filename of the graphml file input_format (str): One of grute.GraphFormats invariants (str[]: Invariants.ALL)*: An array of grute.Invariants to compute on the graph email (str: self.email)*: The email to notify upon completion use_threads (bool: False)*: Whether to use Python threads to run computation in the background when waiting for the server to return the invariants callback (function: None)*: The function to run upon completion of the call, if using threads. (Will not be called if use_threads is set to False.) Returns: HTTP Response if use_threads is False. Otherwise, None Raises: ValueError: If the graph file does not exist, or if there are issues with the passed arguments RemoteDataUploadError: If there is an issue packing the file RemoteError: If the server experiences difficulty computing invs """ if email is None: email = self.email if input_format not in GraphFormats._any: raise ValueError("Invalid input format, {}.".format(input_format)) if not set(invariants) <= set(Invariants.ALL): raise ValueError("Invariants must be a subset of Invariants.ALL.") if use_threads and callback is not None: if not hasattr(callback, '__call__'): raise ValueError("callback must be a function.") if len(inspect.getargspec(callback).args) != 1: raise ValueError("callback must take exactly 1 argument.") url = "graphupload/{}/{}/{}/".format( email, input_format, "/".join(invariants) ) if " " in url: raise ValueError("Arguments cannot have spaces in them.") if not (os.path.exists(graph_file)): raise ValueError("File {} does not exist.".format(graph_file)) if use_threads: # Run in the background. upload_thread = threading.Thread( target=self._run_compute_invariants, args=[url, graph_file, callback] ) upload_thread.start() else: # Run in the foreground. return self._run_compute_invariants(url, graph_file) return
def convert_graph(self, graph_file, input_format, output_formats, email=None, use_threads=False, callback=None): """ Convert a graph from one GraphFormat to another. Arguments: graph_file (str): Filename of the file to convert input_format (str): A grute.GraphFormats output_formats (str[]): A grute.GraphFormats email (str: self.email)*: The email to notify use_threads (bool: False)*: Whether to use Python threads to run computation in the background when waiting for the server callback (function: None)*: The function to run upon completion of the call, if using threads. (Will not be called if use_threads is set to False.) Returns: HTTP Response if use_threads=False. Else, no return value. Raises: RemoteDataUploadError: If there's an issue uploading the data RemoteError: If there's a server-side issue ValueError: If there's a problem with the supplied arguments """ if email is None: email = self.email if input_format not in GraphFormats._any: raise ValueError("Invalid input format {}.".format(input_format)) if not set(output_formats) <= set(GraphFormats._any): raise ValueError("Output formats must be a GraphFormats.") if use_threads and callback is not None: if not hasattr(callback, '__call__'): raise ValueError("callback must be a function.") if len(inspect.getargspec(callback).args) != 1: raise ValueError("callback must take exactly 1 argument.") if not (os.path.exists(graph_file)): raise ValueError("No such file, {}!".format(graph_file)) url = "convert/{}/{}/{}/l".format( email, input_format, ','.join(output_formats) ) if " " in url: raise ValueError("Spaces are not permitted in arguments.") if use_threads: # Run in the background. convert_thread = threading.Thread( target=self._run_convert_graph, args=[url, graph_file, callback] ) convert_thread.start() else: # Run in the foreground. return self._run_convert_graph(url, graph_file) return
def to_dict(ramons, flatten=False): """ Converts a RAMON object list to a JSON-style dictionary. Useful for going from an array of RAMONs to a dictionary, indexed by ID. Arguments: ramons (RAMON[]): A list of RAMON objects flatten (boolean: False): Not implemented Returns: dict: A python dictionary of RAMON objects. """ if type(ramons) is not list: ramons = [ramons] out_ramons = {} for r in ramons: out_ramons[r.id] = { "id": r.id, "type": _reverse_ramon_types[type(r)], "metadata": vars(r) } return out_ramons
def to_json(ramons, flatten=False):
    """
    Converts RAMON objects into a JSON string which can be directly written
    out to a .json file. You can pass either a single RAMON or a list. If
    you pass a single RAMON, it will still be exported with the ID as the
    key. In other words:

        type(from_json(to_json(ramon)))   # ALWAYS returns a list

    ...even if `type(ramon)` is a RAMON, not a list.

    Arguments:
        ramons (RAMON or list): The RAMON object(s) to convert to JSON.
        flatten (bool : False): If ID should be used as a key. If not, then
            a single JSON document is returned.

    Returns:
        str: The JSON representation of the RAMON objects, in the schema:
            {
                <id>: {
                    type: . . . ,
                    metadata: { . . . }
                },
            }

    Raises:
        ValueError: If an invalid RAMON is passed.
    """
    if type(ramons) is not list:
        ramons = [ramons]

    out_ramons = {}
    for r in ramons:
        out_ramons[r.id] = {
            "id": r.id,
            "type": _reverse_ramon_types[type(r)],
            "metadata": vars(r)
        }

    if flatten:
        return jsonlib.dumps(list(out_ramons.values())[0])
    return jsonlib.dumps(out_ramons)
def from_json(json, cutout=None):
    """
    Converts JSON to a python list of RAMON objects. If `cutout` is
    provided, the `cutout` attribute of the RAMON object is populated.
    Otherwise, it's left empty. `json` should be an ID-level dictionary,
    like so:

        {
            16: {
                type: "segment",
                metadata: { . . . }
            },
        }

    NOTE: If more than one item is in the dictionary, then a Python list
    of RAMON objects is returned instead of a single RAMON.

    Arguments:
        json (str or dict): The JSON to import to RAMON objects
        cutout: Currently not supported.

    Returns:
        [RAMON]
    """
    if type(json) is str:
        json = jsonlib.loads(json)

    out_ramons = []
    for (rid, rdata) in six.iteritems(json):
        _md = rdata['metadata']
        r = AnnotationType.RAMON(rdata['type'])(
            id=rid,
            author=_md['author'],
            status=_md['status'],
            confidence=_md['confidence'],
            kvpairs=copy.deepcopy(_md['kvpairs'])
        )

        if rdata['type'] == 'segment':
            r.segmentclass = _md.get('segmentclass')
            r.neuron = _md.get('neuron')
            if 'synapses' in _md:
                r.synapses = _md['synapses'][:]
            if 'organelles' in _md:
                r.organelles = _md['organelles'][:]
        elif rdata['type'] == 'synapse':
            # Handle synapses before the generic segment-list case so that
            # synapse_type and weight are actually populated.
            if 'segments' in _md:
                r.segments = _md['segments'][:]
            r.synapse_type = _md.get('synapse_type')
            r.weight = _md.get('weight')
        elif rdata['type'] == 'neuron':
            if 'segments' in _md:
                r.segments = _md['segments'][:]
        elif rdata['type'] == 'organelle':
            r.organelle_class = _md['organelleclass'][:]

        out_ramons.append(r)

    return out_ramons
def from_hdf5(hdf5, anno_id=None): """ Converts an HDF5 file to a RAMON object. Returns an object that is a child- -class of RAMON (though it's determined at run-time what type is returned). Accessing multiple IDs from the same file is not supported, because it's not dramatically faster to access each item in the hdf5 file at the same time It's semantically and computationally easier to run this function several times on the same file. Arguments: hdf5 (h5py.File): A h5py File object that holds RAMON data anno_id (int): The ID of the RAMON obj to extract from the file. This defaults to the first one (sorted) if none is specified. Returns: ndio.RAMON object """ if anno_id is None: # The user just wants the first item we find, so... Yeah. return from_hdf5(hdf5, list(hdf5.keys())[0]) # First, get the actual object we're going to download. anno_id = str(anno_id) if anno_id not in list(hdf5.keys()): raise ValueError("ID {} is not in this file. Options are: {}".format( anno_id, ", ".join(list(hdf5.keys())) )) anno = hdf5[anno_id] # anno now holds just the RAMON of interest # This is the most complicated line in here: It creates an object whose # type is conditional on the ANNOTATION_TYPE of the hdf5 object. try: r = AnnotationType.get_class(anno['ANNOTATION_TYPE'][0])() except: raise InvalidRAMONError("This is not a valid RAMON type.") # All RAMON types definitely have these attributes: metadata = anno['METADATA'] r.author = metadata['AUTHOR'][0] r.confidence = metadata['CONFIDENCE'][0] r.status = metadata['STATUS'][0] r.id = anno_id # These are a little tougher, some RAMON types have special attributes: if type(r) in [RAMONNeuron, RAMONSynapse]: r.segments = metadata['SEGMENTS'][()] if 'KVPAIRS' in metadata: kvs = metadata['KVPAIRS'][()][0].split() if len(kvs) != 0: for i in kvs: k, v = str(i).split(',') r.kvpairs[str(k)] = str(v) else: r.kvpairs = {} if issubclass(type(r), RAMONVolume): if 'CUTOUT' in anno: r.cutout = anno['CUTOUT'][()] if 'XYZOFFSET' in anno: r.cutout = anno['XYZOFFSET'][()] if 'RESOLUTION' in anno: r.cutout = anno['RESOLUTION'][()] if type(r) is RAMONSynapse: r.synapse_type = metadata['SYNAPSE_TYPE'][0] r.weight = metadata['WEIGHT'][0] if type(r) is RAMONSegment: if 'NEURON' in metadata: r.neuron = metadata['NEURON'][0] if 'PARENTSEED' in metadata: r.parent_seed = metadata['PARENTSEED'][0] if 'SEGMENTCLASS' in metadata: r.segmentclass = metadata['SEGMENTCLASS'][0] if 'SYNAPSES' in metadata: r.synapses = metadata['SYNAPSES'][()] if 'ORGANELLES' in metadata: r.organelles = metadata['ORGANELLES'][()] if type(r) is RAMONOrganelle: r.organelle_class = metadata['ORGANELLECLASS'][0] return r
def to_hdf5(ramon, hdf5=None): """ Exports a RAMON object to an HDF5 file object. Arguments: ramon (RAMON): A subclass of RAMONBase hdf5 (str): Export filename Returns: hdf5.File Raises: InvalidRAMONError: if you pass a non-RAMON object """ if issubclass(type(ramon), RAMONBase) is False: raise InvalidRAMONError("Invalid RAMON supplied to ramon.to_hdf5.") import h5py import numpy if hdf5 is None: tmpfile = tempfile.NamedTemporaryFile(delete=False) else: tmpfile = hdf5 with h5py.File(tmpfile.name, "a") as hdf5: # First we'll export things that all RAMON objects have in # common, starting with the Group that encompasses each ID: grp = hdf5.create_group(str(ramon.id)) grp.create_dataset("ANNOTATION_TYPE", (1,), numpy.uint32, data=AnnotationType.get_int(type(ramon))) if hasattr(ramon, 'cutout'): if ramon.cutout is not None: grp.create_dataset('CUTOUT', ramon.cutout.shape, ramon.cutout.dtype, data=ramon.cutout) grp.create_dataset('RESOLUTION', (1,), numpy.uint32, data=ramon.resolution) grp.create_dataset('XYZOFFSET', (3,), numpy.uint32, data=ramon.xyz_offset) # Next, add general metadata. metadata = grp.create_group('METADATA') metadata.create_dataset('AUTHOR', (1,), dtype=h5py.special_dtype(vlen=str), data=ramon.author) fstring = StringIO() csvw = csv.writer(fstring, delimiter=',') csvw.writerows([r for r in six.iteritems(ramon.kvpairs)]) metadata.create_dataset('KVPAIRS', (1,), dtype=h5py.special_dtype(vlen=str), data=fstring.getvalue()) metadata.create_dataset('CONFIDENCE', (1,), numpy.float, data=ramon.confidence) metadata.create_dataset('STATUS', (1,), numpy.uint32, data=ramon.status) # Finally, add type-specific metadata: if hasattr(ramon, 'segments'): metadata.create_dataset('SEGMENTS', data=numpy.asarray(ramon.segments, dtype=numpy.uint32)) if hasattr(ramon, 'synapse_type'): metadata.create_dataset('SYNAPSE_TYPE', (1,), numpy.uint32, data=ramon.synapse_type) if hasattr(ramon, 'weight'): metadata.create_dataset('WEIGHT', (1,), numpy.float, data=ramon.weight) if hasattr(ramon, 'neuron'): metadata.create_dataset('NEURON', (1,), numpy.uint32, data=ramon.neuron) if hasattr(ramon, 'segmentclass'): metadata.create_dataset('SEGMENTCLASS', (1,), numpy.uint32, data=ramon.segmentclass) if hasattr(ramon, 'synapses'): metadata.create_dataset('SYNAPSES', (len(ramon.synapses),), numpy.uint32, data=ramon.synapses) if hasattr(ramon, 'organelles'): metadata.create_dataset('ORGANELLES', (len(ramon.organelles),), numpy.uint32, data=ramon.organelles) if hasattr(ramon, 'organelle_class'): metadata.create_dataset('ORGANELLECLASS', (1,), numpy.uint32, data=ramon.organelle_class) hdf5.flush() tmpfile.seek(0) return tmpfile return False
def RAMON(typ):
    """
    Takes a str or int and returns the corresponding RAMON class type.
    """
    if six.PY2:
        lookup = [str, unicode]  # noqa: F821 -- `unicode` only exists in Py2
    else:
        lookup = [str]

    if type(typ) is int:
        return _ramon_types[typ]
    elif type(typ) in lookup:
        return _ramon_types[_types[typ]]
def get_xy_slice(self, token, channel, x_start, x_stop, y_start, y_stop, z_index, resolution=0): """ Return a binary-encoded, decompressed 2d image. You should specify a 'token' and 'channel' pair. For image data, users should use the channel 'image.' Arguments: token (str): Token to identify data to download channel (str): Channel resolution (int): Resolution level Q_start (int):` The lower bound of dimension 'Q' Q_stop (int): The upper bound of dimension 'Q' z_index (int): The z-slice to image Returns: str: binary image data """ return self.data.get_xy_slice(token, channel, x_start, x_stop, y_start, y_stop, z_index, resolution)
def get_volume(self, token, channel, x_start, x_stop, y_start, y_stop, z_start, z_stop, resolution=1, block_size=DEFAULT_BLOCK_SIZE, neariso=False): """ Get a RAMONVolume volumetric cutout from the neurodata server. Arguments: token (str): Token to identify data to download channel (str): Channel resolution (int): Resolution level Q_start (int): The lower bound of dimension 'Q' Q_stop (int): The upper bound of dimension 'Q' block_size (int[3]): Block size of this dataset neariso (bool : False): Passes the 'neariso' param to the cutout. If you don't know what this means, ignore it! Returns: ndio.ramon.RAMONVolume: Downloaded data. """ return self.data.get_volume(token, channel, x_start, x_stop, y_start, y_stop, z_start, z_stop, resolution, block_size, neariso)
def get_cutout(self, token, channel, x_start, x_stop, y_start, y_stop, z_start, z_stop, t_start=0, t_stop=1, resolution=1, block_size=DEFAULT_BLOCK_SIZE, neariso=False): """ Get volumetric cutout data from the neurodata server. Arguments: token (str): Token to identify data to download channel (str): Channel resolution (int): Resolution level Q_start (int): The lower bound of dimension 'Q' Q_stop (int): The upper bound of dimension 'Q' block_size (int[3]): Block size of this dataset. If not provided, ndio uses the metadata of this tokenchannel to set. If you find that your downloads are timing out or otherwise failing, it may be wise to start off by making this smaller. neariso (bool : False): Passes the 'neariso' param to the cutout. If you don't know what this means, ignore it! Returns: numpy.ndarray: Downloaded data. """ return self.data.get_cutout(token, channel, x_start, x_stop, y_start, y_stop, z_start, z_stop, t_start, t_stop, resolution, block_size, neariso)
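A typical download call through the wrapper above might look like the following; the token, channel, and bounds are placeholders, and `nd` is assumed to be an already-constructed Remote exposing these methods:

vol = nd.get_cutout('my_token', 'image',
                    x_start=0, x_stop=512,
                    y_start=0, y_stop=512,
                    z_start=0, z_stop=16,
                    resolution=1)
print(type(vol), vol.shape)   # a numpy.ndarray, per the docstring above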
def post_cutout(self, token, channel, x_start, y_start, z_start, data, resolution=0): """ Post a cutout to the server. Arguments: token (str) channel (str) x_start (int) y_start (int) z_start (int) data (numpy.ndarray): A numpy array of data. Pass in (x, y, z) resolution (int : 0): Resolution at which to insert the data Returns: bool: True on success Raises: RemoteDataUploadError: if there's an issue during upload. """ return self.data.post_cutout(token, channel, x_start, y_start, z_start, data, resolution)
def create_project(self, project_name, dataset_name, hostname, is_public, s3backend=0, kvserver='localhost', kvengine='MySQL', mdengine='MySQL', description=''): """ Creates a project with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on hostname (str): Hostname s3backend (str): S3 region to save the data in is_public (int): 1 is public. 0 is not public. kvserver (str): Server to store key value pairs in kvengine (str): Database to store key value pairs in mdengine (str): ??? description (str): Description for your project Returns: bool: True if project created, false if not created. """ return self.resources.create_project(project_name, dataset_name, hostname, is_public, s3backend, kvserver, kvengine, mdengine, description)
def create_token(self, token_name, project_name, dataset_name, is_public): """ Creates a token with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on token_name (str): Token name is_public (int): 1 is public. 0 is not public Returns: bool: True if project created, false if not created. """ return self.resources.create_token(token_name, project_name, dataset_name, is_public)
def get_token(self, token_name, project_name, dataset_name): """ Get a token with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on token_name (str): Token name Returns: dict: Token info """ return self.resources.get_token(token_name, project_name, dataset_name)
def delete_token(self, token_name, project_name, dataset_name): """ Delete a token with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on token_name (str): Token name channel_name (str): Channel name project is based on Returns: bool: True if project deleted, false if not deleted. """ return self.resources.delete_token(token_name, project_name, dataset_name)
def create_dataset(self, name, x_img_size, y_img_size, z_img_size, x_vox_res, y_vox_res, z_vox_res, x_offset=0, y_offset=0, z_offset=0, scaling_levels=0, scaling_option=0, dataset_description="", is_public=0): """ Creates a dataset. Arguments: name (str): Name of dataset x_img_size (int): max x coordinate of image size y_img_size (int): max y coordinate of image size z_img_size (int): max z coordinate of image size x_vox_res (float): x voxel resolution y_vox_res (float): y voxel resolution z_vox_res (float): z voxel resolution x_offset (int): x offset amount y_offset (int): y offset amount z_offset (int): z offset amount scaling_levels (int): Level of resolution scaling scaling_option (int): Z slices is 0 or Isotropic is 1 dataset_description (str): Your description of the dataset is_public (int): 1 'true' or 0 'false' for viewability of data set in public Returns: bool: True if dataset created, False if not """ return self.resources.create_dataset(name, x_img_size, y_img_size, z_img_size, x_vox_res, y_vox_res, z_vox_res, x_offset, y_offset, z_offset, scaling_levels, scaling_option, dataset_description, is_public)
def create_channel(self, channel_name, project_name, dataset_name, channel_type, dtype, startwindow, endwindow, readonly=0, start_time=0, end_time=0, propagate=0, resolution=0, channel_description=''): """ Create a new channel on the Remote, using channel_data. Arguments: channel_name (str): Channel name project_name (str): Project name dataset_name (str): Dataset name channel_type (str): Type of the channel (e.g. `neurodata.IMAGE`) dtype (str): The datatype of the channel's data (e.g. `uint8`) startwindow (int): Window to start in endwindow (int): Window to end in readonly (int): Can others write to this channel? propagate (int): Allow propogation? 1 is True, 0 is False resolution (int): Resolution scaling channel_description (str): Your description of the channel Returns: bool: `True` if successful, `False` otherwise. Raises: ValueError: If your args were bad :( RemoteDataUploadError: If the channel data is valid but upload fails for some other reason. """ return self.resources.create_channel(channel_name, project_name, dataset_name, channel_type, dtype, startwindow, endwindow, readonly, start_time, end_time, propagate, resolution, channel_description)
def get_channel(self, channel_name, project_name, dataset_name): """ Gets info about a channel given its name, name of its project , and name of its dataset. Arguments: channel_name (str): Channel name project_name (str): Project name dataset_name (str): Dataset name Returns: dict: Channel info """ return self.resources.get_channel(channel_name, project_name, dataset_name)
def delete_channel(self, channel_name, project_name, dataset_name): """ Deletes a channel given its name, name of its project , and name of its dataset. Arguments: channel_name (str): Channel name project_name (str): Project name dataset_name (str): Dataset name Returns: bool: True if channel deleted, False if not """ return self.resources.delete_channel(channel_name, project_name, dataset_name)
def add_channel(self, channel_name, datatype, channel_type, data_url, file_format, file_type, exceptions=None, resolution=None, windowrange=None, readonly=None): """ Arguments: channel_name (str): Channel Name is the specific name of a specific series of data. Standard naming convention is to do ImageTypeIterationNumber or NameSubProjectName. datatype (str): The data type is the storage method of data in the channel. It can be uint8, uint16, uint32, uint64, or float32. channel_type (str): The channel type is the kind of data being stored in the channel. It can be image, annotation, or timeseries. data_url (str): This url points to the root directory of the files. Dropbox (or any data requiring authentication to download such as private s3) is not an acceptable HTTP Server. See additional instructions in documentation online to format s3 properly so it is http accessible. file_format (str): File format refers to the overarching kind of data, as in slices (normal image data) or catmaid (tile-based). file_type (str): File type refers to the specific type of file that the data is stored in, as in, tiff, png, or tif. exceptions (int): Exceptions is an option to enable the possibility for annotations to contradict each other (assign different values to the same point). 1 corresponds to True, 0 corresponds to False. resolution (int): Resolution is the starting resolution of the data being uploaded to the channel. windowrange (int, int): Window range is the maximum and minimum pixel values for a particular image. This is used so that the image can be displayed in a readable way for viewing through RESTful calls readonly (int): This option allows the user to control if, after the initial data commit, the channel is read-only. Generally this is suggested with data that will be publicly viewable. Returns: None """ self.channels[channel_name] = [ channel_name.strip().replace(" ", ""), datatype, channel_type.lower(), data_url, file_format, file_type, exceptions, resolution, windowrange, readonly ]
def add_project(self, project_name, token_name=None, public=None):
    """
    Arguments:
        project_name (str): Project name is the specific project within a
            dataset's name. If there is only one project associated with a
            dataset then standard convention is to name the project the
            same as its associated dataset.
        token_name (str): The token name is the default token. If you do
            not wish to specify one, a default one will be created for you
            with the same name as the project name. However, if the
            project is private you must specify a token.
        public (int): This option allows users to specify if they want the
            project/channels to be publicly viewable/searchable.
            (1, 0) = (TRUE, FALSE)

    Returns:
        None
    """
    if token_name is not None:
        token_name = token_name.strip().replace(" ", "")
    self.project = (project_name.strip().replace(" ", ""),
                    token_name, public)
def add_dataset(self, dataset_name, imagesize, voxelres, offset=None, timerange=None, scalinglevels=None, scaling=None): """ Add a new dataset to the ingest. Arguments: dataset_name (str): Dataset Name is the overarching name of the research effort. Standard naming convention is to do LabNamePublicationYear or LeadResearcherCurrentYear. imagesize (int, int, int): Image size is the pixel count dimensions of the data. For example is the data is stored as a series of 100 slices each 2100x2000 pixel TIFF images, the X,Y,Z dimensions are (2100, 2000, 100). voxelres (float, float, float): Voxel Resolution is the number of voxels per unit pixel. We store X,Y,Z voxel resolution separately. offset (int, int, int): If your data is not well aligned and there is "excess" image data you do not wish to examine, but are present in your images, offset is how you specify where your actual image starts. Offset is provided a pixel coordinate offset from origin which specifies the "actual" origin of the image. The offset is for X,Y,Z dimensions. timerange (int, int): Time Range is a parameter to support storage of Time Series data, so the value of the tuple is a 0 to X range of how many images over time were taken. It takes 2 inputs timeStepStart and timeStepStop. scalinglevels (int): Scaling levels is the number of levels the data is scalable to (how many zoom levels are present in the data). The highest resolution of the data is at scaling level 0, and for each level up the data is down sampled by 2x2 (per slice). To learn more about the sampling service used, visit the the propagation service page. scaling (int): Scaling is the scaling method of the data being stored. 0 corresponds to a Z-slice orientation (as in a collection of tiff images in which each tiff is a slice on the z plane) where data will be scaled only on the xy plane, not the z plane. 1 corresponds to an isotropic orientation (in which each tiff is a slice on the y plane) where data is scaled along all axis. Returns: None """ self.dataset = (dataset_name.strip().replace(" ", ""), imagesize, voxelres, offset, timerange, scalinglevels, scaling)
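Taken together, add_channel, add_project, and add_dataset assemble the ingest spec that post_data (further below) submits. A sketch of the workflow, with placeholder names, sizes, and URLs, assuming `ai` is the ingest object exposing these methods:

ai.add_dataset('ExampleLab2016', imagesize=(2100, 2000, 100),
               voxelres=(1.0, 1.0, 5.0), scalinglevels=3, scaling=0)
ai.add_project('ExampleProject', token_name='ExampleProject', public=1)
ai.add_channel('image1', datatype='uint8', channel_type='image',
               data_url='http://example.com/data', file_format='SLICE',
               file_type='tif')
ai.post_data()   # builds the ND JSON and posts it to the ingest endpoint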
def nd_json(self, dataset, project, channel_list, metadata): """ Genarate ND json object. """ nd_dict = {} nd_dict['dataset'] = self.dataset_dict(*dataset) nd_dict['project'] = self.project_dict(*project) nd_dict['metadata'] = metadata nd_dict['channels'] = {} for channel_name, value in channel_list.items(): nd_dict['channels'][channel_name] = self.channel_dict(*value) return json.dumps(nd_dict, sort_keys=True, indent=4)
def dataset_dict( self, dataset_name, imagesize, voxelres, offset, timerange, scalinglevels, scaling): """Generate the dataset dictionary""" dataset_dict = {} dataset_dict['dataset_name'] = dataset_name dataset_dict['imagesize'] = imagesize dataset_dict['voxelres'] = voxelres if offset is not None: dataset_dict['offset'] = offset if timerange is not None: dataset_dict['timerange'] = timerange if scalinglevels is not None: dataset_dict['scalinglevels'] = scalinglevels if scaling is not None: dataset_dict['scaling'] = scaling return dataset_dict
def channel_dict(self, channel_name, datatype, channel_type, data_url, file_format, file_type, exceptions, resolution, windowrange, readonly): """ Generate the project dictionary. """ channel_dict = {} channel_dict['channel_name'] = channel_name channel_dict['datatype'] = datatype channel_dict['channel_type'] = channel_type if exceptions is not None: channel_dict['exceptions'] = exceptions if resolution is not None: channel_dict['resolution'] = resolution if windowrange is not None: channel_dict['windowrange'] = windowrange if readonly is not None: channel_dict['readonly'] = readonly channel_dict['data_url'] = data_url channel_dict['file_format'] = file_format channel_dict['file_type'] = file_type return channel_dict
def project_dict(self, project_name, token_name, public): """ Genarate the project dictionary. """ project_dict = {} project_dict['project_name'] = project_name if token_name is not None: if token_name == '': project_dict['token_name'] = project_name else: project_dict['token_name'] = token_name else: project_dict['token_name'] = project_name if public is not None: project_dict['public'] = public return project_dict
def identify_imagesize(self, image_type, image_path='/tmp/img.'): """ Identify the image size using the data location and other parameters """ dims = () try: if (image_type.lower() == 'png'): dims = np.shape(ndpng.load('{}{}'.format( image_path, image_type ))) elif (image_type.lower() == 'tif' or image_type.lower() == 'tiff'): dims = np.shape(ndtiff.load('{}{}'.format( image_path, image_type ))) else: raise ValueError("Unsupported image type.") except: raise OSError('The file was not accessible at {}{}'.format( image_path, image_type )) return dims[::-1]
def verify_path(self, data, verifytype): """ Verify the path supplied. """ # Insert try and catch blocks try: token_name = data["project"]["token_name"] except: token_name = data["project"]["project_name"] channel_names = list(data["channels"].copy().keys()) imgsz = data['dataset']['imagesize'] for i in range(0, len(channel_names)): channel_type = data["channels"][ channel_names[i]]["channel_type"] path = data["channels"][channel_names[i]]["data_url"] aws_pattern = re.compile("^(http:\/\/)(.+)(\.s3\.amazonaws\.com)") file_type = data["channels"][channel_names[i]]["file_type"] if "offset" in data["dataset"]: offset = data["dataset"]["offset"][0] else: offset = 0 if (aws_pattern.match(path)): verifytype = VERIFY_BY_SLICE if (channel_type == "timeseries"): timerange = data["dataset"]["timerange"] try: assert(timerange[0] != timerange[1]) except AssertionError: raise ValueError('Timeseries values are the same, did you\ specify the time steps?') for j in range(timerange[0], timerange[1] + 1): # Test for tifs or such? Currently test for just not # empty if (verifytype == VERIFY_BY_FOLDER): work_path = "{}/{}/{}/time{}/".format( path, token_name, channel_names[i], ("%04d" % j)) elif (verifytype == VERIFY_BY_SLICE): work_path = "{}/{}/{}/time{}/{}.{}".format( path, token_name, channel_names[i], ("%04d" % j), ("%04d" % offset), file_type) else: raise TypeError('Incorrect verify method') # Check for accessibility try: if (verifytype == VERIFY_BY_FOLDER): resp = requests.head(work_path) assert(resp.status_code == 200) elif (verifytype == VERIFY_BY_SLICE): resp = requests.get( work_path, stream=True, verify=False) with open('/tmp/img.{}'.format(file_type), 'wb') as out_file: shutil.copyfileobj(resp.raw, out_file) out_file.close() assert(resp.status_code == 200) resp.close() except AssertionError: raise OSError('Files are not http accessible: \ Error: {}, Path: {}'.format(resp.status_code, work_path)) # Attempt to Verify imagesize here try: if (verifytype == VERIFY_BY_SLICE): assert(list(self.identify_imagesize(file_type)) == imgsz[0:2]) except: raise ValueError('File image size does not match\ provided image size.') else: # Test for tifs or such? Currently test for just not empty if (verifytype == VERIFY_BY_FOLDER): work_path = "{}/{}/{}/".format( path, token_name, channel_names[i]) elif (verifytype == VERIFY_BY_SLICE): work_path = "{}/{}/{}/{}.{}".format( path, token_name, channel_names[i], ("%04d" % offset), file_type) else: raise TypeError('Incorrect verify method') # Check for accessibility if (verifytype == VERIFY_BY_FOLDER): resp = requests.head(work_path) elif (verifytype == VERIFY_BY_SLICE): resp = requests.get(work_path, stream=True, verify=False) with open('/tmp/img.{}'.format(file_type), 'wb') as out_file: shutil.copyfileobj(resp.raw, out_file) out_file.close() resp.close() if (resp.status_code >= 300): raise OSError('Files are not http accessible: \ Error: {}, Path: {}'.format(resp.status_code, work_path)) # Attempt to Verify imagesize here try: if (verifytype == VERIFY_BY_SLICE): assert(list(self.identify_imagesize(file_type)) == imgsz[0:2]) except: raise ValueError('File image size does not match\ provided image size.')
def put_data(self, data):
    """
    Try to post data to the server.
    """
    URLPath = self.oo.url("autoIngest/")
    # URLPath = 'https://{}/ca/autoIngest/'.format(self.oo.site_host)
    response = requests.post(URLPath, data=json.dumps(data), verify=False)
    if response.status_code != 200:
        raise OSError("Error in posting JSON file: status code {}".format(
            response.status_code))
    print("From ndio: {}".format(response.content))
def post_data(self, file_name=None, legacy=False, verifytype=VERIFY_BY_SLICE): """ Arguments: file_name (str): The file name of the json file to post (optional). If this is left unspecified it is assumed the data is in the AutoIngest object. dev (bool): If pushing to a microns dev branch server set this to True, if not leave False. verifytype (enum): Set http verification type, by checking the first slice is accessible or by checking channel folder. NOTE: If verification occurs by folder there is NO image size or type verification. Enum: [Folder, Slice] Returns: None """ if (file_name is None): complete_example = ( self.dataset, self.project, self.channels, self.metadata) data = json.loads(self.nd_json(*complete_example)) else: try: with open(file_name) as data_file: data = json.load(data_file) except: raise OSError("Error opening file") # self.verify_path(data, verifytype) # self.verify_json(data) self.put_data(data)
def output_json(self, file_name='/tmp/ND.json'): """ Arguments: file_name(str : '/tmp/ND.json'): The file name to store the json to Returns: None """ complete_example = ( self.dataset, self.project, self.channels, self.metadata) data = json.loads(self.nd_json(*complete_example)) # self.verify_json(data) self.verify_path(data, VERIFY_BY_SLICE) f = open(file_name, 'w') f.write(str(data)) f.close()
def find_path(name, config, wsonly=False): """Find path for given workspace and|or repository.""" workspace = Workspace(config) config = config["workspaces"] path_list = {} if name.find('/') != -1: wsonly = False try: ws, repo = name.split('/') except ValueError: raise ValueError("There is too many / in `name` argument. " "Argument syntax: `workspace/repository`.") if (workspace.exists(ws)): if (repo in config[ws]["repositories"]): path_name = "%s/%s" % (ws, repo) path_list[path_name] = config[ws]["repositories"][repo] for ws_name, ws in sorted(config.items()): if (name == ws_name): if wsonly is True: return {ws_name: ws["path"]} repositories = sorted(config[ws_name]["repositories"].items()) for name, path in repositories: path_list["%s/%s" % (ws_name, name)] = path break for repo_name, repo_path in sorted(ws["repositories"].items()): if (repo_name == name): path_list["%s/%s" % (ws_name, repo_name)] = repo_path return path_list
def get_public_tokens(self): """ Get a list of public tokens available on this server. Arguments: None Returns: str[]: list of public tokens """ r = self.remote_utils.get_url(self.url() + "public_tokens/") return r.json()
def get_public_datasets_and_tokens(self): """ NOTE: VERY SLOW! Get a dictionary relating key:dataset to value:[tokens] that rely on that dataset. Arguments: None Returns: dict: relating key:dataset to value:[tokens] """ datasets = {} tokens = self.get_public_tokens() for t in tokens: dataset = self.get_token_dataset(t) if dataset in datasets: datasets[dataset].append(t) else: datasets[dataset] = [t] return datasets
def get_proj_info(self, token): """ Return the project info for a given token. Arguments: token (str): Token to return information for Returns: JSON: representation of proj_info """ r = self.remote_utils.get_url(self.url() + "{}/info/".format(token)) return r.json()
def get_image_size(self, token, resolution=0): """ Return the size of the volume (3D). Convenient for when you want to download the entirety of a dataset. Arguments: token (str): The token for which to find the dataset image bounds resolution (int : 0): The resolution at which to get image bounds. Defaults to 0, to get the largest area available. Returns: int[3]: The size of the bounds. Should == get_volume.shape Raises: RemoteDataNotFoundError: If the token is invalid, or if the metadata at that resolution is unavailable in projinfo. """ info = self.get_proj_info(token) res = str(resolution) if res not in info['dataset']['imagesize']: raise RemoteDataNotFoundError("Resolution " + res + " is not available.") return info['dataset']['imagesize'][str(resolution)]
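# Hedged usage sketch for the bounds helpers above; `remote` is assumed to be
# an instance of this remote class pointed at a reachable server.
def example_image_bounds(remote):
    """Illustrative only: print the full-resolution bounds of every public
    token on the server.
    """
    for token in remote.get_public_tokens():
        try:
            x, y, z = remote.get_image_size(token, resolution=0)
            print("{}: {} x {} x {}".format(token, x, y, z))
        except RemoteDataNotFoundError:
            # Some tokens do not expose image bounds at resolution 0.
            print("{}: no bounds at resolution 0".format(token))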
def set_metadata(self, token, data): """ Insert new metadata into the OCP metadata database. Arguments: token (str): Token of the datum to set data (str): A dictionary to insert as metadata. Include `secret`. Returns: json: Info of the inserted ID (convenience) or an error message. Throws: RemoteDataUploadError: If the token is already populated, or if there is an issue with your specified `secret` key. """ req = requests.post(self.meta_url("metadata/ocp/set/" + token), json=data, verify=False) if req.status_code != 200: raise RemoteDataUploadError( "Could not upload metadata: " + req.json()['message'] ) return req.json()
def add_subvolume(self, token, channel, secret,
                  x_start, x_stop,
                  y_start, y_stop,
                  z_start, z_stop,
                  resolution, title, notes):
    """
    Adds a new subvolume to a token/channel.

    Arguments:
        token (str): The token to write to in LIMS
        channel (str): Channel to add in the subvolume. Can be `None`
        secret (str): The `secret` key used by the metadata service to
            authorize the write
        x_start (int): Start in x dimension
        x_stop (int): Stop in x dimension
        y_start (int): Start in y dimension
        y_stop (int): Stop in y dimension
        z_start (int): Start in z dimension
        z_stop (int): Stop in z dimension
        resolution (int): The resolution at which this subvolume is seen
        title (str): The title to set for the subvolume
        notes (str): Optional extra thoughts on the subvolume

    Returns:
        boolean: success
    """
    md = self.get_metadata(token)['metadata']
    if 'subvolumes' in md:
        subvols = md['subvolumes']
    else:
        subvols = []

    subvols.append({
        'token': token,
        'channel': channel,
        'x_start': x_start,
        'x_stop': x_stop,
        'y_start': y_start,
        'y_stop': y_stop,
        'z_start': z_start,
        'z_stop': z_stop,
        'resolution': resolution,
        'title': title,
        'notes': notes
    })

    return self.set_metadata(token, {
        'secret': secret,
        'subvolumes': subvols
    })
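# Hedged usage sketch for add_subvolume(); the token, secret, and coordinates
# are placeholder values, and `remote` is assumed to be an instance of the
# class these metadata helpers are defined on.
def example_add_subvolume(remote):
    """Illustrative only: record a subvolume of interest in the metadata
    service for later review.
    """
    remote.add_subvolume(
        token='my_token', channel='annotation', secret='my_secret',
        x_start=0, x_stop=512,
        y_start=0, y_stop=512,
        z_start=0, z_stop=16,
        resolution=0,
        title='soma candidate',
        notes='flagged during proofreading')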
def get_url(self, url):
    """
    Get a response object for a given url.

    Arguments:
        url (str): The url to make a get request to

    Returns:
        obj: The response object
    """
    try:
        req = requests.get(url, headers={
            'Authorization': 'Token {}'.format(self._user_token)
        }, verify=False)
        if req.status_code == 403:
            raise ValueError("Access Denied")
        else:
            return req
    except requests.exceptions.ConnectionError as e:
        if str(e) == '403 Client Error: Forbidden':
            raise ValueError('Access Denied')
        else:
            raise e
def post_url(self, url, token='', json=None, data=None, headers=None):
    """
    Returns a post request object taking in a url, user token, and
    possible json information.

    Arguments:
        url (str): The url to make the post to
        token (str): The authentication token
        json (dict): json info to send
        data: Form-encoded or raw body data to send instead of json
        headers (dict): Extra headers to merge with the auth header

    Returns:
        obj: Post request object
    """
    if (token == ''):
        token = self._user_token
    if headers:
        headers.update({'Authorization': 'Token {}'.format(token)})
    else:
        headers = {'Authorization': 'Token {}'.format(token)}

    if json:
        return requests.post(url, headers=headers, json=json,
                             verify=False)
    if data:
        return requests.post(url, headers=headers, data=data,
                             verify=False)
    return requests.post(url, headers=headers, verify=False)
def delete_url(self, url, token=''):
    """
    Returns a delete request object taking in a url and user token.

    Arguments:
        url (str): The url to make the delete request to
        token (str): The authentication token

    Returns:
        obj: Delete request object
    """
    if (token == ''):
        token = self._user_token
    return requests.delete(url,
                           headers={
                               'Authorization': 'Token {}'.format(token)},
                           verify=False,)
def ping(self, url, endpoint=''):
    """
    Ping the server to make sure that you can access the base URL.

    Arguments:
        url (str): The base url of the server to ping
        endpoint (str): An optional endpoint to append to the base url

    Returns:
        `int` The status code of the response (200 means success)
    """
    r = self.get_url(url + "/" + endpoint)
    return r.status_code
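# Hedged usage sketch for the small HTTP helpers above; the host URL is a
# placeholder, and `utils` is assumed to be an instance of the helper class
# that holds `_user_token`.
def example_remote_utils(utils):
    """Illustrative only: ping a server, then post and delete a resource
    with the stored token attached.
    """
    host = 'https://example.org/nd'
    # ping() returns the raw status code; 200 means the endpoint is reachable.
    if utils.ping(host, 'public_tokens/') == 200:
        utils.post_url(host + '/resource/', json={'name': 'demo'})
        utils.delete_url(host + '/resource/')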
def load(hdf5_filename):
    """
    Import a HDF5 file into a numpy array.

    Arguments:
        hdf5_filename: A string filename of a HDF5 datafile

    Returns:
        A numpy array with data from the HDF5 file
    """

    # Expand filename to be absolute
    hdf5_filename = os.path.expanduser(hdf5_filename)

    try:
        f = h5py.File(hdf5_filename, "r")
        # neurodata stores the data in the 'CUTOUT' dataset of the
        # 'image' group
        data_layers = f.get('image').get('CUTOUT')
        data = numpy.array(data_layers)
        f.close()
    except Exception as e:
        raise ValueError("Could not load file {0} for conversion. {1}".format(
            hdf5_filename, e))

    return data
def save(hdf5_filename, array):
    """
    Export a numpy array to a HDF5 file.

    Arguments:
        hdf5_filename (str): A filename to which to save the HDF5 data
        array (numpy.ndarray): The numpy array to save to HDF5

    Returns:
        String. The expanded filename that now holds the HDF5 data
    """
    # Expand filename to be absolute
    hdf5_filename = os.path.expanduser(hdf5_filename)

    try:
        with h5py.File(hdf5_filename, "w") as h:
            h.create_dataset('CUTOUT', data=array)
    except Exception as e:
        raise ValueError("Could not save HDF5 file {0}: {1}".format(
            hdf5_filename, e))

    return hdf5_filename
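# Hedged usage sketch for save(); note that save() writes the array to a
# root-level 'CUTOUT' dataset while load() expects it under an 'image' group,
# so the two functions are not symmetric round-trip partners for arbitrary
# files. The output path below is a placeholder.
def example_hdf5_save():
    """Illustrative only: write a small volume to HDF5 with save()."""
    import numpy
    volume = numpy.zeros((16, 16, 16), dtype=numpy.uint8)
    path = save('/tmp/example_cutout.hdf5', volume)
    print("wrote {}".format(path))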
def run(self, job: Job) -> Future[Result]: ''' return values of execute are set as result of the task returned by ensure_future(), obtainable via task.result() ''' if not self.watcher_ready: self.log.error(f'child watcher unattached when executing {job}') job.cancel('unattached watcher') elif not self.can_execute(job): self.log.error('invalid execution job: {}'.format(job)) job.cancel('invalid') else: self.log.debug('executing {}'.format(job)) task = asyncio.ensure_future(self._execute(job), loop=self.loop) task.add_done_callback(job.finish) task.add_done_callback(L(self.job_done)(job, _)) self.current[job.client] = job return job.status
def infer_gaps_in_tree(df_seq, tree, id_col='id', sequence_col='sequence'): """Adds a character matrix to DendroPy tree and infers gaps using Fitch's algorithm. Infer gaps in sequences at ancestral nodes. """ taxa = tree.taxon_namespace # Get alignment as fasta alignment = df_seq.phylo.to_fasta(id_col=id_col, id_only=True, sequence_col=sequence_col) # Build a Sequence data matrix from Dendropy data = dendropy.ProteinCharacterMatrix.get( data=alignment, schema="fasta", taxon_namespace=taxa) # Construct a map object between sequence data and tree data. taxon_state_sets_map = data.taxon_state_sets_map(gaps_as_missing=False) # Fitch algorithm to determine placement of gaps dendropy.model.parsimony.fitch_down_pass(tree.postorder_node_iter(), taxon_state_sets_map=taxon_state_sets_map) dendropy.model.parsimony.fitch_up_pass(tree.preorder_node_iter()) return tree
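# Hedged usage sketch for infer_gaps_in_tree(); it assumes a phylopandas-style
# DataFrame exposing `.phylo.to_fasta()` with 'id' and 'sequence' columns,
# which is what the function itself relies on, and a newick tree file whose
# taxon labels match those ids. The file path is a placeholder.
def example_infer_gaps(df_seq, tree_file):
    """Illustrative only: load a newick tree and infer ancestral gaps."""
    tree = dendropy.Tree.get(path=tree_file, schema='newick')
    return infer_gaps_in_tree(df_seq, tree,
                              id_col='id', sequence_col='sequence')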
def nvim_io_recover(self, io: NvimIORecover[A]) -> NvimIO[B]: '''calls `map` to shift the recover execution to flat_map_nvim_io ''' return eval_step(self.vim)(io.map(lambda a: a))
def read_codeml_output(filename, df, altall_cutoff=0.2):
    """Read codeml file.
    """
    # Read paml output.
    with open(filename, 'r') as f:
        data = f.read()

    # Rip all trees out of the codeml output.
    regex = re.compile(r'\([()\w\:. ,]+;')
    trees = regex.findall(data)

    # First tree in codeml file is the original input tree
    tip_tree = dendropy.Tree.get(data=trees[0], schema='newick')

    # Third tree in codeml file is the ancestor tree.
    anc_tree = dendropy.Tree.get(data=trees[2], schema='newick')

    # Main tree to return
    tree = tip_tree

    # Map ancestors onto main tree object
    ancestors = anc_tree.internal_nodes()
    for i, node in enumerate(tree.internal_nodes()):
        node.label = ancestors[i].label

    # Map nodes onto dataframe.
    df['reconstruct_label'] = None
    for node in tree.postorder_node_iter():
        # Ignore parent node
        if node.parent_node is None:
            pass

        elif node.is_leaf():
            node_label = node.taxon.label
            parent_label = node.parent_node.label

            # Set node label.
            df.loc[df.uid == node_label, 'reconstruct_label'] = node_label

            # Set parent label.
            parent_id = df.loc[df.uid == node_label, 'parent'].values[0]
            df.loc[df.id == parent_id, 'reconstruct_label'] = node.parent_node.label

        elif node.is_internal():
            label = node.label
            parent_id = df.loc[df.reconstruct_label == label, 'parent'].values[0]
            df.loc[df.id == parent_id, 'reconstruct_label'] = node.parent_node.label

    # Compile a regular expression to find blocks of data for internal nodes
    node_regex = re.compile(r"Prob distribution at node [0-9]+, by site[-\w():.\s]+\n")

    # Strip the node number from this block of data.
    node_num_regex = re.compile("[0-9]+")

    # Get dataframes for all ancestors.
    df['ml_sequence'] = None
    df['ml_posterior'] = None
    df['alt_sequence'] = None
    df['alt_posterior'] = None
    for node in node_regex.findall(data):
        # Get node label
        node_label = node_num_regex.search(node).group(0)

        # Compile regex for matching site data
        site_regex = re.compile(r"(?:\w\(\w.\w{3}\) )+")

        # Iterate through each match for site data.
        ml_sequence, ml_posterior, alt_sequence, alt_posterior = [], [], [], []
        for site in site_regex.findall(node):
            # Iterate through residues
            scores = [float(site[i+2:i+7]) for i in range(0, len(site), 9)]
            residues = [site[i] for i in range(0, len(site), 9)]

            # Get the indices of sorted scores
            sorted_score_index = [i[0] for i in sorted(
                enumerate(scores), key=lambda x: x[1], reverse=True)]
            ml_idx = sorted_score_index[0]
            alt_idx = sorted_score_index[1]

            # Keep the ML residue and its posterior.
            ml_sequence.append(residues[ml_idx])
            ml_posterior.append(scores[ml_idx])

            # Fall back to the ML residue if the alternative state is
            # below the AltAll cutoff.
            if scores[alt_idx] < altall_cutoff:
                alt_idx = ml_idx

            alt_sequence.append(residues[alt_idx])
            alt_posterior.append(scores[alt_idx])

        keys = [
            "ml_sequence",
            "ml_posterior",
            "alt_sequence",
            "alt_posterior"
        ]

        vals = [
            "".join(ml_sequence),
            sum(ml_posterior) / len(ml_posterior),
            "".join(alt_sequence),
            sum(alt_posterior) / len(alt_posterior),
        ]

        df.loc[df.reconstruct_label == node_label, keys] = vals

    return df
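# Hedged usage sketch for read_codeml_output(); the result path is a
# placeholder and `df` is assumed to already carry the 'uid', 'id', and
# 'parent' columns the parser joins against.
def example_read_codeml(df):
    """Illustrative only: extract ML and AltAll ancestral sequences and list
    the ancestors where the two reconstructions differ.
    """
    df = read_codeml_output('results/codeml_output.txt', df,
                            altall_cutoff=0.2)
    return df[df.ml_sequence != df.alt_sequence]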
def ugettext(message, context=None): """Always return a stripped string, localized if possible""" stripped = strip_whitespace(message) message = add_context(context, stripped) if context else stripped ret = django_ugettext(message) # If the context isn't found, we need to return the string without it return stripped if ret == message else ret
def ungettext(singular, plural, number, context=None): """Always return a stripped string, localized if possible""" singular_stripped = strip_whitespace(singular) plural_stripped = strip_whitespace(plural) if context: singular = add_context(context, singular_stripped) plural = add_context(context, plural_stripped) else: singular = singular_stripped plural = plural_stripped ret = django_nugettext(singular, plural, number) # If the context isn't found, the string is returned as it came if ret == singular: return singular_stripped elif ret == plural: return plural_stripped return ret
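# Hedged usage sketch for the translation wrappers above: template whitespace
# is collapsed before lookup and an optional msgctxt-style context can be
# supplied; the strings below are placeholders.
def example_translation_wrappers():
    """Illustrative only: translate a whitespace-heavy template string and a
    plural form, each with a context.
    """
    title = ugettext("Download\n    the report", context="button")
    label = ungettext("{n} file", "{n} files", 3, context="upload widget")
    return title, label.format(n=3)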
def install_jinja_translations(): """ Install our gettext and ngettext functions into Jinja2's environment. """ class Translation(object): """ We pass this object to jinja so it can find our gettext implementation. If we pass the GNUTranslation object directly, it won't have our context and whitespace stripping action. """ ugettext = staticmethod(ugettext) ungettext = staticmethod(ungettext) import jingo jingo.env.install_gettext_translations(Translation)
def activate(locale): """ Override django's utils.translation.activate(). Django forces files to be named django.mo (http://code.djangoproject.com/ticket/6376). Since that's dumb and we want to be able to load different files depending on what part of the site the user is in, we'll make our own function here. """ if INSTALL_JINJA_TRANSLATIONS: install_jinja_translations() if django.VERSION >= (1, 3): django_trans._active.value = _activate(locale) else: from django.utils.thread_support import currentThread django_trans._active[currentThread()] = _activate(locale)
def tweak_message(message): """We piggyback on jinja2's babel_extract() (really, Babel's extract_* functions) but they don't support some things we need so this function will tweak the message. Specifically: 1) We strip whitespace from the msgid. Jinja2 will only strip whitespace from the ends of a string so linebreaks show up in your .po files still. 2) Babel doesn't support context (msgctxt). We hack that in ourselves here. """ if isinstance(message, basestring): message = strip_whitespace(message) elif isinstance(message, tuple): # A tuple of 2 has context, 3 is plural, 4 is plural with context if len(message) == 2: message = add_context(message[1], message[0]) elif len(message) == 3: if all(isinstance(x, basestring) for x in message[:2]): singular, plural, num = message message = (strip_whitespace(singular), strip_whitespace(plural), num) elif len(message) == 4: singular, plural, num, ctxt = message message = (add_context(ctxt, strip_whitespace(singular)), add_context(ctxt, strip_whitespace(plural)), num) return message
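# Hedged sketch of the shapes tweak_message() accepts, matching the unpacking
# in the code above: a bare string, a (message, context) pair, a
# (singular, plural, n) triple, or a (singular, plural, n, context) quadruple.
def example_tweak_message():
    """Illustrative only: normalise the message shapes Babel hands us."""
    # Plain string: whitespace collapsed.
    print(tweak_message("Save\n    changes"))
    # (message, context): context folded into the msgid.
    print(tweak_message(("Save changes", "toolbar")))
    # (singular, plural, n): both forms stripped.
    print(tweak_message(("{n} item", "{n} items", 2)))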
def exclusive_ns(guard: StateGuard[A], desc: str, thunk: Callable[..., NS[A, B]], *a: Any) -> Do: '''this is the central unsafe function, using a lock and updating the state in `guard` in-place. ''' yield guard.acquire() log.debug2(lambda: f'exclusive: {desc}') state, response = yield N.ensure_failure(thunk(*a).run(guard.state), guard.release) yield N.delay(lambda v: unsafe_update_state(guard, state)) yield guard.release() log.debug2(lambda: f'release: {desc}') yield N.pure(response)
def _percent(data, part, total): """ Calculate a percentage. """ try: return round(100 * float(data[part]) / float(data[total]), 1) except ZeroDivisionError: return 0
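# Hedged sketch showing how the stats view derives a hit rate with _percent(),
# including the divide-by-zero guard; the numbers are made up.
def example_percent():
    """Illustrative only: compute a get-hit rate from a stats dict."""
    stats = {'get_hits': 940, 'cmd_get': 1000}
    assert _percent(stats, 'get_hits', 'cmd_get') == 94.0
    assert _percent({'get_hits': 0, 'cmd_get': 0}, 'get_hits', 'cmd_get') == 0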
def _get_cache_stats(server_name=None): """ Get stats info. """ server_info = {} for svr in mc_client.get_stats(): svr_info = svr[0].split(' ') svr_name = svr_info[0] svr_stats = svr[1] svr_stats['bytes_percent'] = _percent(svr_stats, 'bytes', 'limit_maxbytes') svr_stats['get_hit_rate'] = _percent(svr_stats, 'get_hits', 'cmd_get') svr_stats['get_miss_rate'] = _percent(svr_stats, 'get_misses', 'cmd_get') if server_name and server_name == svr_name: return svr_stats server_info[svr_name] = svr_stats return server_info
def _get_cache_slabs(server_name=None): """ Get slabs info. """ server_info = {} for svr in mc_client.get_slabs(): svr_info = svr[0].split(' ') svr_name = svr_info[0] if server_name and server_name == svr_name: return svr[1] server_info[svr_name] = svr[1] return server_info
def _context_data(data, request=None): """ Add admin global context, for compatibility with Django 1.7 """ try: return dict(site.each_context(request).items() + data.items()) except AttributeError: return data
def server_status(request): """ Return the status of all servers. """ data = { 'cache_stats': _get_cache_stats(), 'can_get_slabs': hasattr(mc_client, 'get_slabs'), } return render_to_response('memcache_admin/server_status.html', data, RequestContext(request))
def dashboard(request): """ Show the dashboard. """ # mc_client will be a dict if memcached is not configured if not isinstance(mc_client, dict): cache_stats = _get_cache_stats() else: cache_stats = None if cache_stats: data = _context_data({ 'title': _('Memcache Dashboard'), 'cache_stats': cache_stats, 'can_get_slabs': hasattr(mc_client, 'get_slabs'), 'REFRESH_RATE': SETTINGS['REFRESH_RATE'], }, request) template = 'memcache_admin/dashboard.html' else: data = _context_data({ 'title': _('Memcache Dashboard - Error'), 'error_message': _('Unable to connect to a memcache server.'), }, request) template = 'memcache_admin/dashboard_error.html' return render_to_response(template, data, RequestContext(request))
def stats(request, server_name): """ Show server statistics. """ server_name = server_name.strip('/') data = _context_data({ 'title': _('Memcache Statistics for %s') % server_name, 'cache_stats': _get_cache_stats(server_name), }, request) return render_to_response('memcache_admin/stats.html', data, RequestContext(request))
def slabs(request, server_name): """ Show server slabs. """ data = _context_data({ 'title': _('Memcache Slabs for %s') % server_name, 'cache_slabs': _get_cache_slabs(server_name), }, request) return render_to_response('memcache_admin/slabs.html', data, RequestContext(request))
def human_bytes(value): """ Convert a byte value into a human-readable format. """ value = float(value) if value >= 1073741824: gigabytes = value / 1073741824 size = '%.2f GB' % gigabytes elif value >= 1048576: megabytes = value / 1048576 size = '%.2f MB' % megabytes elif value >= 1024: kilobytes = value / 1024 size = '%.2f KB' % kilobytes else: size = '%.2f B' % value return size
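# Hedged sketch of human_bytes(); it picks the largest unit the value reaches
# and always formats to two decimal places.
def example_human_bytes():
    """Illustrative only: spot-check the byte formatting helper."""
    assert human_bytes(512) == '512.00 B'
    assert human_bytes(2048) == '2.00 KB'
    assert human_bytes(1048576) == '1.00 MB'
    assert human_bytes(3 * 1073741824) == '3.00 GB'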
def find_config(self, children):
    """
    Find a config in our children so we can fill in variables in our
    other children with its data.
    """
    named_config = None
    found_config = None

    # first see if we got a kwarg named 'config', as this guy is special
    if 'config' in children:
        if type(children['config']) == str:
            children['config'] = ConfigFile(children['config'])
        elif isinstance(children['config'], Config):
            # already a Config, nothing to convert
            pass
        elif type(children['config']) == dict:
            children['config'] = Config(data=children['config'])
        else:
            raise TypeError("Don't know how to turn {} into a Config".format(
                type(children['config'])))
        named_config = children['config']

    # next check the other kwargs
    for k in children:
        if isinstance(children[k], Config):
            found_config = children[k]

    # if we still don't have a config, see if there's a directory with one
    for k in children:
        if isinstance(children[k], Directory):
            for j in children[k]._children:
                if j == 'config' and not named_config:
                    named_config = children[k]._children[j]
                if isinstance(children[k]._children[j], Config):
                    found_config = children[k]._children[j]

    if named_config:
        return named_config
    else:
        return found_config
def add(self, **kwargs): """ Add objects to the environment. """ for key in kwargs: if type(kwargs[key]) == str: self._children[key] = Directory(kwargs[key]) else: self._children[key] = kwargs[key] self._children[key]._env = self self._children[key].apply_config(ConfigApplicator(self.config)) self._children[key].prepare()
def apply_config(self, applicator): """ Replace any config tokens in the file's path with values from the config. """ if type(self._fpath) == str: self._fpath = applicator.apply(self._fpath)
def path(self): """ Get the path to the file relative to its parent. """ if self._parent: return os.path.join(self._parent.path, self._fpath) else: return self._fpath
def read(self): """ Read and return the contents of the file. """ with open(self.path) as f: d = f.read() return d