partition
stringclasses
3 values
func_name
stringlengths
1
134
docstring
stringlengths
1
46.9k
path
stringlengths
4
223
original_string
stringlengths
75
104k
code
stringlengths
75
104k
docstring_tokens
listlengths
1
1.97k
repo
stringlengths
7
55
language
stringclasses
1 value
url
stringlengths
87
315
code_tokens
listlengths
19
28.4k
sha
stringlengths
40
40
test
save
Export a numpy array to a png file. Arguments: filename (str): A filename to which to save the png data numpy_data (numpy.ndarray OR str): The numpy array to save to png. OR a string: If a string is provded, it should be a binary png str Returns: str. The expanded filename that now holds the png data Raises: ValueError: If the save fails; for instance if the binary string data cannot be coerced into a png, or perhaps your numpy.ndarray is ill-formed?
ndio/convert/png.py
def save(filename, numpy_data): """ Export a numpy array to a png file. Arguments: filename (str): A filename to which to save the png data numpy_data (numpy.ndarray OR str): The numpy array to save to png. OR a string: If a string is provded, it should be a binary png str Returns: str. The expanded filename that now holds the png data Raises: ValueError: If the save fails; for instance if the binary string data cannot be coerced into a png, or perhaps your numpy.ndarray is ill-formed? """ # Expand filename to be absolute png_filename = os.path.expanduser(filename) if type(numpy_data) is str: fp = open(png_filename, "wb") fp.write(numpy_data) fp.close() return png_filename try: if numpy_data.dtype.name != 'uint8': m = 'I' img = Image.fromarray(numpy_data, mode=m) else: img = Image.fromarray(numpy_data) img.save(png_filename) except Exception as e: raise ValueError("Could not save png file {0}.".format(png_filename)) return png_filename
def save(filename, numpy_data): """ Export a numpy array to a png file. Arguments: filename (str): A filename to which to save the png data numpy_data (numpy.ndarray OR str): The numpy array to save to png. OR a string: If a string is provded, it should be a binary png str Returns: str. The expanded filename that now holds the png data Raises: ValueError: If the save fails; for instance if the binary string data cannot be coerced into a png, or perhaps your numpy.ndarray is ill-formed? """ # Expand filename to be absolute png_filename = os.path.expanduser(filename) if type(numpy_data) is str: fp = open(png_filename, "wb") fp.write(numpy_data) fp.close() return png_filename try: if numpy_data.dtype.name != 'uint8': m = 'I' img = Image.fromarray(numpy_data, mode=m) else: img = Image.fromarray(numpy_data) img.save(png_filename) except Exception as e: raise ValueError("Could not save png file {0}.".format(png_filename)) return png_filename
[ "Export", "a", "numpy", "array", "to", "a", "png", "file", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/convert/png.py#L31-L66
[ "def", "save", "(", "filename", ",", "numpy_data", ")", ":", "# Expand filename to be absolute", "png_filename", "=", "os", ".", "path", ".", "expanduser", "(", "filename", ")", "if", "type", "(", "numpy_data", ")", "is", "str", ":", "fp", "=", "open", "(", "png_filename", ",", "\"wb\"", ")", "fp", ".", "write", "(", "numpy_data", ")", "fp", ".", "close", "(", ")", "return", "png_filename", "try", ":", "if", "numpy_data", ".", "dtype", ".", "name", "!=", "'uint8'", ":", "m", "=", "'I'", "img", "=", "Image", ".", "fromarray", "(", "numpy_data", ",", "mode", "=", "m", ")", "else", ":", "img", "=", "Image", ".", "fromarray", "(", "numpy_data", ")", "img", ".", "save", "(", "png_filename", ")", "except", "Exception", "as", "e", ":", "raise", "ValueError", "(", "\"Could not save png file {0}.\"", ".", "format", "(", "png_filename", ")", ")", "return", "png_filename" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
save_collection
Export a numpy array to a set of png files, with each Z-index 2D array as its own 2D file. Arguments: png_filename_base: A filename template, such as "my-image-*.png" which will lead to a collection of files named "my-image-0.png", "my-image-1.png", etc. numpy_data: The numpy array data to save to png. Returns: Array. A list of expanded filenames that hold png data.
ndio/convert/png.py
def save_collection(png_filename_base, numpy_data, start_layers_at=1): """ Export a numpy array to a set of png files, with each Z-index 2D array as its own 2D file. Arguments: png_filename_base: A filename template, such as "my-image-*.png" which will lead to a collection of files named "my-image-0.png", "my-image-1.png", etc. numpy_data: The numpy array data to save to png. Returns: Array. A list of expanded filenames that hold png data. """ file_ext = png_filename_base.split('.')[-1] if file_ext in ['png']: # Filename is "name*.ext", set file_base to "name*". file_base = '.'.join(png_filename_base.split('.')[:-1]) else: # Filename is "name*", set file_base to "name*". # That is, extension wasn't included. file_base = png_filename_base file_ext = ".png" file_base_array = file_base.split('*') # The array of filenames to return output_files = [] # Filename 0-padding i = start_layers_at for layer in numpy_data: layer_filename = (str(i).zfill(6)).join(file_base_array) + file_ext output_files.append(save(layer_filename, layer)) i += 1 return output_files
def save_collection(png_filename_base, numpy_data, start_layers_at=1): """ Export a numpy array to a set of png files, with each Z-index 2D array as its own 2D file. Arguments: png_filename_base: A filename template, such as "my-image-*.png" which will lead to a collection of files named "my-image-0.png", "my-image-1.png", etc. numpy_data: The numpy array data to save to png. Returns: Array. A list of expanded filenames that hold png data. """ file_ext = png_filename_base.split('.')[-1] if file_ext in ['png']: # Filename is "name*.ext", set file_base to "name*". file_base = '.'.join(png_filename_base.split('.')[:-1]) else: # Filename is "name*", set file_base to "name*". # That is, extension wasn't included. file_base = png_filename_base file_ext = ".png" file_base_array = file_base.split('*') # The array of filenames to return output_files = [] # Filename 0-padding i = start_layers_at for layer in numpy_data: layer_filename = (str(i).zfill(6)).join(file_base_array) + file_ext output_files.append(save(layer_filename, layer)) i += 1 return output_files
[ "Export", "a", "numpy", "array", "to", "a", "set", "of", "png", "files", "with", "each", "Z", "-", "index", "2D", "array", "as", "its", "own", "2D", "file", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/convert/png.py#L69-L105
[ "def", "save_collection", "(", "png_filename_base", ",", "numpy_data", ",", "start_layers_at", "=", "1", ")", ":", "file_ext", "=", "png_filename_base", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "if", "file_ext", "in", "[", "'png'", "]", ":", "# Filename is \"name*.ext\", set file_base to \"name*\".", "file_base", "=", "'.'", ".", "join", "(", "png_filename_base", ".", "split", "(", "'.'", ")", "[", ":", "-", "1", "]", ")", "else", ":", "# Filename is \"name*\", set file_base to \"name*\".", "# That is, extension wasn't included.", "file_base", "=", "png_filename_base", "file_ext", "=", "\".png\"", "file_base_array", "=", "file_base", ".", "split", "(", "'*'", ")", "# The array of filenames to return", "output_files", "=", "[", "]", "# Filename 0-padding", "i", "=", "start_layers_at", "for", "layer", "in", "numpy_data", ":", "layer_filename", "=", "(", "str", "(", "i", ")", ".", "zfill", "(", "6", ")", ")", ".", "join", "(", "file_base_array", ")", "+", "file_ext", "output_files", ".", "append", "(", "save", "(", "layer_filename", ",", "layer", ")", ")", "i", "+=", "1", "return", "output_files" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
load_collection
Import all files matching the filename base given with `png_filename_base`. Images are ordered by alphabetical order, which means that you *MUST* 0-pad your numbers if they span a power of ten (e.g. 0999-1000 or 09-10). This is handled automatically by its complementary function, `png.save_collection`. Also, look at how nicely these documentation lines are all the same length! Arguments: png_filename_base (str): An asterisk-wildcard string that should refer to all PNGs in the stack. All *s are replaced according to regular cmd-line expansion rules. See the 'glob' documentation for details Returns: A numpy array holding a 3D dataset
ndio/convert/png.py
def load_collection(png_filename_base): """ Import all files matching the filename base given with `png_filename_base`. Images are ordered by alphabetical order, which means that you *MUST* 0-pad your numbers if they span a power of ten (e.g. 0999-1000 or 09-10). This is handled automatically by its complementary function, `png.save_collection`. Also, look at how nicely these documentation lines are all the same length! Arguments: png_filename_base (str): An asterisk-wildcard string that should refer to all PNGs in the stack. All *s are replaced according to regular cmd-line expansion rules. See the 'glob' documentation for details Returns: A numpy array holding a 3D dataset """ # We expect images to be indexed by their alphabetical order. files = glob.glob(png_filename_base) files.sort() numpy_data = [] for f in files: numpy_data.append(load(f)) return numpy.concatenate(numpy_data)
def load_collection(png_filename_base): """ Import all files matching the filename base given with `png_filename_base`. Images are ordered by alphabetical order, which means that you *MUST* 0-pad your numbers if they span a power of ten (e.g. 0999-1000 or 09-10). This is handled automatically by its complementary function, `png.save_collection`. Also, look at how nicely these documentation lines are all the same length! Arguments: png_filename_base (str): An asterisk-wildcard string that should refer to all PNGs in the stack. All *s are replaced according to regular cmd-line expansion rules. See the 'glob' documentation for details Returns: A numpy array holding a 3D dataset """ # We expect images to be indexed by their alphabetical order. files = glob.glob(png_filename_base) files.sort() numpy_data = [] for f in files: numpy_data.append(load(f)) return numpy.concatenate(numpy_data)
[ "Import", "all", "files", "matching", "the", "filename", "base", "given", "with", "png_filename_base", ".", "Images", "are", "ordered", "by", "alphabetical", "order", "which", "means", "that", "you", "*", "MUST", "*", "0", "-", "pad", "your", "numbers", "if", "they", "span", "a", "power", "of", "ten", "(", "e", ".", "g", ".", "0999", "-", "1000", "or", "09", "-", "10", ")", ".", "This", "is", "handled", "automatically", "by", "its", "complementary", "function", "png", ".", "save_collection", ".", "Also", "look", "at", "how", "nicely", "these", "documentation", "lines", "are", "all", "the", "same", "length!" ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/convert/png.py#L108-L131
[ "def", "load_collection", "(", "png_filename_base", ")", ":", "# We expect images to be indexed by their alphabetical order.", "files", "=", "glob", ".", "glob", "(", "png_filename_base", ")", "files", ".", "sort", "(", ")", "numpy_data", "=", "[", "]", "for", "f", "in", "files", ":", "numpy_data", ".", "append", "(", "load", "(", "f", ")", ")", "return", "numpy", ".", "concatenate", "(", "numpy_data", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
Status.print_workspace
Print workspace status.
yoda/subcommand/status.py
def print_workspace(self, name): """Print workspace status.""" path_list = find_path(name, self.config) if len(path_list) == 0: self.logger.error("No matches for `%s`" % name) return False for name, path in path_list.items(): self.print_status(name, path)
def print_workspace(self, name): """Print workspace status.""" path_list = find_path(name, self.config) if len(path_list) == 0: self.logger.error("No matches for `%s`" % name) return False for name, path in path_list.items(): self.print_status(name, path)
[ "Print", "workspace", "status", "." ]
Numergy/yoda
python
https://github.com/Numergy/yoda/blob/109f0e9441130488b0155f05883ef6531cf46ee9/yoda/subcommand/status.py#L56-L65
[ "def", "print_workspace", "(", "self", ",", "name", ")", ":", "path_list", "=", "find_path", "(", "name", ",", "self", ".", "config", ")", "if", "len", "(", "path_list", ")", "==", "0", ":", "self", ".", "logger", ".", "error", "(", "\"No matches for `%s`\"", "%", "name", ")", "return", "False", "for", "name", ",", "path", "in", "path_list", ".", "items", "(", ")", ":", "self", ".", "print_status", "(", "name", ",", "path", ")" ]
109f0e9441130488b0155f05883ef6531cf46ee9
test
Status.print_status
Print repository status.
yoda/subcommand/status.py
def print_status(self, repo_name, repo_path): """Print repository status.""" color = Color() self.logger.info(color.colored( "=> [%s] %s" % (repo_name, repo_path), "green")) try: repo = Repository(repo_path) repo.status() except RepositoryError as e: self.logger.error(e) pass print("\n")
def print_status(self, repo_name, repo_path): """Print repository status.""" color = Color() self.logger.info(color.colored( "=> [%s] %s" % (repo_name, repo_path), "green")) try: repo = Repository(repo_path) repo.status() except RepositoryError as e: self.logger.error(e) pass print("\n")
[ "Print", "repository", "status", "." ]
Numergy/yoda
python
https://github.com/Numergy/yoda/blob/109f0e9441130488b0155f05883ef6531cf46ee9/yoda/subcommand/status.py#L67-L78
[ "def", "print_status", "(", "self", ",", "repo_name", ",", "repo_path", ")", ":", "color", "=", "Color", "(", ")", "self", ".", "logger", ".", "info", "(", "color", ".", "colored", "(", "\"=> [%s] %s\"", "%", "(", "repo_name", ",", "repo_path", ")", ",", "\"green\"", ")", ")", "try", ":", "repo", "=", "Repository", "(", "repo_path", ")", "repo", ".", "status", "(", ")", "except", "RepositoryError", "as", "e", ":", "self", ".", "logger", ".", "error", "(", "e", ")", "pass", "print", "(", "\"\\n\"", ")" ]
109f0e9441130488b0155f05883ef6531cf46ee9
test
data.get_block_size
Gets the block-size for a given token at a given resolution. Arguments: token (str): The token to inspect resolution (int : None): The resolution at which to inspect data. If none is specified, uses the minimum available. Returns: int[3]: The xyz blocksize.
ndio/remote/data.py
def get_block_size(self, token, resolution=None): """ Gets the block-size for a given token at a given resolution. Arguments: token (str): The token to inspect resolution (int : None): The resolution at which to inspect data. If none is specified, uses the minimum available. Returns: int[3]: The xyz blocksize. """ cdims = self.get_metadata(token)['dataset']['cube_dimension'] if resolution is None: resolution = min(cdims.keys()) return cdims[str(resolution)]
def get_block_size(self, token, resolution=None): """ Gets the block-size for a given token at a given resolution. Arguments: token (str): The token to inspect resolution (int : None): The resolution at which to inspect data. If none is specified, uses the minimum available. Returns: int[3]: The xyz blocksize. """ cdims = self.get_metadata(token)['dataset']['cube_dimension'] if resolution is None: resolution = min(cdims.keys()) return cdims[str(resolution)]
[ "Gets", "the", "block", "-", "size", "for", "a", "given", "token", "at", "a", "given", "resolution", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/data.py#L87-L102
[ "def", "get_block_size", "(", "self", ",", "token", ",", "resolution", "=", "None", ")", ":", "cdims", "=", "self", ".", "get_metadata", "(", "token", ")", "[", "'dataset'", "]", "[", "'cube_dimension'", "]", "if", "resolution", "is", "None", ":", "resolution", "=", "min", "(", "cdims", ".", "keys", "(", ")", ")", "return", "cdims", "[", "str", "(", "resolution", ")", "]" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
data.get_xy_slice
Return a binary-encoded, decompressed 2d image. You should specify a 'token' and 'channel' pair. For image data, users should use the channel 'image.' Arguments: token (str): Token to identify data to download channel (str): Channel resolution (int): Resolution level Q_start (int):` The lower bound of dimension 'Q' Q_stop (int): The upper bound of dimension 'Q' z_index (int): The z-slice to image Returns: str: binary image data
ndio/remote/data.py
def get_xy_slice(self, token, channel, x_start, x_stop, y_start, y_stop, z_index, resolution=0): """ Return a binary-encoded, decompressed 2d image. You should specify a 'token' and 'channel' pair. For image data, users should use the channel 'image.' Arguments: token (str): Token to identify data to download channel (str): Channel resolution (int): Resolution level Q_start (int):` The lower bound of dimension 'Q' Q_stop (int): The upper bound of dimension 'Q' z_index (int): The z-slice to image Returns: str: binary image data """ vol = self.get_cutout(token, channel, x_start, x_stop, y_start, y_stop, z_index, z_index + 1, resolution) vol = numpy.squeeze(vol) # 3D volume to 2D slice return vol
def get_xy_slice(self, token, channel, x_start, x_stop, y_start, y_stop, z_index, resolution=0): """ Return a binary-encoded, decompressed 2d image. You should specify a 'token' and 'channel' pair. For image data, users should use the channel 'image.' Arguments: token (str): Token to identify data to download channel (str): Channel resolution (int): Resolution level Q_start (int):` The lower bound of dimension 'Q' Q_stop (int): The upper bound of dimension 'Q' z_index (int): The z-slice to image Returns: str: binary image data """ vol = self.get_cutout(token, channel, x_start, x_stop, y_start, y_stop, z_index, z_index + 1, resolution) vol = numpy.squeeze(vol) # 3D volume to 2D slice return vol
[ "Return", "a", "binary", "-", "encoded", "decompressed", "2d", "image", ".", "You", "should", "specify", "a", "token", "and", "channel", "pair", ".", "For", "image", "data", "users", "should", "use", "the", "channel", "image", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/data.py#L125-L151
[ "def", "get_xy_slice", "(", "self", ",", "token", ",", "channel", ",", "x_start", ",", "x_stop", ",", "y_start", ",", "y_stop", ",", "z_index", ",", "resolution", "=", "0", ")", ":", "vol", "=", "self", ".", "get_cutout", "(", "token", ",", "channel", ",", "x_start", ",", "x_stop", ",", "y_start", ",", "y_stop", ",", "z_index", ",", "z_index", "+", "1", ",", "resolution", ")", "vol", "=", "numpy", ".", "squeeze", "(", "vol", ")", "# 3D volume to 2D slice", "return", "vol" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
data.get_volume
Get a RAMONVolume volumetric cutout from the neurodata server. Arguments: token (str): Token to identify data to download channel (str): Channel resolution (int): Resolution level Q_start (int): The lower bound of dimension 'Q' Q_stop (int): The upper bound of dimension 'Q' block_size (int[3]): Block size of this dataset neariso (bool : False): Passes the 'neariso' param to the cutout. If you don't know what this means, ignore it! Returns: ndio.ramon.RAMONVolume: Downloaded data.
ndio/remote/data.py
def get_volume(self, token, channel, x_start, x_stop, y_start, y_stop, z_start, z_stop, resolution=1, block_size=DEFAULT_BLOCK_SIZE, neariso=False): """ Get a RAMONVolume volumetric cutout from the neurodata server. Arguments: token (str): Token to identify data to download channel (str): Channel resolution (int): Resolution level Q_start (int): The lower bound of dimension 'Q' Q_stop (int): The upper bound of dimension 'Q' block_size (int[3]): Block size of this dataset neariso (bool : False): Passes the 'neariso' param to the cutout. If you don't know what this means, ignore it! Returns: ndio.ramon.RAMONVolume: Downloaded data. """ size = (x_stop - x_start) * (y_stop - y_start) * (z_stop - z_start) volume = ramon.RAMONVolume() volume.xyz_offset = [x_start, y_start, z_start] volume.resolution = resolution volume.cutout = self.get_cutout(token, channel, x_start, x_stop, y_start, y_stop, z_start, z_stop, resolution=resolution, block_size=block_size, neariso=neariso) return volume
def get_volume(self, token, channel, x_start, x_stop, y_start, y_stop, z_start, z_stop, resolution=1, block_size=DEFAULT_BLOCK_SIZE, neariso=False): """ Get a RAMONVolume volumetric cutout from the neurodata server. Arguments: token (str): Token to identify data to download channel (str): Channel resolution (int): Resolution level Q_start (int): The lower bound of dimension 'Q' Q_stop (int): The upper bound of dimension 'Q' block_size (int[3]): Block size of this dataset neariso (bool : False): Passes the 'neariso' param to the cutout. If you don't know what this means, ignore it! Returns: ndio.ramon.RAMONVolume: Downloaded data. """ size = (x_stop - x_start) * (y_stop - y_start) * (z_stop - z_start) volume = ramon.RAMONVolume() volume.xyz_offset = [x_start, y_start, z_start] volume.resolution = resolution volume.cutout = self.get_cutout(token, channel, x_start, x_stop, y_start, y_stop, z_start, z_stop, resolution=resolution, block_size=block_size, neariso=neariso) return volume
[ "Get", "a", "RAMONVolume", "volumetric", "cutout", "from", "the", "neurodata", "server", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/data.py#L167-L201
[ "def", "get_volume", "(", "self", ",", "token", ",", "channel", ",", "x_start", ",", "x_stop", ",", "y_start", ",", "y_stop", ",", "z_start", ",", "z_stop", ",", "resolution", "=", "1", ",", "block_size", "=", "DEFAULT_BLOCK_SIZE", ",", "neariso", "=", "False", ")", ":", "size", "=", "(", "x_stop", "-", "x_start", ")", "*", "(", "y_stop", "-", "y_start", ")", "*", "(", "z_stop", "-", "z_start", ")", "volume", "=", "ramon", ".", "RAMONVolume", "(", ")", "volume", ".", "xyz_offset", "=", "[", "x_start", ",", "y_start", ",", "z_start", "]", "volume", ".", "resolution", "=", "resolution", "volume", ".", "cutout", "=", "self", ".", "get_cutout", "(", "token", ",", "channel", ",", "x_start", ",", "x_stop", ",", "y_start", ",", "y_stop", ",", "z_start", ",", "z_stop", ",", "resolution", "=", "resolution", ",", "block_size", "=", "block_size", ",", "neariso", "=", "neariso", ")", "return", "volume" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
data.get_cutout
Get volumetric cutout data from the neurodata server. Arguments: token (str): Token to identify data to download channel (str): Channel resolution (int): Resolution level Q_start (int): The lower bound of dimension 'Q' Q_stop (int): The upper bound of dimension 'Q' block_size (int[3]): Block size of this dataset. If not provided, ndio uses the metadata of this tokenchannel to set. If you find that your downloads are timing out or otherwise failing, it may be wise to start off by making this smaller. neariso (bool : False): Passes the 'neariso' param to the cutout. If you don't know what this means, ignore it! Returns: numpy.ndarray: Downloaded data.
ndio/remote/data.py
def get_cutout(self, token, channel, x_start, x_stop, y_start, y_stop, z_start, z_stop, t_start=0, t_stop=1, resolution=1, block_size=DEFAULT_BLOCK_SIZE, neariso=False): """ Get volumetric cutout data from the neurodata server. Arguments: token (str): Token to identify data to download channel (str): Channel resolution (int): Resolution level Q_start (int): The lower bound of dimension 'Q' Q_stop (int): The upper bound of dimension 'Q' block_size (int[3]): Block size of this dataset. If not provided, ndio uses the metadata of this tokenchannel to set. If you find that your downloads are timing out or otherwise failing, it may be wise to start off by making this smaller. neariso (bool : False): Passes the 'neariso' param to the cutout. If you don't know what this means, ignore it! Returns: numpy.ndarray: Downloaded data. """ if block_size is None: # look up block size from metadata block_size = self.get_block_size(token, resolution) origin = self.get_image_offset(token, resolution) # If z_stop - z_start is < 16, backend still pulls minimum 16 slices if (z_stop - z_start) < 16: z_slices = 16 else: z_slices = z_stop - z_start # Calculate size of the data to be downloaded. size = (x_stop - x_start) * (y_stop - y_start) * z_slices * 4 # Switch which download function to use based on which libraries are # available in this version of python. 
if six.PY2: dl_func = self._get_cutout_blosc_no_chunking elif six.PY3: dl_func = self._get_cutout_no_chunking else: raise ValueError("Invalid Python version.") if size < self._chunk_threshold: vol = dl_func(token, channel, resolution, x_start, x_stop, y_start, y_stop, z_start, z_stop, t_start, t_stop, neariso=neariso) vol = numpy.rollaxis(vol, 1) vol = numpy.rollaxis(vol, 2) return vol else: from ndio.utils.parallel import block_compute blocks = block_compute(x_start, x_stop, y_start, y_stop, z_start, z_stop, origin, block_size) vol = numpy.zeros(((z_stop - z_start), (y_stop - y_start), (x_stop - x_start))) for b in blocks: data = dl_func(token, channel, resolution, b[0][0], b[0][1], b[1][0], b[1][1], b[2][0], b[2][1], 0, 1, neariso=neariso) if b == blocks[0]: # first block vol = numpy.zeros(((z_stop - z_start), (y_stop - y_start), (x_stop - x_start)), dtype=data.dtype) vol[b[2][0] - z_start: b[2][1] - z_start, b[1][0] - y_start: b[1][1] - y_start, b[0][0] - x_start: b[0][1] - x_start] = data vol = numpy.rollaxis(vol, 1) vol = numpy.rollaxis(vol, 2) return vol
def get_cutout(self, token, channel, x_start, x_stop, y_start, y_stop, z_start, z_stop, t_start=0, t_stop=1, resolution=1, block_size=DEFAULT_BLOCK_SIZE, neariso=False): """ Get volumetric cutout data from the neurodata server. Arguments: token (str): Token to identify data to download channel (str): Channel resolution (int): Resolution level Q_start (int): The lower bound of dimension 'Q' Q_stop (int): The upper bound of dimension 'Q' block_size (int[3]): Block size of this dataset. If not provided, ndio uses the metadata of this tokenchannel to set. If you find that your downloads are timing out or otherwise failing, it may be wise to start off by making this smaller. neariso (bool : False): Passes the 'neariso' param to the cutout. If you don't know what this means, ignore it! Returns: numpy.ndarray: Downloaded data. """ if block_size is None: # look up block size from metadata block_size = self.get_block_size(token, resolution) origin = self.get_image_offset(token, resolution) # If z_stop - z_start is < 16, backend still pulls minimum 16 slices if (z_stop - z_start) < 16: z_slices = 16 else: z_slices = z_stop - z_start # Calculate size of the data to be downloaded. size = (x_stop - x_start) * (y_stop - y_start) * z_slices * 4 # Switch which download function to use based on which libraries are # available in this version of python. 
if six.PY2: dl_func = self._get_cutout_blosc_no_chunking elif six.PY3: dl_func = self._get_cutout_no_chunking else: raise ValueError("Invalid Python version.") if size < self._chunk_threshold: vol = dl_func(token, channel, resolution, x_start, x_stop, y_start, y_stop, z_start, z_stop, t_start, t_stop, neariso=neariso) vol = numpy.rollaxis(vol, 1) vol = numpy.rollaxis(vol, 2) return vol else: from ndio.utils.parallel import block_compute blocks = block_compute(x_start, x_stop, y_start, y_stop, z_start, z_stop, origin, block_size) vol = numpy.zeros(((z_stop - z_start), (y_stop - y_start), (x_stop - x_start))) for b in blocks: data = dl_func(token, channel, resolution, b[0][0], b[0][1], b[1][0], b[1][1], b[2][0], b[2][1], 0, 1, neariso=neariso) if b == blocks[0]: # first block vol = numpy.zeros(((z_stop - z_start), (y_stop - y_start), (x_stop - x_start)), dtype=data.dtype) vol[b[2][0] - z_start: b[2][1] - z_start, b[1][0] - y_start: b[1][1] - y_start, b[0][0] - x_start: b[0][1] - x_start] = data vol = numpy.rollaxis(vol, 1) vol = numpy.rollaxis(vol, 2) return vol
[ "Get", "volumetric", "cutout", "data", "from", "the", "neurodata", "server", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/data.py#L203-L294
[ "def", "get_cutout", "(", "self", ",", "token", ",", "channel", ",", "x_start", ",", "x_stop", ",", "y_start", ",", "y_stop", ",", "z_start", ",", "z_stop", ",", "t_start", "=", "0", ",", "t_stop", "=", "1", ",", "resolution", "=", "1", ",", "block_size", "=", "DEFAULT_BLOCK_SIZE", ",", "neariso", "=", "False", ")", ":", "if", "block_size", "is", "None", ":", "# look up block size from metadata", "block_size", "=", "self", ".", "get_block_size", "(", "token", ",", "resolution", ")", "origin", "=", "self", ".", "get_image_offset", "(", "token", ",", "resolution", ")", "# If z_stop - z_start is < 16, backend still pulls minimum 16 slices", "if", "(", "z_stop", "-", "z_start", ")", "<", "16", ":", "z_slices", "=", "16", "else", ":", "z_slices", "=", "z_stop", "-", "z_start", "# Calculate size of the data to be downloaded.", "size", "=", "(", "x_stop", "-", "x_start", ")", "*", "(", "y_stop", "-", "y_start", ")", "*", "z_slices", "*", "4", "# Switch which download function to use based on which libraries are", "# available in this version of python.", "if", "six", ".", "PY2", ":", "dl_func", "=", "self", ".", "_get_cutout_blosc_no_chunking", "elif", "six", ".", "PY3", ":", "dl_func", "=", "self", ".", "_get_cutout_no_chunking", "else", ":", "raise", "ValueError", "(", "\"Invalid Python version.\"", ")", "if", "size", "<", "self", ".", "_chunk_threshold", ":", "vol", "=", "dl_func", "(", "token", ",", "channel", ",", "resolution", ",", "x_start", ",", "x_stop", ",", "y_start", ",", "y_stop", ",", "z_start", ",", "z_stop", ",", "t_start", ",", "t_stop", ",", "neariso", "=", "neariso", ")", "vol", "=", "numpy", ".", "rollaxis", "(", "vol", ",", "1", ")", "vol", "=", "numpy", ".", "rollaxis", "(", "vol", ",", "2", ")", "return", "vol", "else", ":", "from", "ndio", ".", "utils", ".", "parallel", "import", "block_compute", "blocks", "=", "block_compute", "(", "x_start", ",", "x_stop", ",", "y_start", ",", "y_stop", ",", "z_start", ",", "z_stop", ",", "origin", ",", 
"block_size", ")", "vol", "=", "numpy", ".", "zeros", "(", "(", "(", "z_stop", "-", "z_start", ")", ",", "(", "y_stop", "-", "y_start", ")", ",", "(", "x_stop", "-", "x_start", ")", ")", ")", "for", "b", "in", "blocks", ":", "data", "=", "dl_func", "(", "token", ",", "channel", ",", "resolution", ",", "b", "[", "0", "]", "[", "0", "]", ",", "b", "[", "0", "]", "[", "1", "]", ",", "b", "[", "1", "]", "[", "0", "]", ",", "b", "[", "1", "]", "[", "1", "]", ",", "b", "[", "2", "]", "[", "0", "]", ",", "b", "[", "2", "]", "[", "1", "]", ",", "0", ",", "1", ",", "neariso", "=", "neariso", ")", "if", "b", "==", "blocks", "[", "0", "]", ":", "# first block", "vol", "=", "numpy", ".", "zeros", "(", "(", "(", "z_stop", "-", "z_start", ")", ",", "(", "y_stop", "-", "y_start", ")", ",", "(", "x_stop", "-", "x_start", ")", ")", ",", "dtype", "=", "data", ".", "dtype", ")", "vol", "[", "b", "[", "2", "]", "[", "0", "]", "-", "z_start", ":", "b", "[", "2", "]", "[", "1", "]", "-", "z_start", ",", "b", "[", "1", "]", "[", "0", "]", "-", "y_start", ":", "b", "[", "1", "]", "[", "1", "]", "-", "y_start", ",", "b", "[", "0", "]", "[", "0", "]", "-", "x_start", ":", "b", "[", "0", "]", "[", "1", "]", "-", "x_start", "]", "=", "data", "vol", "=", "numpy", ".", "rollaxis", "(", "vol", ",", "1", ")", "vol", "=", "numpy", ".", "rollaxis", "(", "vol", ",", "2", ")", "return", "vol" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
data.post_cutout
Post a cutout to the server. Arguments: token (str) channel (str) x_start (int) y_start (int) z_start (int) data (numpy.ndarray): A numpy array of data. Pass in (x, y, z) resolution (int : 0): Resolution at which to insert the data Returns: bool: True on success Raises: RemoteDataUploadError: if there's an issue during upload.
ndio/remote/data.py
def post_cutout(self, token, channel, x_start, y_start, z_start, data, resolution=0): """ Post a cutout to the server. Arguments: token (str) channel (str) x_start (int) y_start (int) z_start (int) data (numpy.ndarray): A numpy array of data. Pass in (x, y, z) resolution (int : 0): Resolution at which to insert the data Returns: bool: True on success Raises: RemoteDataUploadError: if there's an issue during upload. """ datatype = self.get_proj_info(token)['channels'][channel]['datatype'] if data.dtype.name != datatype: data = data.astype(datatype) data = numpy.rollaxis(data, 1) data = numpy.rollaxis(data, 2) if six.PY3 or data.nbytes > 1.5e9: ul_func = self._post_cutout_no_chunking_npz else: ul_func = self._post_cutout_no_chunking_blosc if data.size < self._chunk_threshold: return ul_func(token, channel, x_start, y_start, z_start, data, resolution) return self._post_cutout_with_chunking(token, channel, x_start, y_start, z_start, data, resolution, ul_func)
def post_cutout(self, token, channel, x_start, y_start, z_start, data, resolution=0): """ Post a cutout to the server. Arguments: token (str) channel (str) x_start (int) y_start (int) z_start (int) data (numpy.ndarray): A numpy array of data. Pass in (x, y, z) resolution (int : 0): Resolution at which to insert the data Returns: bool: True on success Raises: RemoteDataUploadError: if there's an issue during upload. """ datatype = self.get_proj_info(token)['channels'][channel]['datatype'] if data.dtype.name != datatype: data = data.astype(datatype) data = numpy.rollaxis(data, 1) data = numpy.rollaxis(data, 2) if six.PY3 or data.nbytes > 1.5e9: ul_func = self._post_cutout_no_chunking_npz else: ul_func = self._post_cutout_no_chunking_blosc if data.size < self._chunk_threshold: return ul_func(token, channel, x_start, y_start, z_start, data, resolution) return self._post_cutout_with_chunking(token, channel, x_start, y_start, z_start, data, resolution, ul_func)
[ "Post", "a", "cutout", "to", "the", "server", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/data.py#L356-L399
[ "def", "post_cutout", "(", "self", ",", "token", ",", "channel", ",", "x_start", ",", "y_start", ",", "z_start", ",", "data", ",", "resolution", "=", "0", ")", ":", "datatype", "=", "self", ".", "get_proj_info", "(", "token", ")", "[", "'channels'", "]", "[", "channel", "]", "[", "'datatype'", "]", "if", "data", ".", "dtype", ".", "name", "!=", "datatype", ":", "data", "=", "data", ".", "astype", "(", "datatype", ")", "data", "=", "numpy", ".", "rollaxis", "(", "data", ",", "1", ")", "data", "=", "numpy", ".", "rollaxis", "(", "data", ",", "2", ")", "if", "six", ".", "PY3", "or", "data", ".", "nbytes", ">", "1.5e9", ":", "ul_func", "=", "self", ".", "_post_cutout_no_chunking_npz", "else", ":", "ul_func", "=", "self", ".", "_post_cutout_no_chunking_blosc", "if", "data", ".", "size", "<", "self", ".", "_chunk_threshold", ":", "return", "ul_func", "(", "token", ",", "channel", ",", "x_start", ",", "y_start", ",", "z_start", ",", "data", ",", "resolution", ")", "return", "self", ".", "_post_cutout_with_chunking", "(", "token", ",", "channel", ",", "x_start", ",", "y_start", ",", "z_start", ",", "data", ",", "resolution", ",", "ul_func", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
data._post_cutout_no_chunking_blosc
Accepts data in zyx. !!!
ndio/remote/data.py
def _post_cutout_no_chunking_blosc(self, token, channel, x_start, y_start, z_start, data, resolution): """ Accepts data in zyx. !!! """ data = numpy.expand_dims(data, axis=0) blosc_data = blosc.pack_array(data) url = self.url("{}/{}/blosc/{}/{},{}/{},{}/{},{}/0,0/".format( token, channel, resolution, x_start, x_start + data.shape[3], y_start, y_start + data.shape[2], z_start, z_start + data.shape[1] )) req = self.remote_utils.post_url(url, data=blosc_data, headers={ 'Content-Type': 'application/octet-stream' }) if req.status_code is not 200: raise RemoteDataUploadError(req.text) else: return True
def _post_cutout_no_chunking_blosc(self, token, channel, x_start, y_start, z_start, data, resolution): """ Accepts data in zyx. !!! """ data = numpy.expand_dims(data, axis=0) blosc_data = blosc.pack_array(data) url = self.url("{}/{}/blosc/{}/{},{}/{},{}/{},{}/0,0/".format( token, channel, resolution, x_start, x_start + data.shape[3], y_start, y_start + data.shape[2], z_start, z_start + data.shape[1] )) req = self.remote_utils.post_url(url, data=blosc_data, headers={ 'Content-Type': 'application/octet-stream' }) if req.status_code is not 200: raise RemoteDataUploadError(req.text) else: return True
[ "Accepts", "data", "in", "zyx", ".", "!!!" ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/data.py#L447-L470
[ "def", "_post_cutout_no_chunking_blosc", "(", "self", ",", "token", ",", "channel", ",", "x_start", ",", "y_start", ",", "z_start", ",", "data", ",", "resolution", ")", ":", "data", "=", "numpy", ".", "expand_dims", "(", "data", ",", "axis", "=", "0", ")", "blosc_data", "=", "blosc", ".", "pack_array", "(", "data", ")", "url", "=", "self", ".", "url", "(", "\"{}/{}/blosc/{}/{},{}/{},{}/{},{}/0,0/\"", ".", "format", "(", "token", ",", "channel", ",", "resolution", ",", "x_start", ",", "x_start", "+", "data", ".", "shape", "[", "3", "]", ",", "y_start", ",", "y_start", "+", "data", ".", "shape", "[", "2", "]", ",", "z_start", ",", "z_start", "+", "data", ".", "shape", "[", "1", "]", ")", ")", "req", "=", "self", ".", "remote_utils", ".", "post_url", "(", "url", ",", "data", "=", "blosc_data", ",", "headers", "=", "{", "'Content-Type'", ":", "'application/octet-stream'", "}", ")", "if", "req", ".", "status_code", "is", "not", "200", ":", "raise", "RemoteDataUploadError", "(", "req", ".", "text", ")", "else", ":", "return", "True" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
load
Import a TIFF file into a numpy array. Arguments: tiff_filename: A string filename of a TIFF datafile Returns: A numpy array with data from the TIFF file
ndio/convert/tiff.py
def load(tiff_filename): """ Import a TIFF file into a numpy array. Arguments: tiff_filename: A string filename of a TIFF datafile Returns: A numpy array with data from the TIFF file """ # Expand filename to be absolute tiff_filename = os.path.expanduser(tiff_filename) try: img = tiff.imread(tiff_filename) except Exception as e: raise ValueError("Could not load file {0} for conversion." .format(tiff_filename)) raise return numpy.array(img)
def load(tiff_filename): """ Import a TIFF file into a numpy array. Arguments: tiff_filename: A string filename of a TIFF datafile Returns: A numpy array with data from the TIFF file """ # Expand filename to be absolute tiff_filename = os.path.expanduser(tiff_filename) try: img = tiff.imread(tiff_filename) except Exception as e: raise ValueError("Could not load file {0} for conversion." .format(tiff_filename)) raise return numpy.array(img)
[ "Import", "a", "TIFF", "file", "into", "a", "numpy", "array", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/convert/tiff.py#L8-L28
[ "def", "load", "(", "tiff_filename", ")", ":", "# Expand filename to be absolute", "tiff_filename", "=", "os", ".", "path", ".", "expanduser", "(", "tiff_filename", ")", "try", ":", "img", "=", "tiff", ".", "imread", "(", "tiff_filename", ")", "except", "Exception", "as", "e", ":", "raise", "ValueError", "(", "\"Could not load file {0} for conversion.\"", ".", "format", "(", "tiff_filename", ")", ")", "raise", "return", "numpy", ".", "array", "(", "img", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
save
Export a numpy array to a TIFF file. Arguments: tiff_filename: A filename to which to save the TIFF data numpy_data: The numpy array to save to TIFF Returns: String. The expanded filename that now holds the TIFF data
ndio/convert/tiff.py
def save(tiff_filename, numpy_data): """ Export a numpy array to a TIFF file. Arguments: tiff_filename: A filename to which to save the TIFF data numpy_data: The numpy array to save to TIFF Returns: String. The expanded filename that now holds the TIFF data """ # Expand filename to be absolute tiff_filename = os.path.expanduser(tiff_filename) if type(numpy_data) is str: fp = open(png_filename, "wb") fp.write(numpy_data) fp.close() return png_filename try: img = tiff.imsave(tiff_filename, numpy_data) except Exception as e: raise ValueError("Could not save TIFF file {0}.".format(tiff_filename)) return tiff_filename
def save(tiff_filename, numpy_data): """ Export a numpy array to a TIFF file. Arguments: tiff_filename: A filename to which to save the TIFF data numpy_data: The numpy array to save to TIFF Returns: String. The expanded filename that now holds the TIFF data """ # Expand filename to be absolute tiff_filename = os.path.expanduser(tiff_filename) if type(numpy_data) is str: fp = open(png_filename, "wb") fp.write(numpy_data) fp.close() return png_filename try: img = tiff.imsave(tiff_filename, numpy_data) except Exception as e: raise ValueError("Could not save TIFF file {0}.".format(tiff_filename)) return tiff_filename
[ "Export", "a", "numpy", "array", "to", "a", "TIFF", "file", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/convert/tiff.py#L31-L56
[ "def", "save", "(", "tiff_filename", ",", "numpy_data", ")", ":", "# Expand filename to be absolute", "tiff_filename", "=", "os", ".", "path", ".", "expanduser", "(", "tiff_filename", ")", "if", "type", "(", "numpy_data", ")", "is", "str", ":", "fp", "=", "open", "(", "png_filename", ",", "\"wb\"", ")", "fp", ".", "write", "(", "numpy_data", ")", "fp", ".", "close", "(", ")", "return", "png_filename", "try", ":", "img", "=", "tiff", ".", "imsave", "(", "tiff_filename", ",", "numpy_data", ")", "except", "Exception", "as", "e", ":", "raise", "ValueError", "(", "\"Could not save TIFF file {0}.\"", ".", "format", "(", "tiff_filename", ")", ")", "return", "tiff_filename" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
load_tiff_multipage
Load a multipage tiff into a single variable in x,y,z format. Arguments: tiff_filename: Filename of source data dtype: data type to use for the returned tensor Returns: Array containing contents from input tiff file in xyz order
ndio/convert/tiff.py
def load_tiff_multipage(tiff_filename, dtype='float32'): """ Load a multipage tiff into a single variable in x,y,z format. Arguments: tiff_filename: Filename of source data dtype: data type to use for the returned tensor Returns: Array containing contents from input tiff file in xyz order """ if not os.path.isfile(tiff_filename): raise RuntimeError('could not find file "%s"' % tiff_filename) # load the data from multi-layer TIF files data = tiff.imread(tiff_filename) im = [] while True: Xi = numpy.array(data, dtype=dtype) if Xi.ndim == 2: Xi = Xi[numpy.newaxis, ...] # add slice dimension im.append(Xi) try: data.seek(data.tell()+1) except EOFError: break # this just means hit end of file (not really an error) im = numpy.concatenate(im, axis=0) # list of 2d -> tensor im = numpy.rollaxis(im, 1) im = numpy.rollaxis(im, 2) return im
def load_tiff_multipage(tiff_filename, dtype='float32'): """ Load a multipage tiff into a single variable in x,y,z format. Arguments: tiff_filename: Filename of source data dtype: data type to use for the returned tensor Returns: Array containing contents from input tiff file in xyz order """ if not os.path.isfile(tiff_filename): raise RuntimeError('could not find file "%s"' % tiff_filename) # load the data from multi-layer TIF files data = tiff.imread(tiff_filename) im = [] while True: Xi = numpy.array(data, dtype=dtype) if Xi.ndim == 2: Xi = Xi[numpy.newaxis, ...] # add slice dimension im.append(Xi) try: data.seek(data.tell()+1) except EOFError: break # this just means hit end of file (not really an error) im = numpy.concatenate(im, axis=0) # list of 2d -> tensor im = numpy.rollaxis(im, 1) im = numpy.rollaxis(im, 2) return im
[ "Load", "a", "multipage", "tiff", "into", "a", "single", "variable", "in", "x", "y", "z", "format", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/convert/tiff.py#L98-L133
[ "def", "load_tiff_multipage", "(", "tiff_filename", ",", "dtype", "=", "'float32'", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "tiff_filename", ")", ":", "raise", "RuntimeError", "(", "'could not find file \"%s\"'", "%", "tiff_filename", ")", "# load the data from multi-layer TIF files", "data", "=", "tiff", ".", "imread", "(", "tiff_filename", ")", "im", "=", "[", "]", "while", "True", ":", "Xi", "=", "numpy", ".", "array", "(", "data", ",", "dtype", "=", "dtype", ")", "if", "Xi", ".", "ndim", "==", "2", ":", "Xi", "=", "Xi", "[", "numpy", ".", "newaxis", ",", "...", "]", "# add slice dimension", "im", ".", "append", "(", "Xi", ")", "try", ":", "data", ".", "seek", "(", "data", ".", "tell", "(", ")", "+", "1", ")", "except", "EOFError", ":", "break", "# this just means hit end of file (not really an error)", "im", "=", "numpy", ".", "concatenate", "(", "im", ",", "axis", "=", "0", ")", "# list of 2d -> tensor", "im", "=", "numpy", ".", "rollaxis", "(", "im", ",", "1", ")", "im", "=", "numpy", ".", "rollaxis", "(", "im", ",", "2", ")", "return", "im" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
Config.write
Write config in configuration file. Data must me a dict.
yoda/config.py
def write(self): """ Write config in configuration file. Data must me a dict. """ file = open(self.config_file, "w+") file.write(yaml.dump(dict(self), default_flow_style=False)) file.close()
def write(self): """ Write config in configuration file. Data must me a dict. """ file = open(self.config_file, "w+") file.write(yaml.dump(dict(self), default_flow_style=False)) file.close()
[ "Write", "config", "in", "configuration", "file", ".", "Data", "must", "me", "a", "dict", "." ]
Numergy/yoda
python
https://github.com/Numergy/yoda/blob/109f0e9441130488b0155f05883ef6531cf46ee9/yoda/config.py#L50-L57
[ "def", "write", "(", "self", ")", ":", "file", "=", "open", "(", "self", ".", "config_file", ",", "\"w+\"", ")", "file", ".", "write", "(", "yaml", ".", "dump", "(", "dict", "(", "self", ")", ",", "default_flow_style", "=", "False", ")", ")", "file", ".", "close", "(", ")" ]
109f0e9441130488b0155f05883ef6531cf46ee9
test
Bzr.clone
Clone repository from url.
yoda/adapter/bzr.py
def clone(self, url): """Clone repository from url.""" return self.execute("%s branch %s %s" % (self.executable, url, self.path))
def clone(self, url): """Clone repository from url.""" return self.execute("%s branch %s %s" % (self.executable, url, self.path))
[ "Clone", "repository", "from", "url", "." ]
Numergy/yoda
python
https://github.com/Numergy/yoda/blob/109f0e9441130488b0155f05883ef6531cf46ee9/yoda/adapter/bzr.py#L31-L34
[ "def", "clone", "(", "self", ",", "url", ")", ":", "return", "self", ".", "execute", "(", "\"%s branch %s %s\"", "%", "(", "self", ".", "executable", ",", "url", ",", "self", ".", "path", ")", ")" ]
109f0e9441130488b0155f05883ef6531cf46ee9
test
get_version
Get version from package resources.
yoda/version.py
def get_version(): """Get version from package resources.""" requirement = pkg_resources.Requirement.parse("yoda") provider = pkg_resources.get_provider(requirement) return provider.version
def get_version(): """Get version from package resources.""" requirement = pkg_resources.Requirement.parse("yoda") provider = pkg_resources.get_provider(requirement) return provider.version
[ "Get", "version", "from", "package", "resources", "." ]
Numergy/yoda
python
https://github.com/Numergy/yoda/blob/109f0e9441130488b0155f05883ef6531cf46ee9/yoda/version.py#L20-L24
[ "def", "get_version", "(", ")", ":", "requirement", "=", "pkg_resources", ".", "Requirement", ".", "parse", "(", "\"yoda\"", ")", "provider", "=", "pkg_resources", ".", "get_provider", "(", "requirement", ")", "return", "provider", ".", "version" ]
109f0e9441130488b0155f05883ef6531cf46ee9
test
mix_and_match
Mixing and matching positional args and keyword options.
examples/complex_example.py
def mix_and_match(name, greeting='Hello', yell=False): '''Mixing and matching positional args and keyword options.''' say = '%s, %s' % (greeting, name) if yell: print '%s!' % say.upper() else: print '%s.' % say
def mix_and_match(name, greeting='Hello', yell=False): '''Mixing and matching positional args and keyword options.''' say = '%s, %s' % (greeting, name) if yell: print '%s!' % say.upper() else: print '%s.' % say
[ "Mixing", "and", "matching", "positional", "args", "and", "keyword", "options", "." ]
jmohr/compago
python
https://github.com/jmohr/compago/blob/8cd6a2894f7b69844b1e4f367344f51a8ef07baf/examples/complex_example.py#L33-L39
[ "def", "mix_and_match", "(", "name", ",", "greeting", "=", "'Hello'", ",", "yell", "=", "False", ")", ":", "say", "=", "'%s, %s'", "%", "(", "greeting", ",", "name", ")", "if", "yell", ":", "print", "'%s!'", "%", "say", ".", "upper", "(", ")", "else", ":", "print", "'%s.'", "%", "say" ]
8cd6a2894f7b69844b1e4f367344f51a8ef07baf
test
option_decorator
Same as mix_and_match, but using the @option decorator.
examples/complex_example.py
def option_decorator(name, greeting, yell): '''Same as mix_and_match, but using the @option decorator.''' # Use the @option decorator when you need more control over the # command line options. say = '%s, %s' % (greeting, name) if yell: print '%s!' % say.upper() else: print '%s.' % say
def option_decorator(name, greeting, yell): '''Same as mix_and_match, but using the @option decorator.''' # Use the @option decorator when you need more control over the # command line options. say = '%s, %s' % (greeting, name) if yell: print '%s!' % say.upper() else: print '%s.' % say
[ "Same", "as", "mix_and_match", "but", "using", "the" ]
jmohr/compago
python
https://github.com/jmohr/compago/blob/8cd6a2894f7b69844b1e4f367344f51a8ef07baf/examples/complex_example.py#L43-L51
[ "def", "option_decorator", "(", "name", ",", "greeting", ",", "yell", ")", ":", "# Use the @option decorator when you need more control over the", "# command line options.", "say", "=", "'%s, %s'", "%", "(", "greeting", ",", "name", ")", "if", "yell", ":", "print", "'%s!'", "%", "say", ".", "upper", "(", ")", "else", ":", "print", "'%s.'", "%", "say" ]
8cd6a2894f7b69844b1e4f367344f51a8ef07baf
test
load
Import a nifti file into a numpy array. TODO: Currently only transfers raw data for compatibility with annotation and ND formats Arguments: nifti_filename (str): A string filename of a nifti datafile Returns: A numpy array with data from the nifti file
ndio/convert/nifti.py
def load(nifti_filename): """ Import a nifti file into a numpy array. TODO: Currently only transfers raw data for compatibility with annotation and ND formats Arguments: nifti_filename (str): A string filename of a nifti datafile Returns: A numpy array with data from the nifti file """ # Expand filename to be absolute nifti_filename = os.path.expanduser(nifti_filename) try: data = nib.load(nifti_filename) img = data.get_data() except Exception as e: raise ValueError("Could not load file {0} for conversion." .format(nifti_filename)) raise return img
def load(nifti_filename): """ Import a nifti file into a numpy array. TODO: Currently only transfers raw data for compatibility with annotation and ND formats Arguments: nifti_filename (str): A string filename of a nifti datafile Returns: A numpy array with data from the nifti file """ # Expand filename to be absolute nifti_filename = os.path.expanduser(nifti_filename) try: data = nib.load(nifti_filename) img = data.get_data() except Exception as e: raise ValueError("Could not load file {0} for conversion." .format(nifti_filename)) raise return img
[ "Import", "a", "nifti", "file", "into", "a", "numpy", "array", ".", "TODO", ":", "Currently", "only", "transfers", "raw", "data", "for", "compatibility", "with", "annotation", "and", "ND", "formats" ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/convert/nifti.py#L8-L31
[ "def", "load", "(", "nifti_filename", ")", ":", "# Expand filename to be absolute", "nifti_filename", "=", "os", ".", "path", ".", "expanduser", "(", "nifti_filename", ")", "try", ":", "data", "=", "nib", ".", "load", "(", "nifti_filename", ")", "img", "=", "data", ".", "get_data", "(", ")", "except", "Exception", "as", "e", ":", "raise", "ValueError", "(", "\"Could not load file {0} for conversion.\"", ".", "format", "(", "nifti_filename", ")", ")", "raise", "return", "img" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
save
Export a numpy array to a nifti file. TODO: currently using dummy headers and identity matrix affine transform. This can be expanded. Arguments: nifti_filename (str): A filename to which to save the nifti data numpy_data (numpy.ndarray): The numpy array to save to nifti Returns: String. The expanded filename that now holds the nifti data
ndio/convert/nifti.py
def save(nifti_filename, numpy_data): """ Export a numpy array to a nifti file. TODO: currently using dummy headers and identity matrix affine transform. This can be expanded. Arguments: nifti_filename (str): A filename to which to save the nifti data numpy_data (numpy.ndarray): The numpy array to save to nifti Returns: String. The expanded filename that now holds the nifti data """ # Expand filename to be absolute nifti_filename = os.path.expanduser(nifti_filename) try: nifti_img = nib.Nifti1Image(numpy_data, numpy.eye(4)) nib.save(nifti_img, nifti_filename) except Exception as e: raise ValueError("Could not save file {0}.".format(nifti_filename)) return nifti_filename
def save(nifti_filename, numpy_data): """ Export a numpy array to a nifti file. TODO: currently using dummy headers and identity matrix affine transform. This can be expanded. Arguments: nifti_filename (str): A filename to which to save the nifti data numpy_data (numpy.ndarray): The numpy array to save to nifti Returns: String. The expanded filename that now holds the nifti data """ # Expand filename to be absolute nifti_filename = os.path.expanduser(nifti_filename) try: nifti_img = nib.Nifti1Image(numpy_data, numpy.eye(4)) nib.save(nifti_img, nifti_filename) except Exception as e: raise ValueError("Could not save file {0}.".format(nifti_filename)) return nifti_filename
[ "Export", "a", "numpy", "array", "to", "a", "nifti", "file", ".", "TODO", ":", "currently", "using", "dummy", "headers", "and", "identity", "matrix", "affine", "transform", ".", "This", "can", "be", "expanded", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/convert/nifti.py#L34-L55
[ "def", "save", "(", "nifti_filename", ",", "numpy_data", ")", ":", "# Expand filename to be absolute", "nifti_filename", "=", "os", ".", "path", ".", "expanduser", "(", "nifti_filename", ")", "try", ":", "nifti_img", "=", "nib", ".", "Nifti1Image", "(", "numpy_data", ",", "numpy", ".", "eye", "(", "4", ")", ")", "nib", ".", "save", "(", "nifti_img", ",", "nifti_filename", ")", "except", "Exception", "as", "e", ":", "raise", "ValueError", "(", "\"Could not save file {0}.\"", ".", "format", "(", "nifti_filename", ")", ")", "return", "nifti_filename" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
neuroRemote.ping
Return the status-code of the API (estimated using the public-tokens lookup page). Arguments: suffix (str : 'public_tokens/'): The url endpoint to check Returns: int: status code
ndio/remote/neuroRemote.py
def ping(self, suffix='public_tokens/'): """ Return the status-code of the API (estimated using the public-tokens lookup page). Arguments: suffix (str : 'public_tokens/'): The url endpoint to check Returns: int: status code """ return self.remote_utils.ping(super(neuroRemote, self).url(), suffix)
def ping(self, suffix='public_tokens/'): """ Return the status-code of the API (estimated using the public-tokens lookup page). Arguments: suffix (str : 'public_tokens/'): The url endpoint to check Returns: int: status code """ return self.remote_utils.ping(super(neuroRemote, self).url(), suffix)
[ "Return", "the", "status", "-", "code", "of", "the", "API", "(", "estimated", "using", "the", "public", "-", "tokens", "lookup", "page", ")", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/neuroRemote.py#L120-L131
[ "def", "ping", "(", "self", ",", "suffix", "=", "'public_tokens/'", ")", ":", "return", "self", ".", "remote_utils", ".", "ping", "(", "super", "(", "neuroRemote", ",", "self", ")", ".", "url", "(", ")", ",", "suffix", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
neuroRemote.url
Return a constructed URL, appending an optional suffix (uri path). Arguments: suffix (str : ""): The suffix to append to the end of the URL Returns: str: The complete URL
ndio/remote/neuroRemote.py
def url(self, suffix=""): """ Return a constructed URL, appending an optional suffix (uri path). Arguments: suffix (str : ""): The suffix to append to the end of the URL Returns: str: The complete URL """ return super(neuroRemote, self).url('{}/'.format(self._ext) + suffix)
def url(self, suffix=""): """ Return a constructed URL, appending an optional suffix (uri path). Arguments: suffix (str : ""): The suffix to append to the end of the URL Returns: str: The complete URL """ return super(neuroRemote, self).url('{}/'.format(self._ext) + suffix)
[ "Return", "a", "constructed", "URL", "appending", "an", "optional", "suffix", "(", "uri", "path", ")", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/neuroRemote.py#L133-L144
[ "def", "url", "(", "self", ",", "suffix", "=", "\"\"", ")", ":", "return", "super", "(", "neuroRemote", ",", "self", ")", ".", "url", "(", "'{}/'", ".", "format", "(", "self", ".", "_ext", ")", "+", "suffix", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
neuroRemote.reserve_ids
Requests a list of next-available-IDs from the server. Arguments: quantity (int): The number of IDs to reserve Returns: int[quantity]: List of IDs you've been granted
ndio/remote/neuroRemote.py
def reserve_ids(self, token, channel, quantity): """ Requests a list of next-available-IDs from the server. Arguments: quantity (int): The number of IDs to reserve Returns: int[quantity]: List of IDs you've been granted """ quantity = str(quantity) url = self.url("{}/{}/reserve/{}/".format(token, channel, quantity)) req = self.remote_utils.get_url(url) if req.status_code is not 200: raise RemoteDataNotFoundError('Invalid req: ' + req.status_code) out = req.json() return [out[0] + i for i in range(out[1])]
def reserve_ids(self, token, channel, quantity): """ Requests a list of next-available-IDs from the server. Arguments: quantity (int): The number of IDs to reserve Returns: int[quantity]: List of IDs you've been granted """ quantity = str(quantity) url = self.url("{}/{}/reserve/{}/".format(token, channel, quantity)) req = self.remote_utils.get_url(url) if req.status_code is not 200: raise RemoteDataNotFoundError('Invalid req: ' + req.status_code) out = req.json() return [out[0] + i for i in range(out[1])]
[ "Requests", "a", "list", "of", "next", "-", "available", "-", "IDs", "from", "the", "server", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/neuroRemote.py#L182-L198
[ "def", "reserve_ids", "(", "self", ",", "token", ",", "channel", ",", "quantity", ")", ":", "quantity", "=", "str", "(", "quantity", ")", "url", "=", "self", ".", "url", "(", "\"{}/{}/reserve/{}/\"", ".", "format", "(", "token", ",", "channel", ",", "quantity", ")", ")", "req", "=", "self", ".", "remote_utils", ".", "get_url", "(", "url", ")", "if", "req", ".", "status_code", "is", "not", "200", ":", "raise", "RemoteDataNotFoundError", "(", "'Invalid req: '", "+", "req", ".", "status_code", ")", "out", "=", "req", ".", "json", "(", ")", "return", "[", "out", "[", "0", "]", "+", "i", "for", "i", "in", "range", "(", "out", "[", "1", "]", ")", "]" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
neuroRemote.merge_ids
Call the restful endpoint to merge two RAMON objects into one. Arguments: token (str): The token to inspect channel (str): The channel to inspect ids (int[]): the list of the IDs to merge delete (bool : False): Whether to delete after merging. Returns: json: The ID as returned by ndstore
ndio/remote/neuroRemote.py
def merge_ids(self, token, channel, ids, delete=False): """ Call the restful endpoint to merge two RAMON objects into one. Arguments: token (str): The token to inspect channel (str): The channel to inspect ids (int[]): the list of the IDs to merge delete (bool : False): Whether to delete after merging. Returns: json: The ID as returned by ndstore """ url = self.url() + "/merge/{}/".format(','.join([str(i) for i in ids])) req = self.remote_utils.get_url(url) if req.status_code is not 200: raise RemoteDataUploadError('Could not merge ids {}'.format( ','.join([str(i) for i in ids]))) if delete: self.delete_ramon(token, channel, ids[1:]) return True
def merge_ids(self, token, channel, ids, delete=False): """ Call the restful endpoint to merge two RAMON objects into one. Arguments: token (str): The token to inspect channel (str): The channel to inspect ids (int[]): the list of the IDs to merge delete (bool : False): Whether to delete after merging. Returns: json: The ID as returned by ndstore """ url = self.url() + "/merge/{}/".format(','.join([str(i) for i in ids])) req = self.remote_utils.get_url(url) if req.status_code is not 200: raise RemoteDataUploadError('Could not merge ids {}'.format( ','.join([str(i) for i in ids]))) if delete: self.delete_ramon(token, channel, ids[1:]) return True
[ "Call", "the", "restful", "endpoint", "to", "merge", "two", "RAMON", "objects", "into", "one", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/neuroRemote.py#L201-L221
[ "def", "merge_ids", "(", "self", ",", "token", ",", "channel", ",", "ids", ",", "delete", "=", "False", ")", ":", "url", "=", "self", ".", "url", "(", ")", "+", "\"/merge/{}/\"", ".", "format", "(", "','", ".", "join", "(", "[", "str", "(", "i", ")", "for", "i", "in", "ids", "]", ")", ")", "req", "=", "self", ".", "remote_utils", ".", "get_url", "(", "url", ")", "if", "req", ".", "status_code", "is", "not", "200", ":", "raise", "RemoteDataUploadError", "(", "'Could not merge ids {}'", ".", "format", "(", "','", ".", "join", "(", "[", "str", "(", "i", ")", "for", "i", "in", "ids", "]", ")", ")", ")", "if", "delete", ":", "self", ".", "delete_ramon", "(", "token", ",", "channel", ",", "ids", "[", "1", ":", "]", ")", "return", "True" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
neuroRemote.create_channels
Creates channels given a dictionary in 'new_channels_data' , 'dataset' name, and 'token' (project) name. Arguments: token (str): Token to identify project dataset (str): Dataset name to identify dataset to download from new_channels_data (dict): New channel data to upload into new channels Returns: bool: Process completed succesfully or not
ndio/remote/neuroRemote.py
def create_channels(self, dataset, token, new_channels_data): """ Creates channels given a dictionary in 'new_channels_data' , 'dataset' name, and 'token' (project) name. Arguments: token (str): Token to identify project dataset (str): Dataset name to identify dataset to download from new_channels_data (dict): New channel data to upload into new channels Returns: bool: Process completed succesfully or not """ channels = {} for channel_new in new_channels_data: self._check_channel(channel_new.name) if channel_new.channel_type not in ['image', 'annotation']: raise ValueError('Channel type must be ' + 'neuroRemote.IMAGE or ' + 'neuroRemote.ANNOTATION.') if channel_new.readonly * 1 not in [0, 1]: raise ValueError("readonly must be 0 (False) or 1 (True).") channels[channel_new.name] = { "channel_name": channel_new.name, "channel_type": channel_new.channel_type, "datatype": channel_new.dtype, "readonly": channel_new.readonly * 1 } req = requests.post(self.url("/{}/project/".format(dataset) + "{}".format(token)), json={"channels": {channels}}, verify=False) if req.status_code is not 201: raise RemoteDataUploadError('Could not upload {}'.format(req.text)) else: return True
def create_channels(self, dataset, token, new_channels_data): """ Creates channels given a dictionary in 'new_channels_data' , 'dataset' name, and 'token' (project) name. Arguments: token (str): Token to identify project dataset (str): Dataset name to identify dataset to download from new_channels_data (dict): New channel data to upload into new channels Returns: bool: Process completed succesfully or not """ channels = {} for channel_new in new_channels_data: self._check_channel(channel_new.name) if channel_new.channel_type not in ['image', 'annotation']: raise ValueError('Channel type must be ' + 'neuroRemote.IMAGE or ' + 'neuroRemote.ANNOTATION.') if channel_new.readonly * 1 not in [0, 1]: raise ValueError("readonly must be 0 (False) or 1 (True).") channels[channel_new.name] = { "channel_name": channel_new.name, "channel_type": channel_new.channel_type, "datatype": channel_new.dtype, "readonly": channel_new.readonly * 1 } req = requests.post(self.url("/{}/project/".format(dataset) + "{}".format(token)), json={"channels": {channels}}, verify=False) if req.status_code is not 201: raise RemoteDataUploadError('Could not upload {}'.format(req.text)) else: return True
[ "Creates", "channels", "given", "a", "dictionary", "in", "new_channels_data", "dataset", "name", "and", "token", "(", "project", ")", "name", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/neuroRemote.py#L224-L264
[ "def", "create_channels", "(", "self", ",", "dataset", ",", "token", ",", "new_channels_data", ")", ":", "channels", "=", "{", "}", "for", "channel_new", "in", "new_channels_data", ":", "self", ".", "_check_channel", "(", "channel_new", ".", "name", ")", "if", "channel_new", ".", "channel_type", "not", "in", "[", "'image'", ",", "'annotation'", "]", ":", "raise", "ValueError", "(", "'Channel type must be '", "+", "'neuroRemote.IMAGE or '", "+", "'neuroRemote.ANNOTATION.'", ")", "if", "channel_new", ".", "readonly", "*", "1", "not", "in", "[", "0", ",", "1", "]", ":", "raise", "ValueError", "(", "\"readonly must be 0 (False) or 1 (True).\"", ")", "channels", "[", "channel_new", ".", "name", "]", "=", "{", "\"channel_name\"", ":", "channel_new", ".", "name", ",", "\"channel_type\"", ":", "channel_new", ".", "channel_type", ",", "\"datatype\"", ":", "channel_new", ".", "dtype", ",", "\"readonly\"", ":", "channel_new", ".", "readonly", "*", "1", "}", "req", "=", "requests", ".", "post", "(", "self", ".", "url", "(", "\"/{}/project/\"", ".", "format", "(", "dataset", ")", "+", "\"{}\"", ".", "format", "(", "token", ")", ")", ",", "json", "=", "{", "\"channels\"", ":", "{", "channels", "}", "}", ",", "verify", "=", "False", ")", "if", "req", ".", "status_code", "is", "not", "201", ":", "raise", "RemoteDataUploadError", "(", "'Could not upload {}'", ".", "format", "(", "req", ".", "text", ")", ")", "else", ":", "return", "True" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
neuroRemote.propagate
Kick off the propagate function on the remote server. Arguments: token (str): The token to propagate channel (str): The channel to propagate Returns: boolean: Success
ndio/remote/neuroRemote.py
def propagate(self, token, channel): """ Kick off the propagate function on the remote server. Arguments: token (str): The token to propagate channel (str): The channel to propagate Returns: boolean: Success """ if self.get_propagate_status(token, channel) != u'0': return url = self.url('sd/{}/{}/setPropagate/1/'.format(token, channel)) req = self.remote_utils.get_url(url) if req.status_code is not 200: raise RemoteDataUploadError('Propagate fail: {}'.format(req.text)) return True
def propagate(self, token, channel): """ Kick off the propagate function on the remote server. Arguments: token (str): The token to propagate channel (str): The channel to propagate Returns: boolean: Success """ if self.get_propagate_status(token, channel) != u'0': return url = self.url('sd/{}/{}/setPropagate/1/'.format(token, channel)) req = self.remote_utils.get_url(url) if req.status_code is not 200: raise RemoteDataUploadError('Propagate fail: {}'.format(req.text)) return True
[ "Kick", "off", "the", "propagate", "function", "on", "the", "remote", "server", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/neuroRemote.py#L269-L286
[ "def", "propagate", "(", "self", ",", "token", ",", "channel", ")", ":", "if", "self", ".", "get_propagate_status", "(", "token", ",", "channel", ")", "!=", "u'0'", ":", "return", "url", "=", "self", ".", "url", "(", "'sd/{}/{}/setPropagate/1/'", ".", "format", "(", "token", ",", "channel", ")", ")", "req", "=", "self", ".", "remote_utils", ".", "get_url", "(", "url", ")", "if", "req", ".", "status_code", "is", "not", "200", ":", "raise", "RemoteDataUploadError", "(", "'Propagate fail: {}'", ".", "format", "(", "req", ".", "text", ")", ")", "return", "True" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
neuroRemote.get_propagate_status
Get the propagate status for a token/channel pair. Arguments: token (str): The token to check channel (str): The channel to check Returns: str: The status code
ndio/remote/neuroRemote.py
def get_propagate_status(self, token, channel): """ Get the propagate status for a token/channel pair. Arguments: token (str): The token to check channel (str): The channel to check Returns: str: The status code """ url = self.url('sd/{}/{}/getPropagate/'.format(token, channel)) req = self.remote_utils.get_url(url) if req.status_code is not 200: raise ValueError('Bad pair: {}/{}'.format(token, channel)) return req.text
def get_propagate_status(self, token, channel): """ Get the propagate status for a token/channel pair. Arguments: token (str): The token to check channel (str): The channel to check Returns: str: The status code """ url = self.url('sd/{}/{}/getPropagate/'.format(token, channel)) req = self.remote_utils.get_url(url) if req.status_code is not 200: raise ValueError('Bad pair: {}/{}'.format(token, channel)) return req.text
[ "Get", "the", "propagate", "status", "for", "a", "token", "/", "channel", "pair", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/neuroRemote.py#L289-L304
[ "def", "get_propagate_status", "(", "self", ",", "token", ",", "channel", ")", ":", "url", "=", "self", ".", "url", "(", "'sd/{}/{}/getPropagate/'", ".", "format", "(", "token", ",", "channel", ")", ")", "req", "=", "self", ".", "remote_utils", ".", "get_url", "(", "url", ")", "if", "req", ".", "status_code", "is", "not", "200", ":", "raise", "ValueError", "(", "'Bad pair: {}/{}'", ".", "format", "(", "token", ",", "channel", ")", ")", "return", "req", ".", "text" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
resources.create_project
Creates a project with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on hostname (str): Hostname s3backend (str): S3 region to save the data in is_public (int): 1 is public. 0 is not public. kvserver (str): Server to store key value pairs in kvengine (str): Database to store key value pairs in mdengine (str): ??? description (str): Description for your project Returns: bool: True if project created, false if not created.
ndio/remote/resources.py
def create_project(self, project_name, dataset_name, hostname, is_public, s3backend=0, kvserver='localhost', kvengine='MySQL', mdengine='MySQL', description=''): """ Creates a project with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on hostname (str): Hostname s3backend (str): S3 region to save the data in is_public (int): 1 is public. 0 is not public. kvserver (str): Server to store key value pairs in kvengine (str): Database to store key value pairs in mdengine (str): ??? description (str): Description for your project Returns: bool: True if project created, false if not created. """ url = self.url() + "/resource/dataset/{}".format( dataset_name) + "/project/{}/".format(project_name) json = { "project_name": project_name, "host": hostname, "s3backend": s3backend, "public": is_public, "kvserver": kvserver, "kvengine": kvengine, "mdengine": mdengine, "project_description": description } req = self.remote_utils.post_url(url, json=json) if req.status_code is not 201: raise RemoteDataUploadError('Could not upload {}'.format(req)) if req.content == "" or req.content == b'': return True else: return False
def create_project(self, project_name, dataset_name, hostname, is_public, s3backend=0, kvserver='localhost', kvengine='MySQL', mdengine='MySQL', description=''): """ Creates a project with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on hostname (str): Hostname s3backend (str): S3 region to save the data in is_public (int): 1 is public. 0 is not public. kvserver (str): Server to store key value pairs in kvengine (str): Database to store key value pairs in mdengine (str): ??? description (str): Description for your project Returns: bool: True if project created, false if not created. """ url = self.url() + "/resource/dataset/{}".format( dataset_name) + "/project/{}/".format(project_name) json = { "project_name": project_name, "host": hostname, "s3backend": s3backend, "public": is_public, "kvserver": kvserver, "kvengine": kvengine, "mdengine": mdengine, "project_description": description } req = self.remote_utils.post_url(url, json=json) if req.status_code is not 201: raise RemoteDataUploadError('Could not upload {}'.format(req)) if req.content == "" or req.content == b'': return True else: return False
[ "Creates", "a", "project", "with", "the", "given", "parameters", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/resources.py#L70-L119
[ "def", "create_project", "(", "self", ",", "project_name", ",", "dataset_name", ",", "hostname", ",", "is_public", ",", "s3backend", "=", "0", ",", "kvserver", "=", "'localhost'", ",", "kvengine", "=", "'MySQL'", ",", "mdengine", "=", "'MySQL'", ",", "description", "=", "''", ")", ":", "url", "=", "self", ".", "url", "(", ")", "+", "\"/resource/dataset/{}\"", ".", "format", "(", "dataset_name", ")", "+", "\"/project/{}/\"", ".", "format", "(", "project_name", ")", "json", "=", "{", "\"project_name\"", ":", "project_name", ",", "\"host\"", ":", "hostname", ",", "\"s3backend\"", ":", "s3backend", ",", "\"public\"", ":", "is_public", ",", "\"kvserver\"", ":", "kvserver", ",", "\"kvengine\"", ":", "kvengine", ",", "\"mdengine\"", ":", "mdengine", ",", "\"project_description\"", ":", "description", "}", "req", "=", "self", ".", "remote_utils", ".", "post_url", "(", "url", ",", "json", "=", "json", ")", "if", "req", ".", "status_code", "is", "not", "201", ":", "raise", "RemoteDataUploadError", "(", "'Could not upload {}'", ".", "format", "(", "req", ")", ")", "if", "req", ".", "content", "==", "\"\"", "or", "req", ".", "content", "==", "b''", ":", "return", "True", "else", ":", "return", "False" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
resources.list_projects
Lists a set of projects related to a dataset. Arguments: dataset_name (str): Dataset name to search projects for Returns: dict: Projects found based on dataset query
ndio/remote/resources.py
def list_projects(self, dataset_name): """ Lists a set of projects related to a dataset. Arguments: dataset_name (str): Dataset name to search projects for Returns: dict: Projects found based on dataset query """ url = self.url() + "/nd/resource/dataset/{}".format(dataset_name)\ + "/project/" req = self.remote_utils.get_url(url) if req.status_code is not 200: raise RemoteDataNotFoundError('Could not find {}'.format(req.text)) else: return req.json()
def list_projects(self, dataset_name): """ Lists a set of projects related to a dataset. Arguments: dataset_name (str): Dataset name to search projects for Returns: dict: Projects found based on dataset query """ url = self.url() + "/nd/resource/dataset/{}".format(dataset_name)\ + "/project/" req = self.remote_utils.get_url(url) if req.status_code is not 200: raise RemoteDataNotFoundError('Could not find {}'.format(req.text)) else: return req.json()
[ "Lists", "a", "set", "of", "projects", "related", "to", "a", "dataset", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/resources.py#L164-L182
[ "def", "list_projects", "(", "self", ",", "dataset_name", ")", ":", "url", "=", "self", ".", "url", "(", ")", "+", "\"/nd/resource/dataset/{}\"", ".", "format", "(", "dataset_name", ")", "+", "\"/project/\"", "req", "=", "self", ".", "remote_utils", ".", "get_url", "(", "url", ")", "if", "req", ".", "status_code", "is", "not", "200", ":", "raise", "RemoteDataNotFoundError", "(", "'Could not find {}'", ".", "format", "(", "req", ".", "text", ")", ")", "else", ":", "return", "req", ".", "json", "(", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
resources.create_token
Creates a token with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on token_name (str): Token name is_public (int): 1 is public. 0 is not public Returns: bool: True if project created, false if not created.
ndio/remote/resources.py
def create_token(self, token_name, project_name, dataset_name, is_public): """ Creates a token with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on token_name (str): Token name is_public (int): 1 is public. 0 is not public Returns: bool: True if project created, false if not created. """ url = self.url() + '/nd/resource/dataset/{}'.format( dataset_name) + '/project/{}'.format(project_name) + \ '/token/{}/'.format(token_name) json = { "token_name": token_name, "public": is_public } req = self.remote_utils.post_url(url, json=json) if req.status_code is not 201: raise RemoteDataUploadError('Cout not upload {}:'.format(req.text)) if req.content == "" or req.content == b'': return True else: return False
def create_token(self, token_name, project_name, dataset_name, is_public): """ Creates a token with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on token_name (str): Token name is_public (int): 1 is public. 0 is not public Returns: bool: True if project created, false if not created. """ url = self.url() + '/nd/resource/dataset/{}'.format( dataset_name) + '/project/{}'.format(project_name) + \ '/token/{}/'.format(token_name) json = { "token_name": token_name, "public": is_public } req = self.remote_utils.post_url(url, json=json) if req.status_code is not 201: raise RemoteDataUploadError('Cout not upload {}:'.format(req.text)) if req.content == "" or req.content == b'': return True else: return False
[ "Creates", "a", "token", "with", "the", "given", "parameters", ".", "Arguments", ":", "project_name", "(", "str", ")", ":", "Project", "name", "dataset_name", "(", "str", ")", ":", "Dataset", "name", "project", "is", "based", "on", "token_name", "(", "str", ")", ":", "Token", "name", "is_public", "(", "int", ")", ":", "1", "is", "public", ".", "0", "is", "not", "public", "Returns", ":", "bool", ":", "True", "if", "project", "created", "false", "if", "not", "created", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/resources.py#L184-L215
[ "def", "create_token", "(", "self", ",", "token_name", ",", "project_name", ",", "dataset_name", ",", "is_public", ")", ":", "url", "=", "self", ".", "url", "(", ")", "+", "'/nd/resource/dataset/{}'", ".", "format", "(", "dataset_name", ")", "+", "'/project/{}'", ".", "format", "(", "project_name", ")", "+", "'/token/{}/'", ".", "format", "(", "token_name", ")", "json", "=", "{", "\"token_name\"", ":", "token_name", ",", "\"public\"", ":", "is_public", "}", "req", "=", "self", ".", "remote_utils", ".", "post_url", "(", "url", ",", "json", "=", "json", ")", "if", "req", ".", "status_code", "is", "not", "201", ":", "raise", "RemoteDataUploadError", "(", "'Cout not upload {}:'", ".", "format", "(", "req", ".", "text", ")", ")", "if", "req", ".", "content", "==", "\"\"", "or", "req", ".", "content", "==", "b''", ":", "return", "True", "else", ":", "return", "False" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
resources.get_token
Get a token with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on token_name (str): Token name Returns: dict: Token info
ndio/remote/resources.py
def get_token(self, token_name, project_name, dataset_name): """ Get a token with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on token_name (str): Token name Returns: dict: Token info """ url = self.url() + "/nd/resource/dataset/{}".format(dataset_name)\ + "/project/{}".format(project_name)\ + "/token/{}/".format(token_name) req = self.remote_utils.get_url(url) if req.status_code is not 200: raise RemoteDataUploadError('Could not find {}'.format(req.text)) else: return req.json()
def get_token(self, token_name, project_name, dataset_name): """ Get a token with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on token_name (str): Token name Returns: dict: Token info """ url = self.url() + "/nd/resource/dataset/{}".format(dataset_name)\ + "/project/{}".format(project_name)\ + "/token/{}/".format(token_name) req = self.remote_utils.get_url(url) if req.status_code is not 200: raise RemoteDataUploadError('Could not find {}'.format(req.text)) else: return req.json()
[ "Get", "a", "token", "with", "the", "given", "parameters", ".", "Arguments", ":", "project_name", "(", "str", ")", ":", "Project", "name", "dataset_name", "(", "str", ")", ":", "Dataset", "name", "project", "is", "based", "on", "token_name", "(", "str", ")", ":", "Token", "name", "Returns", ":", "dict", ":", "Token", "info" ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/resources.py#L217-L238
[ "def", "get_token", "(", "self", ",", "token_name", ",", "project_name", ",", "dataset_name", ")", ":", "url", "=", "self", ".", "url", "(", ")", "+", "\"/nd/resource/dataset/{}\"", ".", "format", "(", "dataset_name", ")", "+", "\"/project/{}\"", ".", "format", "(", "project_name", ")", "+", "\"/token/{}/\"", ".", "format", "(", "token_name", ")", "req", "=", "self", ".", "remote_utils", ".", "get_url", "(", "url", ")", "if", "req", ".", "status_code", "is", "not", "200", ":", "raise", "RemoteDataUploadError", "(", "'Could not find {}'", ".", "format", "(", "req", ".", "text", ")", ")", "else", ":", "return", "req", ".", "json", "(", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
resources.delete_token
Delete a token with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on token_name (str): Token name channel_name (str): Channel name project is based on Returns: bool: True if project deleted, false if not deleted.
ndio/remote/resources.py
def delete_token(self, token_name, project_name, dataset_name): """ Delete a token with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on token_name (str): Token name channel_name (str): Channel name project is based on Returns: bool: True if project deleted, false if not deleted. """ url = self.url() + "/nd/resource/dataset/{}".format(dataset_name)\ + "/project/{}".format(project_name)\ + "/token/{}/".format(token_name) req = self.remote_utils.delete_url(url) if req.status_code is not 204: raise RemoteDataUploadError("Could not delete {}".format(req.text)) if req.content == "" or req.content == b'': return True else: return False
def delete_token(self, token_name, project_name, dataset_name): """ Delete a token with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on token_name (str): Token name channel_name (str): Channel name project is based on Returns: bool: True if project deleted, false if not deleted. """ url = self.url() + "/nd/resource/dataset/{}".format(dataset_name)\ + "/project/{}".format(project_name)\ + "/token/{}/".format(token_name) req = self.remote_utils.delete_url(url) if req.status_code is not 204: raise RemoteDataUploadError("Could not delete {}".format(req.text)) if req.content == "" or req.content == b'': return True else: return False
[ "Delete", "a", "token", "with", "the", "given", "parameters", ".", "Arguments", ":", "project_name", "(", "str", ")", ":", "Project", "name", "dataset_name", "(", "str", ")", ":", "Dataset", "name", "project", "is", "based", "on", "token_name", "(", "str", ")", ":", "Token", "name", "channel_name", "(", "str", ")", ":", "Channel", "name", "project", "is", "based", "on", "Returns", ":", "bool", ":", "True", "if", "project", "deleted", "false", "if", "not", "deleted", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/resources.py#L240-L264
[ "def", "delete_token", "(", "self", ",", "token_name", ",", "project_name", ",", "dataset_name", ")", ":", "url", "=", "self", ".", "url", "(", ")", "+", "\"/nd/resource/dataset/{}\"", ".", "format", "(", "dataset_name", ")", "+", "\"/project/{}\"", ".", "format", "(", "project_name", ")", "+", "\"/token/{}/\"", ".", "format", "(", "token_name", ")", "req", "=", "self", ".", "remote_utils", ".", "delete_url", "(", "url", ")", "if", "req", ".", "status_code", "is", "not", "204", ":", "raise", "RemoteDataUploadError", "(", "\"Could not delete {}\"", ".", "format", "(", "req", ".", "text", ")", ")", "if", "req", ".", "content", "==", "\"\"", "or", "req", ".", "content", "==", "b''", ":", "return", "True", "else", ":", "return", "False" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
resources.list_tokens
Lists a set of tokens that are public in Neurodata. Arguments: Returns: dict: Public tokens found in Neurodata
ndio/remote/resources.py
def list_tokens(self): """ Lists a set of tokens that are public in Neurodata. Arguments: Returns: dict: Public tokens found in Neurodata """ url = self.url() + "/nd/resource/public/token/" req = self.remote_utils.get_url(url) if req.status_code is not 200: raise RemoteDataNotFoundError('Coud not find {}'.format(req.text)) else: return req.json()
def list_tokens(self): """ Lists a set of tokens that are public in Neurodata. Arguments: Returns: dict: Public tokens found in Neurodata """ url = self.url() + "/nd/resource/public/token/" req = self.remote_utils.get_url(url) if req.status_code is not 200: raise RemoteDataNotFoundError('Coud not find {}'.format(req.text)) else: return req.json()
[ "Lists", "a", "set", "of", "tokens", "that", "are", "public", "in", "Neurodata", ".", "Arguments", ":", "Returns", ":", "dict", ":", "Public", "tokens", "found", "in", "Neurodata" ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/resources.py#L266-L279
[ "def", "list_tokens", "(", "self", ")", ":", "url", "=", "self", ".", "url", "(", ")", "+", "\"/nd/resource/public/token/\"", "req", "=", "self", ".", "remote_utils", ".", "get_url", "(", "url", ")", "if", "req", ".", "status_code", "is", "not", "200", ":", "raise", "RemoteDataNotFoundError", "(", "'Coud not find {}'", ".", "format", "(", "req", ".", "text", ")", ")", "else", ":", "return", "req", ".", "json", "(", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
resources.create_dataset
Creates a dataset. Arguments: name (str): Name of dataset x_img_size (int): max x coordinate of image size y_img_size (int): max y coordinate of image size z_img_size (int): max z coordinate of image size x_vox_res (float): x voxel resolution y_vox_res (float): y voxel resolution z_vox_res (float): z voxel resolution x_offset (int): x offset amount y_offset (int): y offset amount z_offset (int): z offset amount scaling_levels (int): Level of resolution scaling scaling_option (int): Z slices is 0 or Isotropic is 1 dataset_description (str): Your description of the dataset is_public (int): 1 'true' or 0 'false' for viewability of data set in public Returns: bool: True if dataset created, False if not
ndio/remote/resources.py
def create_dataset(self, name, x_img_size, y_img_size, z_img_size, x_vox_res, y_vox_res, z_vox_res, x_offset=0, y_offset=0, z_offset=0, scaling_levels=0, scaling_option=0, dataset_description="", is_public=0): """ Creates a dataset. Arguments: name (str): Name of dataset x_img_size (int): max x coordinate of image size y_img_size (int): max y coordinate of image size z_img_size (int): max z coordinate of image size x_vox_res (float): x voxel resolution y_vox_res (float): y voxel resolution z_vox_res (float): z voxel resolution x_offset (int): x offset amount y_offset (int): y offset amount z_offset (int): z offset amount scaling_levels (int): Level of resolution scaling scaling_option (int): Z slices is 0 or Isotropic is 1 dataset_description (str): Your description of the dataset is_public (int): 1 'true' or 0 'false' for viewability of data set in public Returns: bool: True if dataset created, False if not """ url = self.url() + "/resource/dataset/{}".format(name) json = { "dataset_name": name, "ximagesize": x_img_size, "yimagesize": y_img_size, "zimagesize": z_img_size, "xvoxelres": x_vox_res, "yvoxelres": y_vox_res, "zvoxelres": z_vox_res, "xoffset": x_offset, "yoffset": y_offset, "zoffset": z_offset, "scalinglevels": scaling_levels, "scalingoption": scaling_option, "dataset_description": dataset_description, "public": is_public } req = self.remote_utils.post_url(url, json=json) if req.status_code is not 201: raise RemoteDataUploadError('Could not upload {}'.format(req.text)) if req.content == "" or req.content == b'': return True else: return False
def create_dataset(self, name, x_img_size, y_img_size, z_img_size, x_vox_res, y_vox_res, z_vox_res, x_offset=0, y_offset=0, z_offset=0, scaling_levels=0, scaling_option=0, dataset_description="", is_public=0): """ Creates a dataset. Arguments: name (str): Name of dataset x_img_size (int): max x coordinate of image size y_img_size (int): max y coordinate of image size z_img_size (int): max z coordinate of image size x_vox_res (float): x voxel resolution y_vox_res (float): y voxel resolution z_vox_res (float): z voxel resolution x_offset (int): x offset amount y_offset (int): y offset amount z_offset (int): z offset amount scaling_levels (int): Level of resolution scaling scaling_option (int): Z slices is 0 or Isotropic is 1 dataset_description (str): Your description of the dataset is_public (int): 1 'true' or 0 'false' for viewability of data set in public Returns: bool: True if dataset created, False if not """ url = self.url() + "/resource/dataset/{}".format(name) json = { "dataset_name": name, "ximagesize": x_img_size, "yimagesize": y_img_size, "zimagesize": z_img_size, "xvoxelres": x_vox_res, "yvoxelres": y_vox_res, "zvoxelres": z_vox_res, "xoffset": x_offset, "yoffset": y_offset, "zoffset": z_offset, "scalinglevels": scaling_levels, "scalingoption": scaling_option, "dataset_description": dataset_description, "public": is_public } req = self.remote_utils.post_url(url, json=json) if req.status_code is not 201: raise RemoteDataUploadError('Could not upload {}'.format(req.text)) if req.content == "" or req.content == b'': return True else: return False
[ "Creates", "a", "dataset", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/resources.py#L281-L343
[ "def", "create_dataset", "(", "self", ",", "name", ",", "x_img_size", ",", "y_img_size", ",", "z_img_size", ",", "x_vox_res", ",", "y_vox_res", ",", "z_vox_res", ",", "x_offset", "=", "0", ",", "y_offset", "=", "0", ",", "z_offset", "=", "0", ",", "scaling_levels", "=", "0", ",", "scaling_option", "=", "0", ",", "dataset_description", "=", "\"\"", ",", "is_public", "=", "0", ")", ":", "url", "=", "self", ".", "url", "(", ")", "+", "\"/resource/dataset/{}\"", ".", "format", "(", "name", ")", "json", "=", "{", "\"dataset_name\"", ":", "name", ",", "\"ximagesize\"", ":", "x_img_size", ",", "\"yimagesize\"", ":", "y_img_size", ",", "\"zimagesize\"", ":", "z_img_size", ",", "\"xvoxelres\"", ":", "x_vox_res", ",", "\"yvoxelres\"", ":", "y_vox_res", ",", "\"zvoxelres\"", ":", "z_vox_res", ",", "\"xoffset\"", ":", "x_offset", ",", "\"yoffset\"", ":", "y_offset", ",", "\"zoffset\"", ":", "z_offset", ",", "\"scalinglevels\"", ":", "scaling_levels", ",", "\"scalingoption\"", ":", "scaling_option", ",", "\"dataset_description\"", ":", "dataset_description", ",", "\"public\"", ":", "is_public", "}", "req", "=", "self", ".", "remote_utils", ".", "post_url", "(", "url", ",", "json", "=", "json", ")", "if", "req", ".", "status_code", "is", "not", "201", ":", "raise", "RemoteDataUploadError", "(", "'Could not upload {}'", ".", "format", "(", "req", ".", "text", ")", ")", "if", "req", ".", "content", "==", "\"\"", "or", "req", ".", "content", "==", "b''", ":", "return", "True", "else", ":", "return", "False" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
resources.get_dataset
Returns info regarding a particular dataset. Arugments: name (str): Dataset name Returns: dict: Dataset information
ndio/remote/resources.py
def get_dataset(self, name): """ Returns info regarding a particular dataset. Arugments: name (str): Dataset name Returns: dict: Dataset information """ url = self.url() + "/resource/dataset/{}".format(name) req = self.remote_utils.get_url(url) if req.status_code is not 200: raise RemoteDataNotFoundError('Could not find {}'.format(req.text)) else: return req.json()
def get_dataset(self, name): """ Returns info regarding a particular dataset. Arugments: name (str): Dataset name Returns: dict: Dataset information """ url = self.url() + "/resource/dataset/{}".format(name) req = self.remote_utils.get_url(url) if req.status_code is not 200: raise RemoteDataNotFoundError('Could not find {}'.format(req.text)) else: return req.json()
[ "Returns", "info", "regarding", "a", "particular", "dataset", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/resources.py#L345-L361
[ "def", "get_dataset", "(", "self", ",", "name", ")", ":", "url", "=", "self", ".", "url", "(", ")", "+", "\"/resource/dataset/{}\"", ".", "format", "(", "name", ")", "req", "=", "self", ".", "remote_utils", ".", "get_url", "(", "url", ")", "if", "req", ".", "status_code", "is", "not", "200", ":", "raise", "RemoteDataNotFoundError", "(", "'Could not find {}'", ".", "format", "(", "req", ".", "text", ")", ")", "else", ":", "return", "req", ".", "json", "(", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
resources.list_datasets
Lists datasets in resources. Setting 'get_global_public' to 'True' will retrieve all public datasets in cloud. 'False' will get user's public datasets. Arguments: get_global_public (bool): True if user wants all public datasets in cloud. False if user wants only their public datasets. Returns: dict: Returns datasets in JSON format
ndio/remote/resources.py
def list_datasets(self, get_global_public): """ Lists datasets in resources. Setting 'get_global_public' to 'True' will retrieve all public datasets in cloud. 'False' will get user's public datasets. Arguments: get_global_public (bool): True if user wants all public datasets in cloud. False if user wants only their public datasets. Returns: dict: Returns datasets in JSON format """ appending = "" if get_global_public: appending = "public" url = self.url() + "/resource/{}dataset/".format(appending) req = self.remote_utils.get_url(url) if req.status_code is not 200: raise RemoteDataNotFoundError('Could not find {}'.format(req.text)) else: return req.json()
def list_datasets(self, get_global_public): """ Lists datasets in resources. Setting 'get_global_public' to 'True' will retrieve all public datasets in cloud. 'False' will get user's public datasets. Arguments: get_global_public (bool): True if user wants all public datasets in cloud. False if user wants only their public datasets. Returns: dict: Returns datasets in JSON format """ appending = "" if get_global_public: appending = "public" url = self.url() + "/resource/{}dataset/".format(appending) req = self.remote_utils.get_url(url) if req.status_code is not 200: raise RemoteDataNotFoundError('Could not find {}'.format(req.text)) else: return req.json()
[ "Lists", "datasets", "in", "resources", ".", "Setting", "get_global_public", "to", "True", "will", "retrieve", "all", "public", "datasets", "in", "cloud", ".", "False", "will", "get", "user", "s", "public", "datasets", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/resources.py#L363-L387
[ "def", "list_datasets", "(", "self", ",", "get_global_public", ")", ":", "appending", "=", "\"\"", "if", "get_global_public", ":", "appending", "=", "\"public\"", "url", "=", "self", ".", "url", "(", ")", "+", "\"/resource/{}dataset/\"", ".", "format", "(", "appending", ")", "req", "=", "self", ".", "remote_utils", ".", "get_url", "(", "url", ")", "if", "req", ".", "status_code", "is", "not", "200", ":", "raise", "RemoteDataNotFoundError", "(", "'Could not find {}'", ".", "format", "(", "req", ".", "text", ")", ")", "else", ":", "return", "req", ".", "json", "(", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
resources.delete_dataset
Arguments: name (str): Name of dataset to delete Returns: bool: True if dataset deleted, False if not
ndio/remote/resources.py
def delete_dataset(self, name): """ Arguments: name (str): Name of dataset to delete Returns: bool: True if dataset deleted, False if not """ url = self.url() + "/resource/dataset/{}".format(name) req = self.remote_utils.delete_url(url) if req.status_code is not 204: raise RemoteDataUploadError('Could not delete {}'.format(req.text)) if req.content == "" or req.content == b'': return True else: return False
def delete_dataset(self, name): """ Arguments: name (str): Name of dataset to delete Returns: bool: True if dataset deleted, False if not """ url = self.url() + "/resource/dataset/{}".format(name) req = self.remote_utils.delete_url(url) if req.status_code is not 204: raise RemoteDataUploadError('Could not delete {}'.format(req.text)) if req.content == "" or req.content == b'': return True else: return False
[ "Arguments", ":", "name", "(", "str", ")", ":", "Name", "of", "dataset", "to", "delete" ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/resources.py#L389-L405
[ "def", "delete_dataset", "(", "self", ",", "name", ")", ":", "url", "=", "self", ".", "url", "(", ")", "+", "\"/resource/dataset/{}\"", ".", "format", "(", "name", ")", "req", "=", "self", ".", "remote_utils", ".", "delete_url", "(", "url", ")", "if", "req", ".", "status_code", "is", "not", "204", ":", "raise", "RemoteDataUploadError", "(", "'Could not delete {}'", ".", "format", "(", "req", ".", "text", ")", ")", "if", "req", ".", "content", "==", "\"\"", "or", "req", ".", "content", "==", "b''", ":", "return", "True", "else", ":", "return", "False" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
resources.create_channel
Create a new channel on the Remote, using channel_data. Arguments: channel_name (str): Channel name project_name (str): Project name dataset_name (str): Dataset name channel_type (str): Type of the channel (e.g. `neurodata.IMAGE`) dtype (str): The datatype of the channel's data (e.g. `uint8`) startwindow (int): Window to start in endwindow (int): Window to end in readonly (int): Can others write to this channel? propagate (int): Allow propogation? 1 is True, 0 is False resolution (int): Resolution scaling channel_description (str): Your description of the channel Returns: bool: `True` if successful, `False` otherwise. Raises: ValueError: If your args were bad :( RemoteDataUploadError: If the channel data is valid but upload fails for some other reason.
ndio/remote/resources.py
def create_channel(self, channel_name, project_name, dataset_name, channel_type, dtype, startwindow, endwindow, readonly=0, start_time=0, end_time=0, propagate=0, resolution=0, channel_description=''): """ Create a new channel on the Remote, using channel_data. Arguments: channel_name (str): Channel name project_name (str): Project name dataset_name (str): Dataset name channel_type (str): Type of the channel (e.g. `neurodata.IMAGE`) dtype (str): The datatype of the channel's data (e.g. `uint8`) startwindow (int): Window to start in endwindow (int): Window to end in readonly (int): Can others write to this channel? propagate (int): Allow propogation? 1 is True, 0 is False resolution (int): Resolution scaling channel_description (str): Your description of the channel Returns: bool: `True` if successful, `False` otherwise. Raises: ValueError: If your args were bad :( RemoteDataUploadError: If the channel data is valid but upload fails for some other reason. """ self._check_channel(channel_name) if channel_type not in ['image', 'annotation', 'timeseries']: raise ValueError('Channel type must be ' + 'neurodata.IMAGE or neurodata.ANNOTATION.') if readonly * 1 not in [0, 1]: raise ValueError("readonly must be 0 (False) or 1 (True).") # Good job! You supplied very nice arguments. url = self.url() + "/nd/resource/dataset/{}".format(dataset_name)\ + "/project/{}".format(project_name) + \ "/channel/{}/".format(channel_name) json = { "channel_name": channel_name, "channel_type": channel_type, "channel_datatype": dtype, "startwindow": startwindow, "endwindow": endwindow, 'starttime': start_time, 'endtime': end_time, 'readonly': readonly, 'propagate': propagate, 'resolution': resolution, 'channel_description': channel_description } req = self.remote_utils.post_url(url, json=json) if req.status_code is not 201: raise RemoteDataUploadError('Could not upload {}'.format(req.text)) if req.content == "" or req.content == b'': return True else: return False
def create_channel(self, channel_name, project_name, dataset_name, channel_type, dtype, startwindow, endwindow, readonly=0, start_time=0, end_time=0, propagate=0, resolution=0, channel_description=''): """ Create a new channel on the Remote, using channel_data. Arguments: channel_name (str): Channel name project_name (str): Project name dataset_name (str): Dataset name channel_type (str): Type of the channel (e.g. `neurodata.IMAGE`) dtype (str): The datatype of the channel's data (e.g. `uint8`) startwindow (int): Window to start in endwindow (int): Window to end in readonly (int): Can others write to this channel? propagate (int): Allow propogation? 1 is True, 0 is False resolution (int): Resolution scaling channel_description (str): Your description of the channel Returns: bool: `True` if successful, `False` otherwise. Raises: ValueError: If your args were bad :( RemoteDataUploadError: If the channel data is valid but upload fails for some other reason. """ self._check_channel(channel_name) if channel_type not in ['image', 'annotation', 'timeseries']: raise ValueError('Channel type must be ' + 'neurodata.IMAGE or neurodata.ANNOTATION.') if readonly * 1 not in [0, 1]: raise ValueError("readonly must be 0 (False) or 1 (True).") # Good job! You supplied very nice arguments. url = self.url() + "/nd/resource/dataset/{}".format(dataset_name)\ + "/project/{}".format(project_name) + \ "/channel/{}/".format(channel_name) json = { "channel_name": channel_name, "channel_type": channel_type, "channel_datatype": dtype, "startwindow": startwindow, "endwindow": endwindow, 'starttime': start_time, 'endtime': end_time, 'readonly': readonly, 'propagate': propagate, 'resolution': resolution, 'channel_description': channel_description } req = self.remote_utils.post_url(url, json=json) if req.status_code is not 201: raise RemoteDataUploadError('Could not upload {}'.format(req.text)) if req.content == "" or req.content == b'': return True else: return False
[ "Create", "a", "new", "channel", "on", "the", "Remote", "using", "channel_data", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/resources.py#L415-L487
[ "def", "create_channel", "(", "self", ",", "channel_name", ",", "project_name", ",", "dataset_name", ",", "channel_type", ",", "dtype", ",", "startwindow", ",", "endwindow", ",", "readonly", "=", "0", ",", "start_time", "=", "0", ",", "end_time", "=", "0", ",", "propagate", "=", "0", ",", "resolution", "=", "0", ",", "channel_description", "=", "''", ")", ":", "self", ".", "_check_channel", "(", "channel_name", ")", "if", "channel_type", "not", "in", "[", "'image'", ",", "'annotation'", ",", "'timeseries'", "]", ":", "raise", "ValueError", "(", "'Channel type must be '", "+", "'neurodata.IMAGE or neurodata.ANNOTATION.'", ")", "if", "readonly", "*", "1", "not", "in", "[", "0", ",", "1", "]", ":", "raise", "ValueError", "(", "\"readonly must be 0 (False) or 1 (True).\"", ")", "# Good job! You supplied very nice arguments.", "url", "=", "self", ".", "url", "(", ")", "+", "\"/nd/resource/dataset/{}\"", ".", "format", "(", "dataset_name", ")", "+", "\"/project/{}\"", ".", "format", "(", "project_name", ")", "+", "\"/channel/{}/\"", ".", "format", "(", "channel_name", ")", "json", "=", "{", "\"channel_name\"", ":", "channel_name", ",", "\"channel_type\"", ":", "channel_type", ",", "\"channel_datatype\"", ":", "dtype", ",", "\"startwindow\"", ":", "startwindow", ",", "\"endwindow\"", ":", "endwindow", ",", "'starttime'", ":", "start_time", ",", "'endtime'", ":", "end_time", ",", "'readonly'", ":", "readonly", ",", "'propagate'", ":", "propagate", ",", "'resolution'", ":", "resolution", ",", "'channel_description'", ":", "channel_description", "}", "req", "=", "self", ".", "remote_utils", ".", "post_url", "(", "url", ",", "json", "=", "json", ")", "if", "req", ".", "status_code", "is", "not", "201", ":", "raise", "RemoteDataUploadError", "(", "'Could not upload {}'", ".", "format", "(", "req", ".", "text", ")", ")", "if", "req", ".", "content", "==", "\"\"", "or", "req", ".", "content", "==", "b''", ":", "return", "True", "else", ":", "return", "False" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
resources.get_channel
Gets info about a channel given its name, name of its project , and name of its dataset. Arguments: channel_name (str): Channel name project_name (str): Project name dataset_name (str): Dataset name Returns: dict: Channel info
ndio/remote/resources.py
def get_channel(self, channel_name, project_name, dataset_name): """ Gets info about a channel given its name, name of its project , and name of its dataset. Arguments: channel_name (str): Channel name project_name (str): Project name dataset_name (str): Dataset name Returns: dict: Channel info """ url = self.url() + "/nd/resource/dataset/{}".format(dataset_name)\ + "/project/{}".format(project_name) + \ "/channel/{}/".format(channel_name) req = self.remote_utils.get_url(url) if req.status_code is not 200: raise RemoteDataNotFoundError('Could not find {}'.format(req.text)) else: return req.json()
def get_channel(self, channel_name, project_name, dataset_name): """ Gets info about a channel given its name, name of its project , and name of its dataset. Arguments: channel_name (str): Channel name project_name (str): Project name dataset_name (str): Dataset name Returns: dict: Channel info """ url = self.url() + "/nd/resource/dataset/{}".format(dataset_name)\ + "/project/{}".format(project_name) + \ "/channel/{}/".format(channel_name) req = self.remote_utils.get_url(url) if req.status_code is not 200: raise RemoteDataNotFoundError('Could not find {}'.format(req.text)) else: return req.json()
[ "Gets", "info", "about", "a", "channel", "given", "its", "name", "name", "of", "its", "project", "and", "name", "of", "its", "dataset", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/resources.py#L489-L511
[ "def", "get_channel", "(", "self", ",", "channel_name", ",", "project_name", ",", "dataset_name", ")", ":", "url", "=", "self", ".", "url", "(", ")", "+", "\"/nd/resource/dataset/{}\"", ".", "format", "(", "dataset_name", ")", "+", "\"/project/{}\"", ".", "format", "(", "project_name", ")", "+", "\"/channel/{}/\"", ".", "format", "(", "channel_name", ")", "req", "=", "self", ".", "remote_utils", ".", "get_url", "(", "url", ")", "if", "req", ".", "status_code", "is", "not", "200", ":", "raise", "RemoteDataNotFoundError", "(", "'Could not find {}'", ".", "format", "(", "req", ".", "text", ")", ")", "else", ":", "return", "req", ".", "json", "(", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
Show.parse
Parse show subcommand.
yoda/subcommand/show.py
def parse(self): """Parse show subcommand.""" parser = self.subparser.add_parser( "show", help="Show workspace details", description="Show workspace details.") group = parser.add_mutually_exclusive_group(required=True) group.add_argument('--all', action='store_true', help="All workspaces") group.add_argument('name', type=str, help="Workspace name", nargs='?')
def parse(self): """Parse show subcommand.""" parser = self.subparser.add_parser( "show", help="Show workspace details", description="Show workspace details.") group = parser.add_mutually_exclusive_group(required=True) group.add_argument('--all', action='store_true', help="All workspaces") group.add_argument('name', type=str, help="Workspace name", nargs='?')
[ "Parse", "show", "subcommand", "." ]
Numergy/yoda
python
https://github.com/Numergy/yoda/blob/109f0e9441130488b0155f05883ef6531cf46ee9/yoda/subcommand/show.py#L40-L49
[ "def", "parse", "(", "self", ")", ":", "parser", "=", "self", ".", "subparser", ".", "add_parser", "(", "\"show\"", ",", "help", "=", "\"Show workspace details\"", ",", "description", "=", "\"Show workspace details.\"", ")", "group", "=", "parser", ".", "add_mutually_exclusive_group", "(", "required", "=", "True", ")", "group", ".", "add_argument", "(", "'--all'", ",", "action", "=", "'store_true'", ",", "help", "=", "\"All workspaces\"", ")", "group", ".", "add_argument", "(", "'name'", ",", "type", "=", "str", ",", "help", "=", "\"Workspace name\"", ",", "nargs", "=", "'?'", ")" ]
109f0e9441130488b0155f05883ef6531cf46ee9
test
Show.execute
Execute show subcommand.
yoda/subcommand/show.py
def execute(self, args): """Execute show subcommand.""" if args.name is not None: self.show_workspace(slashes2dash(args.name)) elif args.all is not None: self.show_all()
def execute(self, args): """Execute show subcommand.""" if args.name is not None: self.show_workspace(slashes2dash(args.name)) elif args.all is not None: self.show_all()
[ "Execute", "show", "subcommand", "." ]
Numergy/yoda
python
https://github.com/Numergy/yoda/blob/109f0e9441130488b0155f05883ef6531cf46ee9/yoda/subcommand/show.py#L51-L56
[ "def", "execute", "(", "self", ",", "args", ")", ":", "if", "args", ".", "name", "is", "not", "None", ":", "self", ".", "show_workspace", "(", "slashes2dash", "(", "args", ".", "name", ")", ")", "elif", "args", ".", "all", "is", "not", "None", ":", "self", ".", "show_all", "(", ")" ]
109f0e9441130488b0155f05883ef6531cf46ee9
test
Show.show_workspace
Show specific workspace.
yoda/subcommand/show.py
def show_workspace(self, name): """Show specific workspace.""" if not self.workspace.exists(name): raise ValueError("Workspace `%s` doesn't exists." % name) color = Color() workspaces = self.workspace.list() self.logger.info("<== %s workspace ==>" % color.colored(name, "green")) self.logger.info("\tPath: %s" % workspaces[name]["path"]) self.logger.info("\tNumber of repositories: %s" % color.colored( len(workspaces[name]["repositories"]), "yellow")) repo_colored = color.colored("Repositories", "blue") path_colored = color.colored("Path", "blue") trepositories = PrettyTable( [repo_colored, path_colored, color.colored("+", "blue")]) trepositories.align[repo_colored] = "l" trepositories.align[path_colored] = "l" for repo_name in workspaces[name]["repositories"]: fullname = "%s/%s" % (name, repo_name) fullpath = find_path(fullname, self.config)[fullname] try: repo = Repository(fullpath) repo_scm = repo.get_scm() except RepositoryAdapterNotFound: repo_scm = None trepositories.add_row( [color.colored(repo_name, "cyan"), fullpath, repo_scm]) self.logger.info(trepositories)
def show_workspace(self, name): """Show specific workspace.""" if not self.workspace.exists(name): raise ValueError("Workspace `%s` doesn't exists." % name) color = Color() workspaces = self.workspace.list() self.logger.info("<== %s workspace ==>" % color.colored(name, "green")) self.logger.info("\tPath: %s" % workspaces[name]["path"]) self.logger.info("\tNumber of repositories: %s" % color.colored( len(workspaces[name]["repositories"]), "yellow")) repo_colored = color.colored("Repositories", "blue") path_colored = color.colored("Path", "blue") trepositories = PrettyTable( [repo_colored, path_colored, color.colored("+", "blue")]) trepositories.align[repo_colored] = "l" trepositories.align[path_colored] = "l" for repo_name in workspaces[name]["repositories"]: fullname = "%s/%s" % (name, repo_name) fullpath = find_path(fullname, self.config)[fullname] try: repo = Repository(fullpath) repo_scm = repo.get_scm() except RepositoryAdapterNotFound: repo_scm = None trepositories.add_row( [color.colored(repo_name, "cyan"), fullpath, repo_scm]) self.logger.info(trepositories)
[ "Show", "specific", "workspace", "." ]
Numergy/yoda
python
https://github.com/Numergy/yoda/blob/109f0e9441130488b0155f05883ef6531cf46ee9/yoda/subcommand/show.py#L58-L91
[ "def", "show_workspace", "(", "self", ",", "name", ")", ":", "if", "not", "self", ".", "workspace", ".", "exists", "(", "name", ")", ":", "raise", "ValueError", "(", "\"Workspace `%s` doesn't exists.\"", "%", "name", ")", "color", "=", "Color", "(", ")", "workspaces", "=", "self", ".", "workspace", ".", "list", "(", ")", "self", ".", "logger", ".", "info", "(", "\"<== %s workspace ==>\"", "%", "color", ".", "colored", "(", "name", ",", "\"green\"", ")", ")", "self", ".", "logger", ".", "info", "(", "\"\\tPath: %s\"", "%", "workspaces", "[", "name", "]", "[", "\"path\"", "]", ")", "self", ".", "logger", ".", "info", "(", "\"\\tNumber of repositories: %s\"", "%", "color", ".", "colored", "(", "len", "(", "workspaces", "[", "name", "]", "[", "\"repositories\"", "]", ")", ",", "\"yellow\"", ")", ")", "repo_colored", "=", "color", ".", "colored", "(", "\"Repositories\"", ",", "\"blue\"", ")", "path_colored", "=", "color", ".", "colored", "(", "\"Path\"", ",", "\"blue\"", ")", "trepositories", "=", "PrettyTable", "(", "[", "repo_colored", ",", "path_colored", ",", "color", ".", "colored", "(", "\"+\"", ",", "\"blue\"", ")", "]", ")", "trepositories", ".", "align", "[", "repo_colored", "]", "=", "\"l\"", "trepositories", ".", "align", "[", "path_colored", "]", "=", "\"l\"", "for", "repo_name", "in", "workspaces", "[", "name", "]", "[", "\"repositories\"", "]", ":", "fullname", "=", "\"%s/%s\"", "%", "(", "name", ",", "repo_name", ")", "fullpath", "=", "find_path", "(", "fullname", ",", "self", ".", "config", ")", "[", "fullname", "]", "try", ":", "repo", "=", "Repository", "(", "fullpath", ")", "repo_scm", "=", "repo", ".", "get_scm", "(", ")", "except", "RepositoryAdapterNotFound", ":", "repo_scm", "=", "None", "trepositories", ".", "add_row", "(", "[", "color", ".", "colored", "(", "repo_name", ",", "\"cyan\"", ")", ",", "fullpath", ",", "repo_scm", "]", ")", "self", ".", "logger", ".", "info", "(", "trepositories", ")" ]
109f0e9441130488b0155f05883ef6531cf46ee9
test
Show.show_all
Show details for all workspaces.
yoda/subcommand/show.py
def show_all(self): """Show details for all workspaces.""" for ws in self.workspace.list().keys(): self.show_workspace(ws) print("\n\n")
def show_all(self): """Show details for all workspaces.""" for ws in self.workspace.list().keys(): self.show_workspace(ws) print("\n\n")
[ "Show", "details", "for", "all", "workspaces", "." ]
Numergy/yoda
python
https://github.com/Numergy/yoda/blob/109f0e9441130488b0155f05883ef6531cf46ee9/yoda/subcommand/show.py#L93-L97
[ "def", "show_all", "(", "self", ")", ":", "for", "ws", "in", "self", ".", "workspace", ".", "list", "(", ")", ".", "keys", "(", ")", ":", "self", ".", "show_workspace", "(", "ws", ")", "print", "(", "\"\\n\\n\"", ")" ]
109f0e9441130488b0155f05883ef6531cf46ee9
test
Remote.url
Get the base URL of the Remote. Arguments: None Returns: `str` base URL
ndio/remote/Remote.py
def url(self, endpoint=''): """ Get the base URL of the Remote. Arguments: None Returns: `str` base URL """ if not endpoint.startswith('/'): endpoint = "/" + endpoint return self.protocol + "://" + self.hostname + endpoint
def url(self, endpoint=''): """ Get the base URL of the Remote. Arguments: None Returns: `str` base URL """ if not endpoint.startswith('/'): endpoint = "/" + endpoint return self.protocol + "://" + self.hostname + endpoint
[ "Get", "the", "base", "URL", "of", "the", "Remote", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/Remote.py#L21-L32
[ "def", "url", "(", "self", ",", "endpoint", "=", "''", ")", ":", "if", "not", "endpoint", ".", "startswith", "(", "'/'", ")", ":", "endpoint", "=", "\"/\"", "+", "endpoint", "return", "self", ".", "protocol", "+", "\"://\"", "+", "self", ".", "hostname", "+", "endpoint" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
Remote.ping
Ping the server to make sure that you can access the base URL. Arguments: None Returns: `boolean` Successful access of server (or status code)
ndio/remote/Remote.py
def ping(self, endpoint=''): """ Ping the server to make sure that you can access the base URL. Arguments: None Returns: `boolean` Successful access of server (or status code) """ r = requests.get(self.url() + "/" + endpoint) return r.status_code
def ping(self, endpoint=''): """ Ping the server to make sure that you can access the base URL. Arguments: None Returns: `boolean` Successful access of server (or status code) """ r = requests.get(self.url() + "/" + endpoint) return r.status_code
[ "Ping", "the", "server", "to", "make", "sure", "that", "you", "can", "access", "the", "base", "URL", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/Remote.py#L34-L44
[ "def", "ping", "(", "self", ",", "endpoint", "=", "''", ")", ":", "r", "=", "requests", ".", "get", "(", "self", ".", "url", "(", ")", "+", "\"/\"", "+", "endpoint", ")", "return", "r", ".", "status_code" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
export_dae
Converts a dense annotation to a DAE, using Marching Cubes (PyMCubes). Arguments: filename (str): The filename to write out to cutout (numpy.ndarray): The dense annotation level (int): The level at which to run mcubes Returns: boolean success
ndio/utils/mesh.py
def export_dae(filename, cutout, level=0): """ Converts a dense annotation to a DAE, using Marching Cubes (PyMCubes). Arguments: filename (str): The filename to write out to cutout (numpy.ndarray): The dense annotation level (int): The level at which to run mcubes Returns: boolean success """ if ".dae" not in filename: filename = filename + ".dae" vs, fs = mcubes.marching_cubes(cutout, level) mcubes.export_mesh(vs, fs, filename, "ndioexport")
def export_dae(filename, cutout, level=0): """ Converts a dense annotation to a DAE, using Marching Cubes (PyMCubes). Arguments: filename (str): The filename to write out to cutout (numpy.ndarray): The dense annotation level (int): The level at which to run mcubes Returns: boolean success """ if ".dae" not in filename: filename = filename + ".dae" vs, fs = mcubes.marching_cubes(cutout, level) mcubes.export_mesh(vs, fs, filename, "ndioexport")
[ "Converts", "a", "dense", "annotation", "to", "a", "DAE", "using", "Marching", "Cubes", "(", "PyMCubes", ")", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/utils/mesh.py#L5-L21
[ "def", "export_dae", "(", "filename", ",", "cutout", ",", "level", "=", "0", ")", ":", "if", "\".dae\"", "not", "in", "filename", ":", "filename", "=", "filename", "+", "\".dae\"", "vs", ",", "fs", "=", "mcubes", ".", "marching_cubes", "(", "cutout", ",", "level", ")", "mcubes", ".", "export_mesh", "(", "vs", ",", "fs", ",", "filename", ",", "\"ndioexport\"", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
export_obj
Converts a dense annotation to a obj, using Marching Cubes (PyMCubes). Arguments: filename (str): The filename to write out to cutout (numpy.ndarray): The dense annotation level (int): The level at which to run mcubes Returns: boolean success
ndio/utils/mesh.py
def export_obj(filename, cutout, level=0): """ Converts a dense annotation to a obj, using Marching Cubes (PyMCubes). Arguments: filename (str): The filename to write out to cutout (numpy.ndarray): The dense annotation level (int): The level at which to run mcubes Returns: boolean success """ if ".obj" not in filename: filename = filename + ".obj" vs, fs = mcubes.marching_cubes(cutout, level) mcubes.export_obj(vs, fs, filename)
def export_obj(filename, cutout, level=0): """ Converts a dense annotation to a obj, using Marching Cubes (PyMCubes). Arguments: filename (str): The filename to write out to cutout (numpy.ndarray): The dense annotation level (int): The level at which to run mcubes Returns: boolean success """ if ".obj" not in filename: filename = filename + ".obj" vs, fs = mcubes.marching_cubes(cutout, level) mcubes.export_obj(vs, fs, filename)
[ "Converts", "a", "dense", "annotation", "to", "a", "obj", "using", "Marching", "Cubes", "(", "PyMCubes", ")", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/utils/mesh.py#L24-L40
[ "def", "export_obj", "(", "filename", ",", "cutout", ",", "level", "=", "0", ")", ":", "if", "\".obj\"", "not", "in", "filename", ":", "filename", "=", "filename", "+", "\".obj\"", "vs", ",", "fs", "=", "mcubes", ".", "marching_cubes", "(", "cutout", ",", "level", ")", "mcubes", ".", "export_obj", "(", "vs", ",", "fs", ",", "filename", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
export_ply
Converts a dense annotation to a .PLY, using Marching Cubes (PyMCubes). Arguments: filename (str): The filename to write out to cutout (numpy.ndarray): The dense annotation level (int): The level at which to run mcubes Returns: boolean success
ndio/utils/mesh.py
def export_ply(filename, cutout, level=0): """ Converts a dense annotation to a .PLY, using Marching Cubes (PyMCubes). Arguments: filename (str): The filename to write out to cutout (numpy.ndarray): The dense annotation level (int): The level at which to run mcubes Returns: boolean success """ if ".ply" not in filename: filename = filename + ".ply" vs, fs = mcubes.marching_cubes(cutout, level) with open(filename, 'w') as fh: lines = [ "ply" "format ascii 1.0", "comment generated by ndio", "element vertex " + str(len(vs)), "property float32 x", "property float32 y", "property float32 z", "element face " + str(len(fs)), "property list uint8 int32 vertex_index", "end_header" ] fh.writelines(lines) for v in vs: fh.write("{} {} {}".format(v[0], v[1], v[2])) for f in fs: fh.write("3 {} {} {}".format(f[0], f[1], f[2]))
def export_ply(filename, cutout, level=0): """ Converts a dense annotation to a .PLY, using Marching Cubes (PyMCubes). Arguments: filename (str): The filename to write out to cutout (numpy.ndarray): The dense annotation level (int): The level at which to run mcubes Returns: boolean success """ if ".ply" not in filename: filename = filename + ".ply" vs, fs = mcubes.marching_cubes(cutout, level) with open(filename, 'w') as fh: lines = [ "ply" "format ascii 1.0", "comment generated by ndio", "element vertex " + str(len(vs)), "property float32 x", "property float32 y", "property float32 z", "element face " + str(len(fs)), "property list uint8 int32 vertex_index", "end_header" ] fh.writelines(lines) for v in vs: fh.write("{} {} {}".format(v[0], v[1], v[2])) for f in fs: fh.write("3 {} {} {}".format(f[0], f[1], f[2]))
[ "Converts", "a", "dense", "annotation", "to", "a", ".", "PLY", "using", "Marching", "Cubes", "(", "PyMCubes", ")", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/utils/mesh.py#L43-L77
[ "def", "export_ply", "(", "filename", ",", "cutout", ",", "level", "=", "0", ")", ":", "if", "\".ply\"", "not", "in", "filename", ":", "filename", "=", "filename", "+", "\".ply\"", "vs", ",", "fs", "=", "mcubes", ".", "marching_cubes", "(", "cutout", ",", "level", ")", "with", "open", "(", "filename", ",", "'w'", ")", "as", "fh", ":", "lines", "=", "[", "\"ply\"", "\"format ascii 1.0\"", ",", "\"comment generated by ndio\"", ",", "\"element vertex \"", "+", "str", "(", "len", "(", "vs", ")", ")", ",", "\"property float32 x\"", ",", "\"property float32 y\"", ",", "\"property float32 z\"", ",", "\"element face \"", "+", "str", "(", "len", "(", "fs", ")", ")", ",", "\"property list uint8 int32 vertex_index\"", ",", "\"end_header\"", "]", "fh", ".", "writelines", "(", "lines", ")", "for", "v", "in", "vs", ":", "fh", ".", "write", "(", "\"{} {} {}\"", ".", "format", "(", "v", "[", "0", "]", ",", "v", "[", "1", "]", ",", "v", "[", "2", "]", ")", ")", "for", "f", "in", "fs", ":", "fh", ".", "write", "(", "\"3 {} {} {}\"", ".", "format", "(", "f", "[", "0", "]", ",", "f", "[", "1", "]", ",", "f", "[", "2", "]", ")", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
_guess_format_from_extension
Guess the appropriate data type from file extension. Arguments: ext: The file extension (period optional) Returns: String. The format (without leading period), or False if none was found or couldn't be guessed
ndio/convert/convert.py
def _guess_format_from_extension(ext): """ Guess the appropriate data type from file extension. Arguments: ext: The file extension (period optional) Returns: String. The format (without leading period), or False if none was found or couldn't be guessed """ ext = ext.strip('.') # We look through FILE_FORMATS for this extension. # - If it appears zero times, return False. We can't guess. # - If it appears once, we can simply return that format. # - If it appears more than once, we can't guess (it's ambiguous, # e.g .m = RAMON or MATLAB) formats = [] for fmt in FILE_FORMATS: if ext in FILE_FORMATS[fmt]: formats.append(fmt) if formats == [] or len(formats) > 1: return False return formats[0]
def _guess_format_from_extension(ext): """ Guess the appropriate data type from file extension. Arguments: ext: The file extension (period optional) Returns: String. The format (without leading period), or False if none was found or couldn't be guessed """ ext = ext.strip('.') # We look through FILE_FORMATS for this extension. # - If it appears zero times, return False. We can't guess. # - If it appears once, we can simply return that format. # - If it appears more than once, we can't guess (it's ambiguous, # e.g .m = RAMON or MATLAB) formats = [] for fmt in FILE_FORMATS: if ext in FILE_FORMATS[fmt]: formats.append(fmt) if formats == [] or len(formats) > 1: return False return formats[0]
[ "Guess", "the", "appropriate", "data", "type", "from", "file", "extension", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/convert/convert.py#L44-L71
[ "def", "_guess_format_from_extension", "(", "ext", ")", ":", "ext", "=", "ext", ".", "strip", "(", "'.'", ")", "# We look through FILE_FORMATS for this extension.", "# - If it appears zero times, return False. We can't guess.", "# - If it appears once, we can simply return that format.", "# - If it appears more than once, we can't guess (it's ambiguous,", "# e.g .m = RAMON or MATLAB)", "formats", "=", "[", "]", "for", "fmt", "in", "FILE_FORMATS", ":", "if", "ext", "in", "FILE_FORMATS", "[", "fmt", "]", ":", "formats", ".", "append", "(", "fmt", ")", "if", "formats", "==", "[", "]", "or", "len", "(", "formats", ")", ">", "1", ":", "return", "False", "return", "formats", "[", "0", "]" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
open
Reads in a file from disk. Arguments: in_file: The name of the file to read in in_fmt: The format of in_file, if you want to be explicit Returns: numpy.ndarray
ndio/convert/convert.py
def open(in_file, in_fmt=None):
    """
    Reads in a file from disk.

    Arguments:
        in_file: The name of the file to read in
        in_fmt: The format of in_file, if you want to be explicit
            (overrides the extension-based guess)

    Returns:
        The opened image. NOTE(review): the original docstring said
        numpy.ndarray, but Image.open returns a PIL Image object —
        confirm which contract callers actually rely on.

    Raises:
        NotImplementedError: If the format is not a supported raster type
    """
    # Guess the format from the extension unless the caller was explicit.
    fmt = in_file.split('.')[-1]
    if in_fmt:
        fmt = in_fmt
    fmt = fmt.lower()

    if fmt in ['png', 'jpg', 'tiff', 'tif', 'jpeg']:
        return Image.open(in_file)
    else:
        # Bug fix: the original used "{fmt}".format(fmt), which raises
        # KeyError ('fmt') instead of producing the intended message.
        raise NotImplementedError(
            "Cannot open file of type {0}".format(fmt))
[ "Reads", "in", "a", "file", "from", "disk", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/convert/convert.py#L74-L93
[ "def", "open", "(", "in_file", ",", "in_fmt", "=", "None", ")", ":", "fmt", "=", "in_file", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "if", "in_fmt", ":", "fmt", "=", "in_fmt", "fmt", "=", "fmt", ".", "lower", "(", ")", "if", "fmt", "in", "[", "'png'", ",", "'jpg'", ",", "'tiff'", ",", "'tif'", ",", "'jpeg'", "]", ":", "return", "Image", ".", "open", "(", "in_file", ")", "else", ":", "raise", "NotImplementedError", "(", "\"Cannot open file of type {fmt}\"", ".", "format", "(", "fmt", ")", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
convert
Converts in_file to out_file, guessing datatype in the absence of in_fmt and out_fmt. Arguments: in_file: The name of the (existing) datafile to read out_file: The name of the file to create with converted data in_fmt: Optional. The format of incoming data, if not guessable out_fmt: Optional. The format of outgoing data, if not guessable Returns: String. Output filename
ndio/convert/convert.py
def convert(in_file, out_file, in_fmt="", out_fmt=""):
    """
    Converts in_file to out_file, guessing datatype in the absence of
    in_fmt and out_fmt.

    Arguments:
        in_file: The name of the (existing) datafile to read
        out_file: The name of the file to create with converted data
        in_fmt: Optional. The format of incoming data, if not guessable
        out_fmt: Optional. The format of outgoing data, if not guessable

    Returns:
        String. Output filename

    Raises:
        IOError: If in_file does not exist
        ValueError: If a conversion format cannot be determined
    """
    # First verify that in_file exists.
    in_file = os.path.expanduser(in_file)
    out_file = os.path.expanduser(out_file)

    if not os.path.exists(in_file):
        raise IOError("Input file {0} does not exist, stopping..."
                      .format(in_file))

    # Get formats, either by explicitly naming them or by guessing.
    # TODO: It'd be neat to check here if an explicit fmt matches the guess.
    in_fmt = in_fmt.lower() or _guess_format_from_extension(
        in_file.split('.')[-1].lower())
    out_fmt = out_fmt.lower() or _guess_format_from_extension(
        out_file.split('.')[-1].lower())

    if not in_fmt or not out_fmt:
        raise ValueError("Cannot determine conversion formats.")

    if in_fmt == out_fmt:
        # This is the case when this module (intended for LONI) is used
        # indescriminately to 'funnel' data into one format.
        # Bug fix: the original compared with `is` (string identity is not
        # guaranteed for equal strings) and passed filenames to
        # shutil.copyfileobj, which expects open file objects.
        # shutil.copyfile takes paths directly.
        shutil.copyfile(in_file, out_file)
        return out_file

    # Import
    if in_fmt == 'hdf5':
        from . import hdf5
        data = hdf5.load(in_file)
    elif in_fmt == 'tiff':
        from . import tiff
        data = tiff.load(in_file)
    elif in_fmt == 'png':
        from . import png
        data = png.load(in_file)
    else:
        return _fail_pair_conversion(in_fmt, out_fmt)

    # Export
    if out_fmt == 'hdf5':
        from . import hdf5
        return hdf5.save(out_file, data)
    elif out_fmt == 'tiff':
        from . import tiff
        return tiff.save(out_file, data)
    elif out_fmt == 'png':
        from . import png
        # Bug fix: the png module exposes save() like hdf5/tiff do;
        # the original called png.export_png, which is not the API the
        # png module defines.
        return png.save(out_file, data)
    else:
        return _fail_pair_conversion(in_fmt, out_fmt)
[ "Converts", "in_file", "to", "out_file", "guessing", "datatype", "in", "the", "absence", "of", "in_fmt", "and", "out_fmt", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/convert/convert.py#L96-L161
[ "def", "convert", "(", "in_file", ",", "out_file", ",", "in_fmt", "=", "\"\"", ",", "out_fmt", "=", "\"\"", ")", ":", "# First verify that in_file exists and out_file doesn't.", "in_file", "=", "os", ".", "path", ".", "expanduser", "(", "in_file", ")", "out_file", "=", "os", ".", "path", ".", "expanduser", "(", "out_file", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "in_file", ")", ":", "raise", "IOError", "(", "\"Input file {0} does not exist, stopping...\"", ".", "format", "(", "in_file", ")", ")", "# Get formats, either by explicitly naming them or by guessing.", "# TODO: It'd be neat to check here if an explicit fmt matches the guess.", "in_fmt", "=", "in_fmt", ".", "lower", "(", ")", "or", "_guess_format_from_extension", "(", "in_file", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", ".", "lower", "(", ")", ")", "out_fmt", "=", "out_fmt", ".", "lower", "(", ")", "or", "_guess_format_from_extension", "(", "out_file", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", ".", "lower", "(", ")", ")", "if", "not", "in_fmt", "or", "not", "out_fmt", ":", "raise", "ValueError", "(", "\"Cannot determine conversion formats.\"", ")", "return", "False", "if", "in_fmt", "is", "out_fmt", ":", "# This is the case when this module (intended for LONI) is used", "# indescriminately to 'funnel' data into one format.", "shutil", ".", "copyfileobj", "(", "in_file", ",", "out_file", ")", "return", "out_file", "# Import", "if", "in_fmt", "==", "'hdf5'", ":", "from", ".", "import", "hdf5", "data", "=", "hdf5", ".", "load", "(", "in_file", ")", "elif", "in_fmt", "==", "'tiff'", ":", "from", ".", "import", "tiff", "data", "=", "tiff", ".", "load", "(", "in_file", ")", "elif", "in_fmt", "==", "'png'", ":", "from", ".", "import", "png", "data", "=", "png", ".", "load", "(", "in_file", ")", "else", ":", "return", "_fail_pair_conversion", "(", "in_fmt", ",", "out_fmt", ")", "# Export", "if", "out_fmt", "==", "'hdf5'", ":", "from", ".", "import", "hdf5", "return", "hdf5", 
".", "save", "(", "out_file", ",", "data", ")", "elif", "out_fmt", "==", "'tiff'", ":", "from", ".", "import", "tiff", "return", "tiff", ".", "save", "(", "out_file", ",", "data", ")", "elif", "out_fmt", "==", "'png'", ":", "from", ".", "import", "png", "return", "png", ".", "export_png", "(", "out_file", ",", "data", ")", "else", ":", "return", "_fail_pair_conversion", "(", "in_fmt", ",", "out_fmt", ")", "return", "_fail_pair_conversion", "(", "in_fmt", ",", "out_fmt", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
grute.build_graph
Builds a graph using the graph-services endpoint. Arguments: project (str): The project to use site (str): The site in question subject (str): The subject's identifier session (str): The session (per subject) scan (str): The scan identifier size (str): Whether to return a big (grute.BIG) or small (grute.SMALL) graph. For a better explanation, see m2g.io. email (str : self.email)*: An email to notify invariants (str[]: Invariants.ALL)*: An array of invariants to compute. You can use the grute.Invariants class to construct a list, or simply pass grute.Invariants.ALL to compute them all. fiber_file (str: DEFAULT_FIBER_FILE)*: A local filename of an MRI Studio .dat file atlas_file (str: None)*: A local atlas file, in NIFTI .nii format. If none is specified, the Desikan atlas is used by default. use_threads (bool: False)*: Whether to run the download in a Python thread. If set to True, the call to `build_graph` will end quickly, and the `callback` will be called with the returned status-code of the restful call as its only argument. callback (function: None)*: The function to run upon completion of the call, if using threads. (Will not be called if use_threads is set to False.) Returns: HTTP Response if use_threads is False. Otherwise, None Raises: ValueError: When the supplied values are invalid (contain invalid characters, bad email address supplied, etc.) RemoteDataNotFoundError: When the data cannot be processed due to a server error.
ndio/remote/grute.py
def build_graph(self, project, site, subject, session, scan, size,
                email=None, invariants=Invariants.ALL,
                fiber_file=DEFAULT_FIBER_FILE, atlas_file=None,
                use_threads=False, callback=None):
    """
    Builds a graph using the graph-services endpoint.

    Arguments:
        project (str): The project to use
        site (str): The site in question
        subject (str): The subject's identifier
        session (str): The session (per subject)
        scan (str): The scan identifier
        size (str): grute.BIG or grute.SMALL; for details see m2g.io
        email (str: self.email)*: An email to notify
        invariants (str[]: Invariants.ALL)*: Invariants to compute; use
            the grute.Invariants class to build the list
        fiber_file (str: DEFAULT_FIBER_FILE)*: Local MRI Studio .dat file
        atlas_file (str: None)*: Local NIFTI .nii atlas file; when omitted
            the Desikan atlas is used by default
        use_threads (bool: False)*: Run the call in a background thread;
            when True the call returns quickly and `callback` receives the
            returned status code
        callback (function: None)*: One-argument function invoked on
            completion when use_threads is True

    Returns:
        HTTP Response if use_threads is False. Otherwise, None

    Raises:
        ValueError: When the supplied values are invalid (bad invariants,
            bad callback, bad size, or spaces in arguments)
        RemoteDataNotFoundError: When the server cannot process the data
    """
    email = email if email is not None else self.email

    if not set(invariants).issubset(set(Invariants.ALL)):
        raise ValueError("Invariants must be a subset of Invariants.ALL.")

    if use_threads and callback is not None:
        # The callback is handed exactly one argument (the response
        # status), so insist on a one-argument callable up front.
        if not hasattr(callback, '__call__'):
            raise ValueError("callback must be a function.")
        if len(inspect.getargspec(callback).args) != 1:
            raise ValueError("callback must take exactly 1 argument.")

    if size not in (self.BIG, self.SMALL):
        raise ValueError("size must be either grute.BIG or grute.SMALL.")

    segments = [project, site, subject, session, scan, size, email,
                "/".join(invariants)]
    url = "buildgraph/" + "/".join("{}".format(s) for s in segments) + "/"

    if " " in url:
        raise ValueError("Arguments must not contain spaces.")

    if not use_threads:
        # Foreground: block until the server responds.
        return self._run_build_graph(url, fiber_file, atlas_file)

    # Background: hand the work off to a thread; the callback (if any)
    # receives the result.
    threading.Thread(
        target=self._run_build_graph,
        args=[url, fiber_file, atlas_file, callback]
    ).start()
    return
[ "Builds", "a", "graph", "using", "the", "graph", "-", "services", "endpoint", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/grute.py#L151-L231
[ "def", "build_graph", "(", "self", ",", "project", ",", "site", ",", "subject", ",", "session", ",", "scan", ",", "size", ",", "email", "=", "None", ",", "invariants", "=", "Invariants", ".", "ALL", ",", "fiber_file", "=", "DEFAULT_FIBER_FILE", ",", "atlas_file", "=", "None", ",", "use_threads", "=", "False", ",", "callback", "=", "None", ")", ":", "if", "email", "is", "None", ":", "email", "=", "self", ".", "email", "if", "not", "set", "(", "invariants", ")", "<=", "set", "(", "Invariants", ".", "ALL", ")", ":", "raise", "ValueError", "(", "\"Invariants must be a subset of Invariants.ALL.\"", ")", "if", "use_threads", "and", "callback", "is", "not", "None", ":", "if", "not", "hasattr", "(", "callback", ",", "'__call__'", ")", ":", "raise", "ValueError", "(", "\"callback must be a function.\"", ")", "if", "len", "(", "inspect", ".", "getargspec", "(", "callback", ")", ".", "args", ")", "!=", "1", ":", "raise", "ValueError", "(", "\"callback must take exactly 1 argument.\"", ")", "# Once we get here, we know the callback is", "if", "size", "not", "in", "[", "self", ".", "BIG", ",", "self", ".", "SMALL", "]", ":", "raise", "ValueError", "(", "\"size must be either grute.BIG or grute.SMALL.\"", ")", "url", "=", "\"buildgraph/{}/{}/{}/{}/{}/{}/{}/{}/\"", ".", "format", "(", "project", ",", "site", ",", "subject", ",", "session", ",", "scan", ",", "size", ",", "email", ",", "\"/\"", ".", "join", "(", "invariants", ")", ")", "if", "\" \"", "in", "url", ":", "raise", "ValueError", "(", "\"Arguments must not contain spaces.\"", ")", "if", "use_threads", ":", "# Run in the background.", "download_thread", "=", "threading", ".", "Thread", "(", "target", "=", "self", ".", "_run_build_graph", ",", "args", "=", "[", "url", ",", "fiber_file", ",", "atlas_file", ",", "callback", "]", ")", "download_thread", ".", "start", "(", ")", "else", ":", "# Run in the foreground.", "return", "self", ".", "_run_build_graph", "(", "url", ",", "fiber_file", ",", "atlas_file", ")", "return" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
grute.compute_invariants
Compute invariants from an existing GraphML file using the remote grute graph services. Arguments: graph_file (str): The filename of the graphml file input_format (str): One of grute.GraphFormats invariants (str[]: Invariants.ALL)*: An array of grute.Invariants to compute on the graph email (str: self.email)*: The email to notify upon completion use_threads (bool: False)*: Whether to use Python threads to run computation in the background when waiting for the server to return the invariants callback (function: None)*: The function to run upon completion of the call, if using threads. (Will not be called if use_threads is set to False.) Returns: HTTP Response if use_threads is False. Otherwise, None Raises: ValueError: If the graph file does not exist, or if there are issues with the passed arguments RemoteDataUploadError: If there is an issue packing the file RemoteError: If the server experiences difficulty computing invs
ndio/remote/grute.py
def compute_invariants(self, graph_file, input_format,
                       invariants=Invariants.ALL,
                       email=None, use_threads=False, callback=None):
    """
    Compute invariants from an existing GraphML file using the remote
    grute graph services.

    Arguments:
        graph_file (str): The filename of the graphml file
        input_format (str): One of grute.GraphFormats
        invariants (str[]: Invariants.ALL)*: grute.Invariants to compute
        email (str: self.email)*: The email to notify upon completion
        use_threads (bool: False)*: Run the computation in a background
            thread while waiting for the server
        callback (function: None)*: One-argument function invoked on
            completion when use_threads is True

    Returns:
        HTTP Response if use_threads is False. Otherwise, None

    Raises:
        ValueError: If the graph file does not exist, or if there are
            issues with the passed arguments
        RemoteDataUploadError: If there is an issue packing the file
        RemoteError: If the server experiences difficulty computing invs
    """
    email = email if email is not None else self.email

    if input_format not in GraphFormats._any:
        raise ValueError("Invalid input format, {}.".format(input_format))

    if not set(invariants).issubset(set(Invariants.ALL)):
        raise ValueError("Invariants must be a subset of Invariants.ALL.")

    if use_threads and callback is not None:
        # The callback receives exactly one argument, so validate that.
        if not hasattr(callback, '__call__'):
            raise ValueError("callback must be a function.")
        if len(inspect.getargspec(callback).args) != 1:
            raise ValueError("callback must take exactly 1 argument.")

    url = "graphupload/{}/{}/{}/".format(email, input_format,
                                         "/".join(invariants))

    if " " in url:
        raise ValueError("Arguments cannot have spaces in them.")

    if not os.path.exists(graph_file):
        raise ValueError("File {} does not exist.".format(graph_file))

    if not use_threads:
        # Foreground: block until the server responds.
        return self._run_compute_invariants(url, graph_file)

    # Background: upload and compute in a worker thread.
    threading.Thread(
        target=self._run_compute_invariants,
        args=[url, graph_file, callback]
    ).start()
    return
[ "Compute", "invariants", "from", "an", "existing", "GraphML", "file", "using", "the", "remote", "grute", "graph", "services", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/grute.py#L262-L329
[ "def", "compute_invariants", "(", "self", ",", "graph_file", ",", "input_format", ",", "invariants", "=", "Invariants", ".", "ALL", ",", "email", "=", "None", ",", "use_threads", "=", "False", ",", "callback", "=", "None", ")", ":", "if", "email", "is", "None", ":", "email", "=", "self", ".", "email", "if", "input_format", "not", "in", "GraphFormats", ".", "_any", ":", "raise", "ValueError", "(", "\"Invalid input format, {}.\"", ".", "format", "(", "input_format", ")", ")", "if", "not", "set", "(", "invariants", ")", "<=", "set", "(", "Invariants", ".", "ALL", ")", ":", "raise", "ValueError", "(", "\"Invariants must be a subset of Invariants.ALL.\"", ")", "if", "use_threads", "and", "callback", "is", "not", "None", ":", "if", "not", "hasattr", "(", "callback", ",", "'__call__'", ")", ":", "raise", "ValueError", "(", "\"callback must be a function.\"", ")", "if", "len", "(", "inspect", ".", "getargspec", "(", "callback", ")", ".", "args", ")", "!=", "1", ":", "raise", "ValueError", "(", "\"callback must take exactly 1 argument.\"", ")", "url", "=", "\"graphupload/{}/{}/{}/\"", ".", "format", "(", "email", ",", "input_format", ",", "\"/\"", ".", "join", "(", "invariants", ")", ")", "if", "\" \"", "in", "url", ":", "raise", "ValueError", "(", "\"Arguments cannot have spaces in them.\"", ")", "if", "not", "(", "os", ".", "path", ".", "exists", "(", "graph_file", ")", ")", ":", "raise", "ValueError", "(", "\"File {} does not exist.\"", ".", "format", "(", "graph_file", ")", ")", "if", "use_threads", ":", "# Run in the background.", "upload_thread", "=", "threading", ".", "Thread", "(", "target", "=", "self", ".", "_run_compute_invariants", ",", "args", "=", "[", "url", ",", "graph_file", ",", "callback", "]", ")", "upload_thread", ".", "start", "(", ")", "else", ":", "# Run in the foreground.", "return", "self", ".", "_run_compute_invariants", "(", "url", ",", "graph_file", ")", "return" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
grute.convert_graph
Convert a graph from one GraphFormat to another. Arguments: graph_file (str): Filename of the file to convert input_format (str): A grute.GraphFormats output_formats (str[]): A grute.GraphFormats email (str: self.email)*: The email to notify use_threads (bool: False)*: Whether to use Python threads to run computation in the background when waiting for the server callback (function: None)*: The function to run upon completion of the call, if using threads. (Will not be called if use_threads is set to False.) Returns: HTTP Response if use_threads=False. Else, no return value. Raises: RemoteDataUploadError: If there's an issue uploading the data RemoteError: If there's a server-side issue ValueError: If there's a problem with the supplied arguments
ndio/remote/grute.py
def convert_graph(self, graph_file, input_format, output_formats,
                  email=None, use_threads=False, callback=None):
    """
    Convert a graph from one GraphFormat to another.

    Arguments:
        graph_file (str): Filename of the file to convert
        input_format (str): A grute.GraphFormats
        output_formats (str[]): A list of grute.GraphFormats
        email (str: self.email)*: The email to notify
        use_threads (bool: False)*: Run the conversion in a background
            thread while waiting for the server
        callback (function: None)*: One-argument function invoked on
            completion when use_threads is True

    Returns:
        HTTP Response if use_threads=False. Else, no return value.

    Raises:
        RemoteDataUploadError: If there's an issue uploading the data
        RemoteError: If there's a server-side issue
        ValueError: If there's a problem with the supplied arguments
    """
    email = email if email is not None else self.email

    if input_format not in GraphFormats._any:
        raise ValueError("Invalid input format {}.".format(input_format))

    if not set(output_formats).issubset(set(GraphFormats._any)):
        raise ValueError("Output formats must be a GraphFormats.")

    if use_threads and callback is not None:
        # The callback receives exactly one argument, so validate that.
        if not hasattr(callback, '__call__'):
            raise ValueError("callback must be a function.")
        if len(inspect.getargspec(callback).args) != 1:
            raise ValueError("callback must take exactly 1 argument.")

    if not os.path.exists(graph_file):
        raise ValueError("No such file, {}!".format(graph_file))

    # NOTE(review): the trailing "l" is preserved byte-for-byte from the
    # original URL template — confirm against the grute API whether it is
    # intentional or a typo for "/".
    url = "convert/{}/{}/{}/l".format(email, input_format,
                                      ','.join(output_formats))

    if " " in url:
        raise ValueError("Spaces are not permitted in arguments.")

    if not use_threads:
        # Foreground: block until the server responds.
        return self._run_convert_graph(url, graph_file)

    # Background: convert in a worker thread.
    threading.Thread(
        target=self._run_convert_graph,
        args=[url, graph_file, callback]
    ).start()
    return
[ "Convert", "a", "graph", "from", "one", "GraphFormat", "to", "another", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/grute.py#L356-L417
[ "def", "convert_graph", "(", "self", ",", "graph_file", ",", "input_format", ",", "output_formats", ",", "email", "=", "None", ",", "use_threads", "=", "False", ",", "callback", "=", "None", ")", ":", "if", "email", "is", "None", ":", "email", "=", "self", ".", "email", "if", "input_format", "not", "in", "GraphFormats", ".", "_any", ":", "raise", "ValueError", "(", "\"Invalid input format {}.\"", ".", "format", "(", "input_format", ")", ")", "if", "not", "set", "(", "output_formats", ")", "<=", "set", "(", "GraphFormats", ".", "_any", ")", ":", "raise", "ValueError", "(", "\"Output formats must be a GraphFormats.\"", ")", "if", "use_threads", "and", "callback", "is", "not", "None", ":", "if", "not", "hasattr", "(", "callback", ",", "'__call__'", ")", ":", "raise", "ValueError", "(", "\"callback must be a function.\"", ")", "if", "len", "(", "inspect", ".", "getargspec", "(", "callback", ")", ".", "args", ")", "!=", "1", ":", "raise", "ValueError", "(", "\"callback must take exactly 1 argument.\"", ")", "if", "not", "(", "os", ".", "path", ".", "exists", "(", "graph_file", ")", ")", ":", "raise", "ValueError", "(", "\"No such file, {}!\"", ".", "format", "(", "graph_file", ")", ")", "url", "=", "\"convert/{}/{}/{}/l\"", ".", "format", "(", "email", ",", "input_format", ",", "','", ".", "join", "(", "output_formats", ")", ")", "if", "\" \"", "in", "url", ":", "raise", "ValueError", "(", "\"Spaces are not permitted in arguments.\"", ")", "if", "use_threads", ":", "# Run in the background.", "convert_thread", "=", "threading", ".", "Thread", "(", "target", "=", "self", ".", "_run_convert_graph", ",", "args", "=", "[", "url", ",", "graph_file", ",", "callback", "]", ")", "convert_thread", ".", "start", "(", ")", "else", ":", "# Run in the foreground.", "return", "self", ".", "_run_convert_graph", "(", "url", ",", "graph_file", ")", "return" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
to_dict
Converts a RAMON object list to a JSON-style dictionary. Useful for going from an array of RAMONs to a dictionary, indexed by ID. Arguments: ramons (RAMON[]): A list of RAMON objects flatten (boolean: False): Not implemented Returns: dict: A python dictionary of RAMON objects.
ndio/ramon/__init__.py
def to_dict(ramons, flatten=False): """ Converts a RAMON object list to a JSON-style dictionary. Useful for going from an array of RAMONs to a dictionary, indexed by ID. Arguments: ramons (RAMON[]): A list of RAMON objects flatten (boolean: False): Not implemented Returns: dict: A python dictionary of RAMON objects. """ if type(ramons) is not list: ramons = [ramons] out_ramons = {} for r in ramons: out_ramons[r.id] = { "id": r.id, "type": _reverse_ramon_types[type(r)], "metadata": vars(r) } return out_ramons
def to_dict(ramons, flatten=False): """ Converts a RAMON object list to a JSON-style dictionary. Useful for going from an array of RAMONs to a dictionary, indexed by ID. Arguments: ramons (RAMON[]): A list of RAMON objects flatten (boolean: False): Not implemented Returns: dict: A python dictionary of RAMON objects. """ if type(ramons) is not list: ramons = [ramons] out_ramons = {} for r in ramons: out_ramons[r.id] = { "id": r.id, "type": _reverse_ramon_types[type(r)], "metadata": vars(r) } return out_ramons
[ "Converts", "a", "RAMON", "object", "list", "to", "a", "JSON", "-", "style", "dictionary", ".", "Useful", "for", "going", "from", "an", "array", "of", "RAMONs", "to", "a", "dictionary", "indexed", "by", "ID", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/ramon/__init__.py#L152-L174
[ "def", "to_dict", "(", "ramons", ",", "flatten", "=", "False", ")", ":", "if", "type", "(", "ramons", ")", "is", "not", "list", ":", "ramons", "=", "[", "ramons", "]", "out_ramons", "=", "{", "}", "for", "r", "in", "ramons", ":", "out_ramons", "[", "r", ".", "id", "]", "=", "{", "\"id\"", ":", "r", ".", "id", ",", "\"type\"", ":", "_reverse_ramon_types", "[", "type", "(", "r", ")", "]", ",", "\"metadata\"", ":", "vars", "(", "r", ")", "}", "return", "out_ramons" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
to_json
Converts RAMON objects into a JSON string which can be directly written out to a .json file. You can pass either a single RAMON or a list. If you pass a single RAMON, it will still be exported with the ID as the key. In other words: type(from_json(to_json(ramon))) # ALWAYS returns a list ...even if `type(ramon)` is a RAMON, not a list. Arguments: ramons (RAMON or list): The RAMON object(s) to convert to JSON. flatten (bool : False): If ID should be used as a key. If not, then a single JSON document is returned. Returns: str: The JSON representation of the RAMON objects, in the schema: ``` { <id>: { type: . . . , metadata: { . . . } }, } ``` Raises: ValueError: If an invalid RAMON is passed.
ndio/ramon/__init__.py
def to_json(ramons, flatten=False): """ Converts RAMON objects into a JSON string which can be directly written out to a .json file. You can pass either a single RAMON or a list. If you pass a single RAMON, it will still be exported with the ID as the key. In other words: type(from_json(to_json(ramon))) # ALWAYS returns a list ...even if `type(ramon)` is a RAMON, not a list. Arguments: ramons (RAMON or list): The RAMON object(s) to convert to JSON. flatten (bool : False): If ID should be used as a key. If not, then a single JSON document is returned. Returns: str: The JSON representation of the RAMON objects, in the schema: ``` { <id>: { type: . . . , metadata: { . . . } }, } ``` Raises: ValueError: If an invalid RAMON is passed. """ if type(ramons) is not list: ramons = [ramons] out_ramons = {} for r in ramons: out_ramons[r.id] = { "id": r.id, "type": _reverse_ramon_types[type(r)], "metadata": vars(r) } if flatten: return jsonlib.dumps(out_ramons.values()[0]) return jsonlib.dumps(out_ramons)
def to_json(ramons, flatten=False): """ Converts RAMON objects into a JSON string which can be directly written out to a .json file. You can pass either a single RAMON or a list. If you pass a single RAMON, it will still be exported with the ID as the key. In other words: type(from_json(to_json(ramon))) # ALWAYS returns a list ...even if `type(ramon)` is a RAMON, not a list. Arguments: ramons (RAMON or list): The RAMON object(s) to convert to JSON. flatten (bool : False): If ID should be used as a key. If not, then a single JSON document is returned. Returns: str: The JSON representation of the RAMON objects, in the schema: ``` { <id>: { type: . . . , metadata: { . . . } }, } ``` Raises: ValueError: If an invalid RAMON is passed. """ if type(ramons) is not list: ramons = [ramons] out_ramons = {} for r in ramons: out_ramons[r.id] = { "id": r.id, "type": _reverse_ramon_types[type(r)], "metadata": vars(r) } if flatten: return jsonlib.dumps(out_ramons.values()[0]) return jsonlib.dumps(out_ramons)
[ "Converts", "RAMON", "objects", "into", "a", "JSON", "string", "which", "can", "be", "directly", "written", "out", "to", "a", ".", "json", "file", ".", "You", "can", "pass", "either", "a", "single", "RAMON", "or", "a", "list", ".", "If", "you", "pass", "a", "single", "RAMON", "it", "will", "still", "be", "exported", "with", "the", "ID", "as", "the", "key", ".", "In", "other", "words", ":" ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/ramon/__init__.py#L177-L223
[ "def", "to_json", "(", "ramons", ",", "flatten", "=", "False", ")", ":", "if", "type", "(", "ramons", ")", "is", "not", "list", ":", "ramons", "=", "[", "ramons", "]", "out_ramons", "=", "{", "}", "for", "r", "in", "ramons", ":", "out_ramons", "[", "r", ".", "id", "]", "=", "{", "\"id\"", ":", "r", ".", "id", ",", "\"type\"", ":", "_reverse_ramon_types", "[", "type", "(", "r", ")", "]", ",", "\"metadata\"", ":", "vars", "(", "r", ")", "}", "if", "flatten", ":", "return", "jsonlib", ".", "dumps", "(", "out_ramons", ".", "values", "(", ")", "[", "0", "]", ")", "return", "jsonlib", ".", "dumps", "(", "out_ramons", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
from_json
Converts JSON to a python list of RAMON objects. if `cutout` is provided, the `cutout` attribute of the RAMON object is populated. Otherwise, it's left empty. `json` should be an ID-level dictionary, like so: { 16: { type: "segment", metadata: { . . . } }, } NOTE: If more than one item is in the dictionary, then a Python list of RAMON objects is returned instead of a single RAMON. Arguments: json (str or dict): The JSON to import to RAMON objects cutout: Currently not supported. Returns: [RAMON]
ndio/ramon/__init__.py
def from_json(json, cutout=None): """ Converts JSON to a python list of RAMON objects. if `cutout` is provided, the `cutout` attribute of the RAMON object is populated. Otherwise, it's left empty. `json` should be an ID-level dictionary, like so: { 16: { type: "segment", metadata: { . . . } }, } NOTE: If more than one item is in the dictionary, then a Python list of RAMON objects is returned instead of a single RAMON. Arguments: json (str or dict): The JSON to import to RAMON objects cutout: Currently not supported. Returns: [RAMON] """ if type(json) is str: json = jsonlib.loads(json) out_ramons = [] for (rid, rdata) in six.iteritems(json): _md = rdata['metadata'] r = AnnotationType.RAMON(rdata['type'])( id=rid, author=_md['author'], status=_md['status'], confidence=_md['confidence'], kvpairs=copy.deepcopy(_md['kvpairs']) ) if rdata['type'] == 'segment': r.segmentclass = _md.get('segmentclass') r.neuron = _md.get('neuron') if 'synapses' in _md: r.synapses = _md['synapses'][:] if 'organelles' in _md: r.organelles = _md['organelles'][:] elif rdata['type'] in ['neuron', 'synapse']: if 'segments' in _md: r.segments = _md['segments'][:] elif rdata['type'] == 'organelle': r.organelle_class = _md['organelleclass'][:] elif rdata['type'] == 'synapse': r.synapse_type = _md.get('synapse_type') r.weight = _md.get('weight') out_ramons.append(r) return out_ramons
def from_json(json, cutout=None): """ Converts JSON to a python list of RAMON objects. if `cutout` is provided, the `cutout` attribute of the RAMON object is populated. Otherwise, it's left empty. `json` should be an ID-level dictionary, like so: { 16: { type: "segment", metadata: { . . . } }, } NOTE: If more than one item is in the dictionary, then a Python list of RAMON objects is returned instead of a single RAMON. Arguments: json (str or dict): The JSON to import to RAMON objects cutout: Currently not supported. Returns: [RAMON] """ if type(json) is str: json = jsonlib.loads(json) out_ramons = [] for (rid, rdata) in six.iteritems(json): _md = rdata['metadata'] r = AnnotationType.RAMON(rdata['type'])( id=rid, author=_md['author'], status=_md['status'], confidence=_md['confidence'], kvpairs=copy.deepcopy(_md['kvpairs']) ) if rdata['type'] == 'segment': r.segmentclass = _md.get('segmentclass') r.neuron = _md.get('neuron') if 'synapses' in _md: r.synapses = _md['synapses'][:] if 'organelles' in _md: r.organelles = _md['organelles'][:] elif rdata['type'] in ['neuron', 'synapse']: if 'segments' in _md: r.segments = _md['segments'][:] elif rdata['type'] == 'organelle': r.organelle_class = _md['organelleclass'][:] elif rdata['type'] == 'synapse': r.synapse_type = _md.get('synapse_type') r.weight = _md.get('weight') out_ramons.append(r) return out_ramons
[ "Converts", "JSON", "to", "a", "python", "list", "of", "RAMON", "objects", ".", "if", "cutout", "is", "provided", "the", "cutout", "attribute", "of", "the", "RAMON", "object", "is", "populated", ".", "Otherwise", "it", "s", "left", "empty", ".", "json", "should", "be", "an", "ID", "-", "level", "dictionary", "like", "so", ":" ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/ramon/__init__.py#L226-L286
[ "def", "from_json", "(", "json", ",", "cutout", "=", "None", ")", ":", "if", "type", "(", "json", ")", "is", "str", ":", "json", "=", "jsonlib", ".", "loads", "(", "json", ")", "out_ramons", "=", "[", "]", "for", "(", "rid", ",", "rdata", ")", "in", "six", ".", "iteritems", "(", "json", ")", ":", "_md", "=", "rdata", "[", "'metadata'", "]", "r", "=", "AnnotationType", ".", "RAMON", "(", "rdata", "[", "'type'", "]", ")", "(", "id", "=", "rid", ",", "author", "=", "_md", "[", "'author'", "]", ",", "status", "=", "_md", "[", "'status'", "]", ",", "confidence", "=", "_md", "[", "'confidence'", "]", ",", "kvpairs", "=", "copy", ".", "deepcopy", "(", "_md", "[", "'kvpairs'", "]", ")", ")", "if", "rdata", "[", "'type'", "]", "==", "'segment'", ":", "r", ".", "segmentclass", "=", "_md", ".", "get", "(", "'segmentclass'", ")", "r", ".", "neuron", "=", "_md", ".", "get", "(", "'neuron'", ")", "if", "'synapses'", "in", "_md", ":", "r", ".", "synapses", "=", "_md", "[", "'synapses'", "]", "[", ":", "]", "if", "'organelles'", "in", "_md", ":", "r", ".", "organelles", "=", "_md", "[", "'organelles'", "]", "[", ":", "]", "elif", "rdata", "[", "'type'", "]", "in", "[", "'neuron'", ",", "'synapse'", "]", ":", "if", "'segments'", "in", "_md", ":", "r", ".", "segments", "=", "_md", "[", "'segments'", "]", "[", ":", "]", "elif", "rdata", "[", "'type'", "]", "==", "'organelle'", ":", "r", ".", "organelle_class", "=", "_md", "[", "'organelleclass'", "]", "[", ":", "]", "elif", "rdata", "[", "'type'", "]", "==", "'synapse'", ":", "r", ".", "synapse_type", "=", "_md", ".", "get", "(", "'synapse_type'", ")", "r", ".", "weight", "=", "_md", ".", "get", "(", "'weight'", ")", "out_ramons", ".", "append", "(", "r", ")", "return", "out_ramons" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
from_hdf5
Converts an HDF5 file to a RAMON object. Returns an object that is a child- -class of RAMON (though it's determined at run-time what type is returned). Accessing multiple IDs from the same file is not supported, because it's not dramatically faster to access each item in the hdf5 file at the same time It's semantically and computationally easier to run this function several times on the same file. Arguments: hdf5 (h5py.File): A h5py File object that holds RAMON data anno_id (int): The ID of the RAMON obj to extract from the file. This defaults to the first one (sorted) if none is specified. Returns: ndio.RAMON object
ndio/ramon/__init__.py
def from_hdf5(hdf5, anno_id=None): """ Converts an HDF5 file to a RAMON object. Returns an object that is a child- -class of RAMON (though it's determined at run-time what type is returned). Accessing multiple IDs from the same file is not supported, because it's not dramatically faster to access each item in the hdf5 file at the same time It's semantically and computationally easier to run this function several times on the same file. Arguments: hdf5 (h5py.File): A h5py File object that holds RAMON data anno_id (int): The ID of the RAMON obj to extract from the file. This defaults to the first one (sorted) if none is specified. Returns: ndio.RAMON object """ if anno_id is None: # The user just wants the first item we find, so... Yeah. return from_hdf5(hdf5, list(hdf5.keys())[0]) # First, get the actual object we're going to download. anno_id = str(anno_id) if anno_id not in list(hdf5.keys()): raise ValueError("ID {} is not in this file. Options are: {}".format( anno_id, ", ".join(list(hdf5.keys())) )) anno = hdf5[anno_id] # anno now holds just the RAMON of interest # This is the most complicated line in here: It creates an object whose # type is conditional on the ANNOTATION_TYPE of the hdf5 object. 
try: r = AnnotationType.get_class(anno['ANNOTATION_TYPE'][0])() except: raise InvalidRAMONError("This is not a valid RAMON type.") # All RAMON types definitely have these attributes: metadata = anno['METADATA'] r.author = metadata['AUTHOR'][0] r.confidence = metadata['CONFIDENCE'][0] r.status = metadata['STATUS'][0] r.id = anno_id # These are a little tougher, some RAMON types have special attributes: if type(r) in [RAMONNeuron, RAMONSynapse]: r.segments = metadata['SEGMENTS'][()] if 'KVPAIRS' in metadata: kvs = metadata['KVPAIRS'][()][0].split() if len(kvs) != 0: for i in kvs: k, v = str(i).split(',') r.kvpairs[str(k)] = str(v) else: r.kvpairs = {} if issubclass(type(r), RAMONVolume): if 'CUTOUT' in anno: r.cutout = anno['CUTOUT'][()] if 'XYZOFFSET' in anno: r.cutout = anno['XYZOFFSET'][()] if 'RESOLUTION' in anno: r.cutout = anno['RESOLUTION'][()] if type(r) is RAMONSynapse: r.synapse_type = metadata['SYNAPSE_TYPE'][0] r.weight = metadata['WEIGHT'][0] if type(r) is RAMONSegment: if 'NEURON' in metadata: r.neuron = metadata['NEURON'][0] if 'PARENTSEED' in metadata: r.parent_seed = metadata['PARENTSEED'][0] if 'SEGMENTCLASS' in metadata: r.segmentclass = metadata['SEGMENTCLASS'][0] if 'SYNAPSES' in metadata: r.synapses = metadata['SYNAPSES'][()] if 'ORGANELLES' in metadata: r.organelles = metadata['ORGANELLES'][()] if type(r) is RAMONOrganelle: r.organelle_class = metadata['ORGANELLECLASS'][0] return r
def from_hdf5(hdf5, anno_id=None): """ Converts an HDF5 file to a RAMON object. Returns an object that is a child- -class of RAMON (though it's determined at run-time what type is returned). Accessing multiple IDs from the same file is not supported, because it's not dramatically faster to access each item in the hdf5 file at the same time It's semantically and computationally easier to run this function several times on the same file. Arguments: hdf5 (h5py.File): A h5py File object that holds RAMON data anno_id (int): The ID of the RAMON obj to extract from the file. This defaults to the first one (sorted) if none is specified. Returns: ndio.RAMON object """ if anno_id is None: # The user just wants the first item we find, so... Yeah. return from_hdf5(hdf5, list(hdf5.keys())[0]) # First, get the actual object we're going to download. anno_id = str(anno_id) if anno_id not in list(hdf5.keys()): raise ValueError("ID {} is not in this file. Options are: {}".format( anno_id, ", ".join(list(hdf5.keys())) )) anno = hdf5[anno_id] # anno now holds just the RAMON of interest # This is the most complicated line in here: It creates an object whose # type is conditional on the ANNOTATION_TYPE of the hdf5 object. 
try: r = AnnotationType.get_class(anno['ANNOTATION_TYPE'][0])() except: raise InvalidRAMONError("This is not a valid RAMON type.") # All RAMON types definitely have these attributes: metadata = anno['METADATA'] r.author = metadata['AUTHOR'][0] r.confidence = metadata['CONFIDENCE'][0] r.status = metadata['STATUS'][0] r.id = anno_id # These are a little tougher, some RAMON types have special attributes: if type(r) in [RAMONNeuron, RAMONSynapse]: r.segments = metadata['SEGMENTS'][()] if 'KVPAIRS' in metadata: kvs = metadata['KVPAIRS'][()][0].split() if len(kvs) != 0: for i in kvs: k, v = str(i).split(',') r.kvpairs[str(k)] = str(v) else: r.kvpairs = {} if issubclass(type(r), RAMONVolume): if 'CUTOUT' in anno: r.cutout = anno['CUTOUT'][()] if 'XYZOFFSET' in anno: r.cutout = anno['XYZOFFSET'][()] if 'RESOLUTION' in anno: r.cutout = anno['RESOLUTION'][()] if type(r) is RAMONSynapse: r.synapse_type = metadata['SYNAPSE_TYPE'][0] r.weight = metadata['WEIGHT'][0] if type(r) is RAMONSegment: if 'NEURON' in metadata: r.neuron = metadata['NEURON'][0] if 'PARENTSEED' in metadata: r.parent_seed = metadata['PARENTSEED'][0] if 'SEGMENTCLASS' in metadata: r.segmentclass = metadata['SEGMENTCLASS'][0] if 'SYNAPSES' in metadata: r.synapses = metadata['SYNAPSES'][()] if 'ORGANELLES' in metadata: r.organelles = metadata['ORGANELLES'][()] if type(r) is RAMONOrganelle: r.organelle_class = metadata['ORGANELLECLASS'][0] return r
[ "Converts", "an", "HDF5", "file", "to", "a", "RAMON", "object", ".", "Returns", "an", "object", "that", "is", "a", "child", "-", "-", "class", "of", "RAMON", "(", "though", "it", "s", "determined", "at", "run", "-", "time", "what", "type", "is", "returned", ")", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/ramon/__init__.py#L289-L378
[ "def", "from_hdf5", "(", "hdf5", ",", "anno_id", "=", "None", ")", ":", "if", "anno_id", "is", "None", ":", "# The user just wants the first item we find, so... Yeah.", "return", "from_hdf5", "(", "hdf5", ",", "list", "(", "hdf5", ".", "keys", "(", ")", ")", "[", "0", "]", ")", "# First, get the actual object we're going to download.", "anno_id", "=", "str", "(", "anno_id", ")", "if", "anno_id", "not", "in", "list", "(", "hdf5", ".", "keys", "(", ")", ")", ":", "raise", "ValueError", "(", "\"ID {} is not in this file. Options are: {}\"", ".", "format", "(", "anno_id", ",", "\", \"", ".", "join", "(", "list", "(", "hdf5", ".", "keys", "(", ")", ")", ")", ")", ")", "anno", "=", "hdf5", "[", "anno_id", "]", "# anno now holds just the RAMON of interest", "# This is the most complicated line in here: It creates an object whose", "# type is conditional on the ANNOTATION_TYPE of the hdf5 object.", "try", ":", "r", "=", "AnnotationType", ".", "get_class", "(", "anno", "[", "'ANNOTATION_TYPE'", "]", "[", "0", "]", ")", "(", ")", "except", ":", "raise", "InvalidRAMONError", "(", "\"This is not a valid RAMON type.\"", ")", "# All RAMON types definitely have these attributes:", "metadata", "=", "anno", "[", "'METADATA'", "]", "r", ".", "author", "=", "metadata", "[", "'AUTHOR'", "]", "[", "0", "]", "r", ".", "confidence", "=", "metadata", "[", "'CONFIDENCE'", "]", "[", "0", "]", "r", ".", "status", "=", "metadata", "[", "'STATUS'", "]", "[", "0", "]", "r", ".", "id", "=", "anno_id", "# These are a little tougher, some RAMON types have special attributes:", "if", "type", "(", "r", ")", "in", "[", "RAMONNeuron", ",", "RAMONSynapse", "]", ":", "r", ".", "segments", "=", "metadata", "[", "'SEGMENTS'", "]", "[", "(", ")", "]", "if", "'KVPAIRS'", "in", "metadata", ":", "kvs", "=", "metadata", "[", "'KVPAIRS'", "]", "[", "(", ")", "]", "[", "0", "]", ".", "split", "(", ")", "if", "len", "(", "kvs", ")", "!=", "0", ":", "for", "i", "in", "kvs", ":", "k", ",", "v", "=", "str", "(", 
"i", ")", ".", "split", "(", "','", ")", "r", ".", "kvpairs", "[", "str", "(", "k", ")", "]", "=", "str", "(", "v", ")", "else", ":", "r", ".", "kvpairs", "=", "{", "}", "if", "issubclass", "(", "type", "(", "r", ")", ",", "RAMONVolume", ")", ":", "if", "'CUTOUT'", "in", "anno", ":", "r", ".", "cutout", "=", "anno", "[", "'CUTOUT'", "]", "[", "(", ")", "]", "if", "'XYZOFFSET'", "in", "anno", ":", "r", ".", "cutout", "=", "anno", "[", "'XYZOFFSET'", "]", "[", "(", ")", "]", "if", "'RESOLUTION'", "in", "anno", ":", "r", ".", "cutout", "=", "anno", "[", "'RESOLUTION'", "]", "[", "(", ")", "]", "if", "type", "(", "r", ")", "is", "RAMONSynapse", ":", "r", ".", "synapse_type", "=", "metadata", "[", "'SYNAPSE_TYPE'", "]", "[", "0", "]", "r", ".", "weight", "=", "metadata", "[", "'WEIGHT'", "]", "[", "0", "]", "if", "type", "(", "r", ")", "is", "RAMONSegment", ":", "if", "'NEURON'", "in", "metadata", ":", "r", ".", "neuron", "=", "metadata", "[", "'NEURON'", "]", "[", "0", "]", "if", "'PARENTSEED'", "in", "metadata", ":", "r", ".", "parent_seed", "=", "metadata", "[", "'PARENTSEED'", "]", "[", "0", "]", "if", "'SEGMENTCLASS'", "in", "metadata", ":", "r", ".", "segmentclass", "=", "metadata", "[", "'SEGMENTCLASS'", "]", "[", "0", "]", "if", "'SYNAPSES'", "in", "metadata", ":", "r", ".", "synapses", "=", "metadata", "[", "'SYNAPSES'", "]", "[", "(", ")", "]", "if", "'ORGANELLES'", "in", "metadata", ":", "r", ".", "organelles", "=", "metadata", "[", "'ORGANELLES'", "]", "[", "(", ")", "]", "if", "type", "(", "r", ")", "is", "RAMONOrganelle", ":", "r", ".", "organelle_class", "=", "metadata", "[", "'ORGANELLECLASS'", "]", "[", "0", "]", "return", "r" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
to_hdf5
Exports a RAMON object to an HDF5 file object. Arguments: ramon (RAMON): A subclass of RAMONBase hdf5 (str): Export filename Returns: hdf5.File Raises: InvalidRAMONError: if you pass a non-RAMON object
ndio/ramon/__init__.py
def to_hdf5(ramon, hdf5=None): """ Exports a RAMON object to an HDF5 file object. Arguments: ramon (RAMON): A subclass of RAMONBase hdf5 (str): Export filename Returns: hdf5.File Raises: InvalidRAMONError: if you pass a non-RAMON object """ if issubclass(type(ramon), RAMONBase) is False: raise InvalidRAMONError("Invalid RAMON supplied to ramon.to_hdf5.") import h5py import numpy if hdf5 is None: tmpfile = tempfile.NamedTemporaryFile(delete=False) else: tmpfile = hdf5 with h5py.File(tmpfile.name, "a") as hdf5: # First we'll export things that all RAMON objects have in # common, starting with the Group that encompasses each ID: grp = hdf5.create_group(str(ramon.id)) grp.create_dataset("ANNOTATION_TYPE", (1,), numpy.uint32, data=AnnotationType.get_int(type(ramon))) if hasattr(ramon, 'cutout'): if ramon.cutout is not None: grp.create_dataset('CUTOUT', ramon.cutout.shape, ramon.cutout.dtype, data=ramon.cutout) grp.create_dataset('RESOLUTION', (1,), numpy.uint32, data=ramon.resolution) grp.create_dataset('XYZOFFSET', (3,), numpy.uint32, data=ramon.xyz_offset) # Next, add general metadata. 
metadata = grp.create_group('METADATA') metadata.create_dataset('AUTHOR', (1,), dtype=h5py.special_dtype(vlen=str), data=ramon.author) fstring = StringIO() csvw = csv.writer(fstring, delimiter=',') csvw.writerows([r for r in six.iteritems(ramon.kvpairs)]) metadata.create_dataset('KVPAIRS', (1,), dtype=h5py.special_dtype(vlen=str), data=fstring.getvalue()) metadata.create_dataset('CONFIDENCE', (1,), numpy.float, data=ramon.confidence) metadata.create_dataset('STATUS', (1,), numpy.uint32, data=ramon.status) # Finally, add type-specific metadata: if hasattr(ramon, 'segments'): metadata.create_dataset('SEGMENTS', data=numpy.asarray(ramon.segments, dtype=numpy.uint32)) if hasattr(ramon, 'synapse_type'): metadata.create_dataset('SYNAPSE_TYPE', (1,), numpy.uint32, data=ramon.synapse_type) if hasattr(ramon, 'weight'): metadata.create_dataset('WEIGHT', (1,), numpy.float, data=ramon.weight) if hasattr(ramon, 'neuron'): metadata.create_dataset('NEURON', (1,), numpy.uint32, data=ramon.neuron) if hasattr(ramon, 'segmentclass'): metadata.create_dataset('SEGMENTCLASS', (1,), numpy.uint32, data=ramon.segmentclass) if hasattr(ramon, 'synapses'): metadata.create_dataset('SYNAPSES', (len(ramon.synapses),), numpy.uint32, data=ramon.synapses) if hasattr(ramon, 'organelles'): metadata.create_dataset('ORGANELLES', (len(ramon.organelles),), numpy.uint32, data=ramon.organelles) if hasattr(ramon, 'organelle_class'): metadata.create_dataset('ORGANELLECLASS', (1,), numpy.uint32, data=ramon.organelle_class) hdf5.flush() tmpfile.seek(0) return tmpfile return False
def to_hdf5(ramon, hdf5=None): """ Exports a RAMON object to an HDF5 file object. Arguments: ramon (RAMON): A subclass of RAMONBase hdf5 (str): Export filename Returns: hdf5.File Raises: InvalidRAMONError: if you pass a non-RAMON object """ if issubclass(type(ramon), RAMONBase) is False: raise InvalidRAMONError("Invalid RAMON supplied to ramon.to_hdf5.") import h5py import numpy if hdf5 is None: tmpfile = tempfile.NamedTemporaryFile(delete=False) else: tmpfile = hdf5 with h5py.File(tmpfile.name, "a") as hdf5: # First we'll export things that all RAMON objects have in # common, starting with the Group that encompasses each ID: grp = hdf5.create_group(str(ramon.id)) grp.create_dataset("ANNOTATION_TYPE", (1,), numpy.uint32, data=AnnotationType.get_int(type(ramon))) if hasattr(ramon, 'cutout'): if ramon.cutout is not None: grp.create_dataset('CUTOUT', ramon.cutout.shape, ramon.cutout.dtype, data=ramon.cutout) grp.create_dataset('RESOLUTION', (1,), numpy.uint32, data=ramon.resolution) grp.create_dataset('XYZOFFSET', (3,), numpy.uint32, data=ramon.xyz_offset) # Next, add general metadata. 
metadata = grp.create_group('METADATA') metadata.create_dataset('AUTHOR', (1,), dtype=h5py.special_dtype(vlen=str), data=ramon.author) fstring = StringIO() csvw = csv.writer(fstring, delimiter=',') csvw.writerows([r for r in six.iteritems(ramon.kvpairs)]) metadata.create_dataset('KVPAIRS', (1,), dtype=h5py.special_dtype(vlen=str), data=fstring.getvalue()) metadata.create_dataset('CONFIDENCE', (1,), numpy.float, data=ramon.confidence) metadata.create_dataset('STATUS', (1,), numpy.uint32, data=ramon.status) # Finally, add type-specific metadata: if hasattr(ramon, 'segments'): metadata.create_dataset('SEGMENTS', data=numpy.asarray(ramon.segments, dtype=numpy.uint32)) if hasattr(ramon, 'synapse_type'): metadata.create_dataset('SYNAPSE_TYPE', (1,), numpy.uint32, data=ramon.synapse_type) if hasattr(ramon, 'weight'): metadata.create_dataset('WEIGHT', (1,), numpy.float, data=ramon.weight) if hasattr(ramon, 'neuron'): metadata.create_dataset('NEURON', (1,), numpy.uint32, data=ramon.neuron) if hasattr(ramon, 'segmentclass'): metadata.create_dataset('SEGMENTCLASS', (1,), numpy.uint32, data=ramon.segmentclass) if hasattr(ramon, 'synapses'): metadata.create_dataset('SYNAPSES', (len(ramon.synapses),), numpy.uint32, data=ramon.synapses) if hasattr(ramon, 'organelles'): metadata.create_dataset('ORGANELLES', (len(ramon.organelles),), numpy.uint32, data=ramon.organelles) if hasattr(ramon, 'organelle_class'): metadata.create_dataset('ORGANELLECLASS', (1,), numpy.uint32, data=ramon.organelle_class) hdf5.flush() tmpfile.seek(0) return tmpfile return False
[ "Exports", "a", "RAMON", "object", "to", "an", "HDF5", "file", "object", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/ramon/__init__.py#L381-L484
[ "def", "to_hdf5", "(", "ramon", ",", "hdf5", "=", "None", ")", ":", "if", "issubclass", "(", "type", "(", "ramon", ")", ",", "RAMONBase", ")", "is", "False", ":", "raise", "InvalidRAMONError", "(", "\"Invalid RAMON supplied to ramon.to_hdf5.\"", ")", "import", "h5py", "import", "numpy", "if", "hdf5", "is", "None", ":", "tmpfile", "=", "tempfile", ".", "NamedTemporaryFile", "(", "delete", "=", "False", ")", "else", ":", "tmpfile", "=", "hdf5", "with", "h5py", ".", "File", "(", "tmpfile", ".", "name", ",", "\"a\"", ")", "as", "hdf5", ":", "# First we'll export things that all RAMON objects have in", "# common, starting with the Group that encompasses each ID:", "grp", "=", "hdf5", ".", "create_group", "(", "str", "(", "ramon", ".", "id", ")", ")", "grp", ".", "create_dataset", "(", "\"ANNOTATION_TYPE\"", ",", "(", "1", ",", ")", ",", "numpy", ".", "uint32", ",", "data", "=", "AnnotationType", ".", "get_int", "(", "type", "(", "ramon", ")", ")", ")", "if", "hasattr", "(", "ramon", ",", "'cutout'", ")", ":", "if", "ramon", ".", "cutout", "is", "not", "None", ":", "grp", ".", "create_dataset", "(", "'CUTOUT'", ",", "ramon", ".", "cutout", ".", "shape", ",", "ramon", ".", "cutout", ".", "dtype", ",", "data", "=", "ramon", ".", "cutout", ")", "grp", ".", "create_dataset", "(", "'RESOLUTION'", ",", "(", "1", ",", ")", ",", "numpy", ".", "uint32", ",", "data", "=", "ramon", ".", "resolution", ")", "grp", ".", "create_dataset", "(", "'XYZOFFSET'", ",", "(", "3", ",", ")", ",", "numpy", ".", "uint32", ",", "data", "=", "ramon", ".", "xyz_offset", ")", "# Next, add general metadata.", "metadata", "=", "grp", ".", "create_group", "(", "'METADATA'", ")", "metadata", ".", "create_dataset", "(", "'AUTHOR'", ",", "(", "1", ",", ")", ",", "dtype", "=", "h5py", ".", "special_dtype", "(", "vlen", "=", "str", ")", ",", "data", "=", "ramon", ".", "author", ")", "fstring", "=", "StringIO", "(", ")", "csvw", "=", "csv", ".", "writer", "(", "fstring", ",", "delimiter", "=", "','", ")", 
"csvw", ".", "writerows", "(", "[", "r", "for", "r", "in", "six", ".", "iteritems", "(", "ramon", ".", "kvpairs", ")", "]", ")", "metadata", ".", "create_dataset", "(", "'KVPAIRS'", ",", "(", "1", ",", ")", ",", "dtype", "=", "h5py", ".", "special_dtype", "(", "vlen", "=", "str", ")", ",", "data", "=", "fstring", ".", "getvalue", "(", ")", ")", "metadata", ".", "create_dataset", "(", "'CONFIDENCE'", ",", "(", "1", ",", ")", ",", "numpy", ".", "float", ",", "data", "=", "ramon", ".", "confidence", ")", "metadata", ".", "create_dataset", "(", "'STATUS'", ",", "(", "1", ",", ")", ",", "numpy", ".", "uint32", ",", "data", "=", "ramon", ".", "status", ")", "# Finally, add type-specific metadata:", "if", "hasattr", "(", "ramon", ",", "'segments'", ")", ":", "metadata", ".", "create_dataset", "(", "'SEGMENTS'", ",", "data", "=", "numpy", ".", "asarray", "(", "ramon", ".", "segments", ",", "dtype", "=", "numpy", ".", "uint32", ")", ")", "if", "hasattr", "(", "ramon", ",", "'synapse_type'", ")", ":", "metadata", ".", "create_dataset", "(", "'SYNAPSE_TYPE'", ",", "(", "1", ",", ")", ",", "numpy", ".", "uint32", ",", "data", "=", "ramon", ".", "synapse_type", ")", "if", "hasattr", "(", "ramon", ",", "'weight'", ")", ":", "metadata", ".", "create_dataset", "(", "'WEIGHT'", ",", "(", "1", ",", ")", ",", "numpy", ".", "float", ",", "data", "=", "ramon", ".", "weight", ")", "if", "hasattr", "(", "ramon", ",", "'neuron'", ")", ":", "metadata", ".", "create_dataset", "(", "'NEURON'", ",", "(", "1", ",", ")", ",", "numpy", ".", "uint32", ",", "data", "=", "ramon", ".", "neuron", ")", "if", "hasattr", "(", "ramon", ",", "'segmentclass'", ")", ":", "metadata", ".", "create_dataset", "(", "'SEGMENTCLASS'", ",", "(", "1", ",", ")", ",", "numpy", ".", "uint32", ",", "data", "=", "ramon", ".", "segmentclass", ")", "if", "hasattr", "(", "ramon", ",", "'synapses'", ")", ":", "metadata", ".", "create_dataset", "(", "'SYNAPSES'", ",", "(", "len", "(", "ramon", ".", "synapses", ")", ",", ")", 
",", "numpy", ".", "uint32", ",", "data", "=", "ramon", ".", "synapses", ")", "if", "hasattr", "(", "ramon", ",", "'organelles'", ")", ":", "metadata", ".", "create_dataset", "(", "'ORGANELLES'", ",", "(", "len", "(", "ramon", ".", "organelles", ")", ",", ")", ",", "numpy", ".", "uint32", ",", "data", "=", "ramon", ".", "organelles", ")", "if", "hasattr", "(", "ramon", ",", "'organelle_class'", ")", ":", "metadata", ".", "create_dataset", "(", "'ORGANELLECLASS'", ",", "(", "1", ",", ")", ",", "numpy", ".", "uint32", ",", "data", "=", "ramon", ".", "organelle_class", ")", "hdf5", ".", "flush", "(", ")", "tmpfile", ".", "seek", "(", "0", ")", "return", "tmpfile", "return", "False" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
AnnotationType.RAMON
Takes str or int, returns class type
ndio/ramon/__init__.py
def RAMON(typ): """ Takes str or int, returns class type """ if six.PY2: lookup = [str, unicode] elif six.PY3: lookup = [str] if type(typ) is int: return _ramon_types[typ] elif type(typ) in lookup: return _ramon_types[_types[typ]]
def RAMON(typ): """ Takes str or int, returns class type """ if six.PY2: lookup = [str, unicode] elif six.PY3: lookup = [str] if type(typ) is int: return _ramon_types[typ] elif type(typ) in lookup: return _ramon_types[_types[typ]]
[ "Takes", "str", "or", "int", "returns", "class", "type" ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/ramon/__init__.py#L137-L149
[ "def", "RAMON", "(", "typ", ")", ":", "if", "six", ".", "PY2", ":", "lookup", "=", "[", "str", ",", "unicode", "]", "elif", "six", ".", "PY3", ":", "lookup", "=", "[", "str", "]", "if", "type", "(", "typ", ")", "is", "int", ":", "return", "_ramon_types", "[", "typ", "]", "elif", "type", "(", "typ", ")", "in", "lookup", ":", "return", "_ramon_types", "[", "_types", "[", "typ", "]", "]" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
neurodata.get_xy_slice
Return a binary-encoded, decompressed 2d image. You should specify a 'token' and 'channel' pair. For image data, users should use the channel 'image.' Arguments: token (str): Token to identify data to download channel (str): Channel resolution (int): Resolution level Q_start (int):` The lower bound of dimension 'Q' Q_stop (int): The upper bound of dimension 'Q' z_index (int): The z-slice to image Returns: str: binary image data
ndio/remote/neurodata.py
def get_xy_slice(self, token, channel, x_start, x_stop, y_start, y_stop, z_index, resolution=0): """ Return a binary-encoded, decompressed 2d image. You should specify a 'token' and 'channel' pair. For image data, users should use the channel 'image.' Arguments: token (str): Token to identify data to download channel (str): Channel resolution (int): Resolution level Q_start (int):` The lower bound of dimension 'Q' Q_stop (int): The upper bound of dimension 'Q' z_index (int): The z-slice to image Returns: str: binary image data """ return self.data.get_xy_slice(token, channel, x_start, x_stop, y_start, y_stop, z_index, resolution)
def get_xy_slice(self, token, channel, x_start, x_stop, y_start, y_stop, z_index, resolution=0): """ Return a binary-encoded, decompressed 2d image. You should specify a 'token' and 'channel' pair. For image data, users should use the channel 'image.' Arguments: token (str): Token to identify data to download channel (str): Channel resolution (int): Resolution level Q_start (int):` The lower bound of dimension 'Q' Q_stop (int): The upper bound of dimension 'Q' z_index (int): The z-slice to image Returns: str: binary image data """ return self.data.get_xy_slice(token, channel, x_start, x_stop, y_start, y_stop, z_index, resolution)
[ "Return", "a", "binary", "-", "encoded", "decompressed", "2d", "image", ".", "You", "should", "specify", "a", "token", "and", "channel", "pair", ".", "For", "image", "data", "users", "should", "use", "the", "channel", "image", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/neurodata.py#L122-L147
[ "def", "get_xy_slice", "(", "self", ",", "token", ",", "channel", ",", "x_start", ",", "x_stop", ",", "y_start", ",", "y_stop", ",", "z_index", ",", "resolution", "=", "0", ")", ":", "return", "self", ".", "data", ".", "get_xy_slice", "(", "token", ",", "channel", ",", "x_start", ",", "x_stop", ",", "y_start", ",", "y_stop", ",", "z_index", ",", "resolution", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
neurodata.get_volume
Get a RAMONVolume volumetric cutout from the neurodata server. Arguments: token (str): Token to identify data to download channel (str): Channel resolution (int): Resolution level Q_start (int): The lower bound of dimension 'Q' Q_stop (int): The upper bound of dimension 'Q' block_size (int[3]): Block size of this dataset neariso (bool : False): Passes the 'neariso' param to the cutout. If you don't know what this means, ignore it! Returns: ndio.ramon.RAMONVolume: Downloaded data.
ndio/remote/neurodata.py
def get_volume(self, token, channel, x_start, x_stop, y_start, y_stop, z_start, z_stop, resolution=1, block_size=DEFAULT_BLOCK_SIZE, neariso=False): """ Get a RAMONVolume volumetric cutout from the neurodata server. Arguments: token (str): Token to identify data to download channel (str): Channel resolution (int): Resolution level Q_start (int): The lower bound of dimension 'Q' Q_stop (int): The upper bound of dimension 'Q' block_size (int[3]): Block size of this dataset neariso (bool : False): Passes the 'neariso' param to the cutout. If you don't know what this means, ignore it! Returns: ndio.ramon.RAMONVolume: Downloaded data. """ return self.data.get_volume(token, channel, x_start, x_stop, y_start, y_stop, z_start, z_stop, resolution, block_size, neariso)
def get_volume(self, token, channel, x_start, x_stop, y_start, y_stop, z_start, z_stop, resolution=1, block_size=DEFAULT_BLOCK_SIZE, neariso=False): """ Get a RAMONVolume volumetric cutout from the neurodata server. Arguments: token (str): Token to identify data to download channel (str): Channel resolution (int): Resolution level Q_start (int): The lower bound of dimension 'Q' Q_stop (int): The upper bound of dimension 'Q' block_size (int[3]): Block size of this dataset neariso (bool : False): Passes the 'neariso' param to the cutout. If you don't know what this means, ignore it! Returns: ndio.ramon.RAMONVolume: Downloaded data. """ return self.data.get_volume(token, channel, x_start, x_stop, y_start, y_stop, z_start, z_stop, resolution, block_size, neariso)
[ "Get", "a", "RAMONVolume", "volumetric", "cutout", "from", "the", "neurodata", "server", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/neurodata.py#L162-L189
[ "def", "get_volume", "(", "self", ",", "token", ",", "channel", ",", "x_start", ",", "x_stop", ",", "y_start", ",", "y_stop", ",", "z_start", ",", "z_stop", ",", "resolution", "=", "1", ",", "block_size", "=", "DEFAULT_BLOCK_SIZE", ",", "neariso", "=", "False", ")", ":", "return", "self", ".", "data", ".", "get_volume", "(", "token", ",", "channel", ",", "x_start", ",", "x_stop", ",", "y_start", ",", "y_stop", ",", "z_start", ",", "z_stop", ",", "resolution", ",", "block_size", ",", "neariso", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
neurodata.get_cutout
Get volumetric cutout data from the neurodata server. Arguments: token (str): Token to identify data to download channel (str): Channel resolution (int): Resolution level Q_start (int): The lower bound of dimension 'Q' Q_stop (int): The upper bound of dimension 'Q' block_size (int[3]): Block size of this dataset. If not provided, ndio uses the metadata of this tokenchannel to set. If you find that your downloads are timing out or otherwise failing, it may be wise to start off by making this smaller. neariso (bool : False): Passes the 'neariso' param to the cutout. If you don't know what this means, ignore it! Returns: numpy.ndarray: Downloaded data.
ndio/remote/neurodata.py
def get_cutout(self, token, channel, x_start, x_stop, y_start, y_stop, z_start, z_stop, t_start=0, t_stop=1, resolution=1, block_size=DEFAULT_BLOCK_SIZE, neariso=False): """ Get volumetric cutout data from the neurodata server. Arguments: token (str): Token to identify data to download channel (str): Channel resolution (int): Resolution level Q_start (int): The lower bound of dimension 'Q' Q_stop (int): The upper bound of dimension 'Q' block_size (int[3]): Block size of this dataset. If not provided, ndio uses the metadata of this tokenchannel to set. If you find that your downloads are timing out or otherwise failing, it may be wise to start off by making this smaller. neariso (bool : False): Passes the 'neariso' param to the cutout. If you don't know what this means, ignore it! Returns: numpy.ndarray: Downloaded data. """ return self.data.get_cutout(token, channel, x_start, x_stop, y_start, y_stop, z_start, z_stop, t_start, t_stop, resolution, block_size, neariso)
def get_cutout(self, token, channel, x_start, x_stop, y_start, y_stop, z_start, z_stop, t_start=0, t_stop=1, resolution=1, block_size=DEFAULT_BLOCK_SIZE, neariso=False): """ Get volumetric cutout data from the neurodata server. Arguments: token (str): Token to identify data to download channel (str): Channel resolution (int): Resolution level Q_start (int): The lower bound of dimension 'Q' Q_stop (int): The upper bound of dimension 'Q' block_size (int[3]): Block size of this dataset. If not provided, ndio uses the metadata of this tokenchannel to set. If you find that your downloads are timing out or otherwise failing, it may be wise to start off by making this smaller. neariso (bool : False): Passes the 'neariso' param to the cutout. If you don't know what this means, ignore it! Returns: numpy.ndarray: Downloaded data. """ return self.data.get_cutout(token, channel, x_start, x_stop, y_start, y_stop, z_start, z_stop, t_start, t_stop, resolution, block_size, neariso)
[ "Get", "volumetric", "cutout", "data", "from", "the", "neurodata", "server", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/neurodata.py#L191-L225
[ "def", "get_cutout", "(", "self", ",", "token", ",", "channel", ",", "x_start", ",", "x_stop", ",", "y_start", ",", "y_stop", ",", "z_start", ",", "z_stop", ",", "t_start", "=", "0", ",", "t_stop", "=", "1", ",", "resolution", "=", "1", ",", "block_size", "=", "DEFAULT_BLOCK_SIZE", ",", "neariso", "=", "False", ")", ":", "return", "self", ".", "data", ".", "get_cutout", "(", "token", ",", "channel", ",", "x_start", ",", "x_stop", ",", "y_start", ",", "y_stop", ",", "z_start", ",", "z_stop", ",", "t_start", ",", "t_stop", ",", "resolution", ",", "block_size", ",", "neariso", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
neurodata.post_cutout
Post a cutout to the server. Arguments: token (str) channel (str) x_start (int) y_start (int) z_start (int) data (numpy.ndarray): A numpy array of data. Pass in (x, y, z) resolution (int : 0): Resolution at which to insert the data Returns: bool: True on success Raises: RemoteDataUploadError: if there's an issue during upload.
ndio/remote/neurodata.py
def post_cutout(self, token, channel, x_start, y_start, z_start, data, resolution=0): """ Post a cutout to the server. Arguments: token (str) channel (str) x_start (int) y_start (int) z_start (int) data (numpy.ndarray): A numpy array of data. Pass in (x, y, z) resolution (int : 0): Resolution at which to insert the data Returns: bool: True on success Raises: RemoteDataUploadError: if there's an issue during upload. """ return self.data.post_cutout(token, channel, x_start, y_start, z_start, data, resolution)
def post_cutout(self, token, channel, x_start, y_start, z_start, data, resolution=0): """ Post a cutout to the server. Arguments: token (str) channel (str) x_start (int) y_start (int) z_start (int) data (numpy.ndarray): A numpy array of data. Pass in (x, y, z) resolution (int : 0): Resolution at which to insert the data Returns: bool: True on success Raises: RemoteDataUploadError: if there's an issue during upload. """ return self.data.post_cutout(token, channel, x_start, y_start, z_start, data, resolution)
[ "Post", "a", "cutout", "to", "the", "server", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/neurodata.py#L230-L259
[ "def", "post_cutout", "(", "self", ",", "token", ",", "channel", ",", "x_start", ",", "y_start", ",", "z_start", ",", "data", ",", "resolution", "=", "0", ")", ":", "return", "self", ".", "data", ".", "post_cutout", "(", "token", ",", "channel", ",", "x_start", ",", "y_start", ",", "z_start", ",", "data", ",", "resolution", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
neurodata.create_project
Creates a project with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on hostname (str): Hostname s3backend (str): S3 region to save the data in is_public (int): 1 is public. 0 is not public. kvserver (str): Server to store key value pairs in kvengine (str): Database to store key value pairs in mdengine (str): ??? description (str): Description for your project Returns: bool: True if project created, false if not created.
ndio/remote/neurodata.py
def create_project(self, project_name, dataset_name, hostname, is_public, s3backend=0, kvserver='localhost', kvengine='MySQL', mdengine='MySQL', description=''): """ Creates a project with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on hostname (str): Hostname s3backend (str): S3 region to save the data in is_public (int): 1 is public. 0 is not public. kvserver (str): Server to store key value pairs in kvengine (str): Database to store key value pairs in mdengine (str): ??? description (str): Description for your project Returns: bool: True if project created, false if not created. """ return self.resources.create_project(project_name, dataset_name, hostname, is_public, s3backend, kvserver, kvengine, mdengine, description)
def create_project(self, project_name, dataset_name, hostname, is_public, s3backend=0, kvserver='localhost', kvengine='MySQL', mdengine='MySQL', description=''): """ Creates a project with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on hostname (str): Hostname s3backend (str): S3 region to save the data in is_public (int): 1 is public. 0 is not public. kvserver (str): Server to store key value pairs in kvengine (str): Database to store key value pairs in mdengine (str): ??? description (str): Description for your project Returns: bool: True if project created, false if not created. """ return self.resources.create_project(project_name, dataset_name, hostname, is_public, s3backend, kvserver, kvengine, mdengine, description)
[ "Creates", "a", "project", "with", "the", "given", "parameters", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/neurodata.py#L406-L441
[ "def", "create_project", "(", "self", ",", "project_name", ",", "dataset_name", ",", "hostname", ",", "is_public", ",", "s3backend", "=", "0", ",", "kvserver", "=", "'localhost'", ",", "kvengine", "=", "'MySQL'", ",", "mdengine", "=", "'MySQL'", ",", "description", "=", "''", ")", ":", "return", "self", ".", "resources", ".", "create_project", "(", "project_name", ",", "dataset_name", ",", "hostname", ",", "is_public", ",", "s3backend", ",", "kvserver", ",", "kvengine", ",", "mdengine", ",", "description", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
neurodata.create_token
Creates a token with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on token_name (str): Token name is_public (int): 1 is public. 0 is not public Returns: bool: True if project created, false if not created.
ndio/remote/neurodata.py
def create_token(self, token_name, project_name, dataset_name, is_public): """ Creates a token with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on token_name (str): Token name is_public (int): 1 is public. 0 is not public Returns: bool: True if project created, false if not created. """ return self.resources.create_token(token_name, project_name, dataset_name, is_public)
def create_token(self, token_name, project_name, dataset_name, is_public): """ Creates a token with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on token_name (str): Token name is_public (int): 1 is public. 0 is not public Returns: bool: True if project created, false if not created. """ return self.resources.create_token(token_name, project_name, dataset_name, is_public)
[ "Creates", "a", "token", "with", "the", "given", "parameters", ".", "Arguments", ":", "project_name", "(", "str", ")", ":", "Project", "name", "dataset_name", "(", "str", ")", ":", "Dataset", "name", "project", "is", "based", "on", "token_name", "(", "str", ")", ":", "Token", "name", "is_public", "(", "int", ")", ":", "1", "is", "public", ".", "0", "is", "not", "public", "Returns", ":", "bool", ":", "True", "if", "project", "created", "false", "if", "not", "created", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/neurodata.py#L470-L488
[ "def", "create_token", "(", "self", ",", "token_name", ",", "project_name", ",", "dataset_name", ",", "is_public", ")", ":", "return", "self", ".", "resources", ".", "create_token", "(", "token_name", ",", "project_name", ",", "dataset_name", ",", "is_public", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
neurodata.get_token
Get a token with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on token_name (str): Token name Returns: dict: Token info
ndio/remote/neurodata.py
def get_token(self, token_name, project_name, dataset_name): """ Get a token with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on token_name (str): Token name Returns: dict: Token info """ return self.resources.get_token(token_name, project_name, dataset_name)
def get_token(self, token_name, project_name, dataset_name): """ Get a token with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on token_name (str): Token name Returns: dict: Token info """ return self.resources.get_token(token_name, project_name, dataset_name)
[ "Get", "a", "token", "with", "the", "given", "parameters", ".", "Arguments", ":", "project_name", "(", "str", ")", ":", "Project", "name", "dataset_name", "(", "str", ")", ":", "Dataset", "name", "project", "is", "based", "on", "token_name", "(", "str", ")", ":", "Token", "name", "Returns", ":", "dict", ":", "Token", "info" ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/neurodata.py#L490-L505
[ "def", "get_token", "(", "self", ",", "token_name", ",", "project_name", ",", "dataset_name", ")", ":", "return", "self", ".", "resources", ".", "get_token", "(", "token_name", ",", "project_name", ",", "dataset_name", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
neurodata.delete_token
Delete a token with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on token_name (str): Token name channel_name (str): Channel name project is based on Returns: bool: True if project deleted, false if not deleted.
ndio/remote/neurodata.py
def delete_token(self, token_name, project_name, dataset_name): """ Delete a token with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on token_name (str): Token name channel_name (str): Channel name project is based on Returns: bool: True if project deleted, false if not deleted. """ return self.resources.delete_token(token_name, project_name, dataset_name)
def delete_token(self, token_name, project_name, dataset_name): """ Delete a token with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on token_name (str): Token name channel_name (str): Channel name project is based on Returns: bool: True if project deleted, false if not deleted. """ return self.resources.delete_token(token_name, project_name, dataset_name)
[ "Delete", "a", "token", "with", "the", "given", "parameters", ".", "Arguments", ":", "project_name", "(", "str", ")", ":", "Project", "name", "dataset_name", "(", "str", ")", ":", "Dataset", "name", "project", "is", "based", "on", "token_name", "(", "str", ")", ":", "Token", "name", "channel_name", "(", "str", ")", ":", "Channel", "name", "project", "is", "based", "on", "Returns", ":", "bool", ":", "True", "if", "project", "deleted", "false", "if", "not", "deleted", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/neurodata.py#L507-L523
[ "def", "delete_token", "(", "self", ",", "token_name", ",", "project_name", ",", "dataset_name", ")", ":", "return", "self", ".", "resources", ".", "delete_token", "(", "token_name", ",", "project_name", ",", "dataset_name", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
neurodata.create_dataset
Creates a dataset. Arguments: name (str): Name of dataset x_img_size (int): max x coordinate of image size y_img_size (int): max y coordinate of image size z_img_size (int): max z coordinate of image size x_vox_res (float): x voxel resolution y_vox_res (float): y voxel resolution z_vox_res (float): z voxel resolution x_offset (int): x offset amount y_offset (int): y offset amount z_offset (int): z offset amount scaling_levels (int): Level of resolution scaling scaling_option (int): Z slices is 0 or Isotropic is 1 dataset_description (str): Your description of the dataset is_public (int): 1 'true' or 0 'false' for viewability of data set in public Returns: bool: True if dataset created, False if not
ndio/remote/neurodata.py
def create_dataset(self, name, x_img_size, y_img_size, z_img_size, x_vox_res, y_vox_res, z_vox_res, x_offset=0, y_offset=0, z_offset=0, scaling_levels=0, scaling_option=0, dataset_description="", is_public=0): """ Creates a dataset. Arguments: name (str): Name of dataset x_img_size (int): max x coordinate of image size y_img_size (int): max y coordinate of image size z_img_size (int): max z coordinate of image size x_vox_res (float): x voxel resolution y_vox_res (float): y voxel resolution z_vox_res (float): z voxel resolution x_offset (int): x offset amount y_offset (int): y offset amount z_offset (int): z offset amount scaling_levels (int): Level of resolution scaling scaling_option (int): Z slices is 0 or Isotropic is 1 dataset_description (str): Your description of the dataset is_public (int): 1 'true' or 0 'false' for viewability of data set in public Returns: bool: True if dataset created, False if not """ return self.resources.create_dataset(name, x_img_size, y_img_size, z_img_size, x_vox_res, y_vox_res, z_vox_res, x_offset, y_offset, z_offset, scaling_levels, scaling_option, dataset_description, is_public)
def create_dataset(self, name, x_img_size, y_img_size, z_img_size, x_vox_res, y_vox_res, z_vox_res, x_offset=0, y_offset=0, z_offset=0, scaling_levels=0, scaling_option=0, dataset_description="", is_public=0): """ Creates a dataset. Arguments: name (str): Name of dataset x_img_size (int): max x coordinate of image size y_img_size (int): max y coordinate of image size z_img_size (int): max z coordinate of image size x_vox_res (float): x voxel resolution y_vox_res (float): y voxel resolution z_vox_res (float): z voxel resolution x_offset (int): x offset amount y_offset (int): y offset amount z_offset (int): z offset amount scaling_levels (int): Level of resolution scaling scaling_option (int): Z slices is 0 or Isotropic is 1 dataset_description (str): Your description of the dataset is_public (int): 1 'true' or 0 'false' for viewability of data set in public Returns: bool: True if dataset created, False if not """ return self.resources.create_dataset(name, x_img_size, y_img_size, z_img_size, x_vox_res, y_vox_res, z_vox_res, x_offset, y_offset, z_offset, scaling_levels, scaling_option, dataset_description, is_public)
[ "Creates", "a", "dataset", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/neurodata.py#L549-L600
[ "def", "create_dataset", "(", "self", ",", "name", ",", "x_img_size", ",", "y_img_size", ",", "z_img_size", ",", "x_vox_res", ",", "y_vox_res", ",", "z_vox_res", ",", "x_offset", "=", "0", ",", "y_offset", "=", "0", ",", "z_offset", "=", "0", ",", "scaling_levels", "=", "0", ",", "scaling_option", "=", "0", ",", "dataset_description", "=", "\"\"", ",", "is_public", "=", "0", ")", ":", "return", "self", ".", "resources", ".", "create_dataset", "(", "name", ",", "x_img_size", ",", "y_img_size", ",", "z_img_size", ",", "x_vox_res", ",", "y_vox_res", ",", "z_vox_res", ",", "x_offset", ",", "y_offset", ",", "z_offset", ",", "scaling_levels", ",", "scaling_option", ",", "dataset_description", ",", "is_public", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
neurodata.create_channel
Create a new channel on the Remote, using channel_data. Arguments: channel_name (str): Channel name project_name (str): Project name dataset_name (str): Dataset name channel_type (str): Type of the channel (e.g. `neurodata.IMAGE`) dtype (str): The datatype of the channel's data (e.g. `uint8`) startwindow (int): Window to start in endwindow (int): Window to end in readonly (int): Can others write to this channel? propagate (int): Allow propogation? 1 is True, 0 is False resolution (int): Resolution scaling channel_description (str): Your description of the channel Returns: bool: `True` if successful, `False` otherwise. Raises: ValueError: If your args were bad :( RemoteDataUploadError: If the channel data is valid but upload fails for some other reason.
ndio/remote/neurodata.py
def create_channel(self, channel_name, project_name, dataset_name, channel_type, dtype, startwindow, endwindow, readonly=0, start_time=0, end_time=0, propagate=0, resolution=0, channel_description=''): """ Create a new channel on the Remote, using channel_data. Arguments: channel_name (str): Channel name project_name (str): Project name dataset_name (str): Dataset name channel_type (str): Type of the channel (e.g. `neurodata.IMAGE`) dtype (str): The datatype of the channel's data (e.g. `uint8`) startwindow (int): Window to start in endwindow (int): Window to end in readonly (int): Can others write to this channel? propagate (int): Allow propogation? 1 is True, 0 is False resolution (int): Resolution scaling channel_description (str): Your description of the channel Returns: bool: `True` if successful, `False` otherwise. Raises: ValueError: If your args were bad :( RemoteDataUploadError: If the channel data is valid but upload fails for some other reason. """ return self.resources.create_channel(channel_name, project_name, dataset_name, channel_type, dtype, startwindow, endwindow, readonly, start_time, end_time, propagate, resolution, channel_description)
def create_channel(self, channel_name, project_name, dataset_name, channel_type, dtype, startwindow, endwindow, readonly=0, start_time=0, end_time=0, propagate=0, resolution=0, channel_description=''): """ Create a new channel on the Remote, using channel_data. Arguments: channel_name (str): Channel name project_name (str): Project name dataset_name (str): Dataset name channel_type (str): Type of the channel (e.g. `neurodata.IMAGE`) dtype (str): The datatype of the channel's data (e.g. `uint8`) startwindow (int): Window to start in endwindow (int): Window to end in readonly (int): Can others write to this channel? propagate (int): Allow propogation? 1 is True, 0 is False resolution (int): Resolution scaling channel_description (str): Your description of the channel Returns: bool: `True` if successful, `False` otherwise. Raises: ValueError: If your args were bad :( RemoteDataUploadError: If the channel data is valid but upload fails for some other reason. """ return self.resources.create_channel(channel_name, project_name, dataset_name, channel_type, dtype, startwindow, endwindow, readonly, start_time, end_time, propagate, resolution, channel_description)
[ "Create", "a", "new", "channel", "on", "the", "Remote", "using", "channel_data", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/neurodata.py#L644-L694
[ "def", "create_channel", "(", "self", ",", "channel_name", ",", "project_name", ",", "dataset_name", ",", "channel_type", ",", "dtype", ",", "startwindow", ",", "endwindow", ",", "readonly", "=", "0", ",", "start_time", "=", "0", ",", "end_time", "=", "0", ",", "propagate", "=", "0", ",", "resolution", "=", "0", ",", "channel_description", "=", "''", ")", ":", "return", "self", ".", "resources", ".", "create_channel", "(", "channel_name", ",", "project_name", ",", "dataset_name", ",", "channel_type", ",", "dtype", ",", "startwindow", ",", "endwindow", ",", "readonly", ",", "start_time", ",", "end_time", ",", "propagate", ",", "resolution", ",", "channel_description", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
neurodata.get_channel
Gets info about a channel given its name, name of its project , and name of its dataset. Arguments: channel_name (str): Channel name project_name (str): Project name dataset_name (str): Dataset name Returns: dict: Channel info
ndio/remote/neurodata.py
def get_channel(self, channel_name, project_name, dataset_name): """ Gets info about a channel given its name, name of its project , and name of its dataset. Arguments: channel_name (str): Channel name project_name (str): Project name dataset_name (str): Dataset name Returns: dict: Channel info """ return self.resources.get_channel(channel_name, project_name, dataset_name)
def get_channel(self, channel_name, project_name, dataset_name): """ Gets info about a channel given its name, name of its project , and name of its dataset. Arguments: channel_name (str): Channel name project_name (str): Project name dataset_name (str): Dataset name Returns: dict: Channel info """ return self.resources.get_channel(channel_name, project_name, dataset_name)
[ "Gets", "info", "about", "a", "channel", "given", "its", "name", "name", "of", "its", "project", "and", "name", "of", "its", "dataset", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/neurodata.py#L696-L710
[ "def", "get_channel", "(", "self", ",", "channel_name", ",", "project_name", ",", "dataset_name", ")", ":", "return", "self", ".", "resources", ".", "get_channel", "(", "channel_name", ",", "project_name", ",", "dataset_name", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
neurodata.delete_channel
Deletes a channel given its name, name of its project , and name of its dataset. Arguments: channel_name (str): Channel name project_name (str): Project name dataset_name (str): Dataset name Returns: bool: True if channel deleted, False if not
ndio/remote/neurodata.py
def delete_channel(self, channel_name, project_name, dataset_name): """ Deletes a channel given its name, name of its project , and name of its dataset. Arguments: channel_name (str): Channel name project_name (str): Project name dataset_name (str): Dataset name Returns: bool: True if channel deleted, False if not """ return self.resources.delete_channel(channel_name, project_name, dataset_name)
def delete_channel(self, channel_name, project_name, dataset_name): """ Deletes a channel given its name, name of its project , and name of its dataset. Arguments: channel_name (str): Channel name project_name (str): Project name dataset_name (str): Dataset name Returns: bool: True if channel deleted, False if not """ return self.resources.delete_channel(channel_name, project_name, dataset_name)
[ "Deletes", "a", "channel", "given", "its", "name", "name", "of", "its", "project", "and", "name", "of", "its", "dataset", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/neurodata.py#L712-L726
[ "def", "delete_channel", "(", "self", ",", "channel_name", ",", "project_name", ",", "dataset_name", ")", ":", "return", "self", ".", "resources", ".", "delete_channel", "(", "channel_name", ",", "project_name", ",", "dataset_name", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
NDIngest.add_channel
Arguments: channel_name (str): Channel Name is the specific name of a specific series of data. Standard naming convention is to do ImageTypeIterationNumber or NameSubProjectName. datatype (str): The data type is the storage method of data in the channel. It can be uint8, uint16, uint32, uint64, or float32. channel_type (str): The channel type is the kind of data being stored in the channel. It can be image, annotation, or timeseries. data_url (str): This url points to the root directory of the files. Dropbox (or any data requiring authentication to download such as private s3) is not an acceptable HTTP Server. See additional instructions in documentation online to format s3 properly so it is http accessible. file_format (str): File format refers to the overarching kind of data, as in slices (normal image data) or catmaid (tile-based). file_type (str): File type refers to the specific type of file that the data is stored in, as in, tiff, png, or tif. exceptions (int): Exceptions is an option to enable the possibility for annotations to contradict each other (assign different values to the same point). 1 corresponds to True, 0 corresponds to False. resolution (int): Resolution is the starting resolution of the data being uploaded to the channel. windowrange (int, int): Window range is the maximum and minimum pixel values for a particular image. This is used so that the image can be displayed in a readable way for viewing through RESTful calls readonly (int): This option allows the user to control if, after the initial data commit, the channel is read-only. Generally this is suggested with data that will be publicly viewable. Returns: None
ndio/remote/ndingest.py
def add_channel(self, channel_name, datatype, channel_type, data_url, file_format, file_type, exceptions=None, resolution=None, windowrange=None, readonly=None): """ Arguments: channel_name (str): Channel Name is the specific name of a specific series of data. Standard naming convention is to do ImageTypeIterationNumber or NameSubProjectName. datatype (str): The data type is the storage method of data in the channel. It can be uint8, uint16, uint32, uint64, or float32. channel_type (str): The channel type is the kind of data being stored in the channel. It can be image, annotation, or timeseries. data_url (str): This url points to the root directory of the files. Dropbox (or any data requiring authentication to download such as private s3) is not an acceptable HTTP Server. See additional instructions in documentation online to format s3 properly so it is http accessible. file_format (str): File format refers to the overarching kind of data, as in slices (normal image data) or catmaid (tile-based). file_type (str): File type refers to the specific type of file that the data is stored in, as in, tiff, png, or tif. exceptions (int): Exceptions is an option to enable the possibility for annotations to contradict each other (assign different values to the same point). 1 corresponds to True, 0 corresponds to False. resolution (int): Resolution is the starting resolution of the data being uploaded to the channel. windowrange (int, int): Window range is the maximum and minimum pixel values for a particular image. This is used so that the image can be displayed in a readable way for viewing through RESTful calls readonly (int): This option allows the user to control if, after the initial data commit, the channel is read-only. Generally this is suggested with data that will be publicly viewable. 
Returns: None """ self.channels[channel_name] = [ channel_name.strip().replace(" ", ""), datatype, channel_type.lower(), data_url, file_format, file_type, exceptions, resolution, windowrange, readonly ]
def add_channel(self, channel_name, datatype, channel_type, data_url, file_format, file_type, exceptions=None, resolution=None, windowrange=None, readonly=None): """ Arguments: channel_name (str): Channel Name is the specific name of a specific series of data. Standard naming convention is to do ImageTypeIterationNumber or NameSubProjectName. datatype (str): The data type is the storage method of data in the channel. It can be uint8, uint16, uint32, uint64, or float32. channel_type (str): The channel type is the kind of data being stored in the channel. It can be image, annotation, or timeseries. data_url (str): This url points to the root directory of the files. Dropbox (or any data requiring authentication to download such as private s3) is not an acceptable HTTP Server. See additional instructions in documentation online to format s3 properly so it is http accessible. file_format (str): File format refers to the overarching kind of data, as in slices (normal image data) or catmaid (tile-based). file_type (str): File type refers to the specific type of file that the data is stored in, as in, tiff, png, or tif. exceptions (int): Exceptions is an option to enable the possibility for annotations to contradict each other (assign different values to the same point). 1 corresponds to True, 0 corresponds to False. resolution (int): Resolution is the starting resolution of the data being uploaded to the channel. windowrange (int, int): Window range is the maximum and minimum pixel values for a particular image. This is used so that the image can be displayed in a readable way for viewing through RESTful calls readonly (int): This option allows the user to control if, after the initial data commit, the channel is read-only. Generally this is suggested with data that will be publicly viewable. 
Returns: None """ self.channels[channel_name] = [ channel_name.strip().replace(" ", ""), datatype, channel_type.lower(), data_url, file_format, file_type, exceptions, resolution, windowrange, readonly ]
[ "Arguments", ":", "channel_name", "(", "str", ")", ":", "Channel", "Name", "is", "the", "specific", "name", "of", "a", "specific", "series", "of", "data", ".", "Standard", "naming", "convention", "is", "to", "do", "ImageTypeIterationNumber", "or", "NameSubProjectName", ".", "datatype", "(", "str", ")", ":", "The", "data", "type", "is", "the", "storage", "method", "of", "data", "in", "the", "channel", ".", "It", "can", "be", "uint8", "uint16", "uint32", "uint64", "or", "float32", ".", "channel_type", "(", "str", ")", ":", "The", "channel", "type", "is", "the", "kind", "of", "data", "being", "stored", "in", "the", "channel", ".", "It", "can", "be", "image", "annotation", "or", "timeseries", ".", "data_url", "(", "str", ")", ":", "This", "url", "points", "to", "the", "root", "directory", "of", "the", "files", ".", "Dropbox", "(", "or", "any", "data", "requiring", "authentication", "to", "download", "such", "as", "private", "s3", ")", "is", "not", "an", "acceptable", "HTTP", "Server", ".", "See", "additional", "instructions", "in", "documentation", "online", "to", "format", "s3", "properly", "so", "it", "is", "http", "accessible", ".", "file_format", "(", "str", ")", ":", "File", "format", "refers", "to", "the", "overarching", "kind", "of", "data", "as", "in", "slices", "(", "normal", "image", "data", ")", "or", "catmaid", "(", "tile", "-", "based", ")", ".", "file_type", "(", "str", ")", ":", "File", "type", "refers", "to", "the", "specific", "type", "of", "file", "that", "the", "data", "is", "stored", "in", "as", "in", "tiff", "png", "or", "tif", ".", "exceptions", "(", "int", ")", ":", "Exceptions", "is", "an", "option", "to", "enable", "the", "possibility", "for", "annotations", "to", "contradict", "each", "other", "(", "assign", "different", "values", "to", "the", "same", "point", ")", ".", "1", "corresponds", "to", "True", "0", "corresponds", "to", "False", ".", "resolution", "(", "int", ")", ":", "Resolution", "is", "the", "starting", "resolution", "of", 
"the", "data", "being", "uploaded", "to", "the", "channel", ".", "windowrange", "(", "int", "int", ")", ":", "Window", "range", "is", "the", "maximum", "and", "minimum", "pixel", "values", "for", "a", "particular", "image", ".", "This", "is", "used", "so", "that", "the", "image", "can", "be", "displayed", "in", "a", "readable", "way", "for", "viewing", "through", "RESTful", "calls", "readonly", "(", "int", ")", ":", "This", "option", "allows", "the", "user", "to", "control", "if", "after", "the", "initial", "data", "commit", "the", "channel", "is", "read", "-", "only", ".", "Generally", "this", "is", "suggested", "with", "data", "that", "will", "be", "publicly", "viewable", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/ndingest.py#L66-L113
[ "def", "add_channel", "(", "self", ",", "channel_name", ",", "datatype", ",", "channel_type", ",", "data_url", ",", "file_format", ",", "file_type", ",", "exceptions", "=", "None", ",", "resolution", "=", "None", ",", "windowrange", "=", "None", ",", "readonly", "=", "None", ")", ":", "self", ".", "channels", "[", "channel_name", "]", "=", "[", "channel_name", ".", "strip", "(", ")", ".", "replace", "(", "\" \"", ",", "\"\"", ")", ",", "datatype", ",", "channel_type", ".", "lower", "(", ")", ",", "data_url", ",", "file_format", ",", "file_type", ",", "exceptions", ",", "resolution", ",", "windowrange", ",", "readonly", "]" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
NDIngest.add_project
Arguments: project_name (str): Project name is the specific project within a dataset's name. If there is only one project associated with a dataset then standard convention is to name the project the same as its associated dataset. token_name (str): The token name is the default token. If you do not wish to specify one, a default one will be created for you with the same name as the project name. However, if the project is private you must specify a token. public (int): This option allows users to specify if they want the project/channels to be publicly viewable/search-able. (1, 0) = (TRUE, FALSE) Returns: None
ndio/remote/ndingest.py
def add_project(self, project_name, token_name=None, public=None): """ Arguments: project_name (str): Project name is the specific project within a dataset's name. If there is only one project associated with a dataset then standard convention is to name the project the same as its associated dataset. token_name (str): The token name is the default token. If you do not wish to specify one, a default one will be created for you with the same name as the project name. However, if the project is private you must specify a token. public (int): This option allows users to specify if they want the project/channels to be publicly viewable/search-able. (1, 0) = (TRUE, FALSE) Returns: None """ self.project = (project_name.strip().replace(" ", ""), token_name.strip().replace(" ", ""), public)
def add_project(self, project_name, token_name=None, public=None): """ Arguments: project_name (str): Project name is the specific project within a dataset's name. If there is only one project associated with a dataset then standard convention is to name the project the same as its associated dataset. token_name (str): The token name is the default token. If you do not wish to specify one, a default one will be created for you with the same name as the project name. However, if the project is private you must specify a token. public (int): This option allows users to specify if they want the project/channels to be publicly viewable/search-able. (1, 0) = (TRUE, FALSE) Returns: None """ self.project = (project_name.strip().replace(" ", ""), token_name.strip().replace(" ", ""), public)
[ "Arguments", ":", "project_name", "(", "str", ")", ":", "Project", "name", "is", "the", "specific", "project", "within", "a", "dataset", "s", "name", ".", "If", "there", "is", "only", "one", "project", "associated", "with", "a", "dataset", "then", "standard", "convention", "is", "to", "name", "the", "project", "the", "same", "as", "its", "associated", "dataset", ".", "token_name", "(", "str", ")", ":", "The", "token", "name", "is", "the", "default", "token", ".", "If", "you", "do", "not", "wish", "to", "specify", "one", "a", "default", "one", "will", "be", "created", "for", "you", "with", "the", "same", "name", "as", "the", "project", "name", ".", "However", "if", "the", "project", "is", "private", "you", "must", "specify", "a", "token", ".", "public", "(", "int", ")", ":", "This", "option", "allows", "users", "to", "specify", "if", "they", "want", "the", "project", "/", "channels", "to", "be", "publicly", "viewable", "/", "search", "-", "able", ".", "(", "1", "0", ")", "=", "(", "TRUE", "FALSE", ")" ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/ndingest.py#L115-L134
[ "def", "add_project", "(", "self", ",", "project_name", ",", "token_name", "=", "None", ",", "public", "=", "None", ")", ":", "self", ".", "project", "=", "(", "project_name", ".", "strip", "(", ")", ".", "replace", "(", "\" \"", ",", "\"\"", ")", ",", "token_name", ".", "strip", "(", ")", ".", "replace", "(", "\" \"", ",", "\"\"", ")", ",", "public", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
NDIngest.add_dataset
Add a new dataset to the ingest. Arguments: dataset_name (str): Dataset Name is the overarching name of the research effort. Standard naming convention is to do LabNamePublicationYear or LeadResearcherCurrentYear. imagesize (int, int, int): Image size is the pixel count dimensions of the data. For example is the data is stored as a series of 100 slices each 2100x2000 pixel TIFF images, the X,Y,Z dimensions are (2100, 2000, 100). voxelres (float, float, float): Voxel Resolution is the number of voxels per unit pixel. We store X,Y,Z voxel resolution separately. offset (int, int, int): If your data is not well aligned and there is "excess" image data you do not wish to examine, but are present in your images, offset is how you specify where your actual image starts. Offset is provided a pixel coordinate offset from origin which specifies the "actual" origin of the image. The offset is for X,Y,Z dimensions. timerange (int, int): Time Range is a parameter to support storage of Time Series data, so the value of the tuple is a 0 to X range of how many images over time were taken. It takes 2 inputs timeStepStart and timeStepStop. scalinglevels (int): Scaling levels is the number of levels the data is scalable to (how many zoom levels are present in the data). The highest resolution of the data is at scaling level 0, and for each level up the data is down sampled by 2x2 (per slice). To learn more about the sampling service used, visit the the propagation service page. scaling (int): Scaling is the scaling method of the data being stored. 0 corresponds to a Z-slice orientation (as in a collection of tiff images in which each tiff is a slice on the z plane) where data will be scaled only on the xy plane, not the z plane. 1 corresponds to an isotropic orientation (in which each tiff is a slice on the y plane) where data is scaled along all axis. Returns: None
ndio/remote/ndingest.py
def add_dataset(self, dataset_name, imagesize, voxelres, offset=None, timerange=None, scalinglevels=None, scaling=None): """ Add a new dataset to the ingest. Arguments: dataset_name (str): Dataset Name is the overarching name of the research effort. Standard naming convention is to do LabNamePublicationYear or LeadResearcherCurrentYear. imagesize (int, int, int): Image size is the pixel count dimensions of the data. For example is the data is stored as a series of 100 slices each 2100x2000 pixel TIFF images, the X,Y,Z dimensions are (2100, 2000, 100). voxelres (float, float, float): Voxel Resolution is the number of voxels per unit pixel. We store X,Y,Z voxel resolution separately. offset (int, int, int): If your data is not well aligned and there is "excess" image data you do not wish to examine, but are present in your images, offset is how you specify where your actual image starts. Offset is provided a pixel coordinate offset from origin which specifies the "actual" origin of the image. The offset is for X,Y,Z dimensions. timerange (int, int): Time Range is a parameter to support storage of Time Series data, so the value of the tuple is a 0 to X range of how many images over time were taken. It takes 2 inputs timeStepStart and timeStepStop. scalinglevels (int): Scaling levels is the number of levels the data is scalable to (how many zoom levels are present in the data). The highest resolution of the data is at scaling level 0, and for each level up the data is down sampled by 2x2 (per slice). To learn more about the sampling service used, visit the the propagation service page. scaling (int): Scaling is the scaling method of the data being stored. 0 corresponds to a Z-slice orientation (as in a collection of tiff images in which each tiff is a slice on the z plane) where data will be scaled only on the xy plane, not the z plane. 1 corresponds to an isotropic orientation (in which each tiff is a slice on the y plane) where data is scaled along all axis. 
Returns: None """ self.dataset = (dataset_name.strip().replace(" ", ""), imagesize, voxelres, offset, timerange, scalinglevels, scaling)
def add_dataset(self, dataset_name, imagesize, voxelres, offset=None, timerange=None, scalinglevels=None, scaling=None): """ Add a new dataset to the ingest. Arguments: dataset_name (str): Dataset Name is the overarching name of the research effort. Standard naming convention is to do LabNamePublicationYear or LeadResearcherCurrentYear. imagesize (int, int, int): Image size is the pixel count dimensions of the data. For example is the data is stored as a series of 100 slices each 2100x2000 pixel TIFF images, the X,Y,Z dimensions are (2100, 2000, 100). voxelres (float, float, float): Voxel Resolution is the number of voxels per unit pixel. We store X,Y,Z voxel resolution separately. offset (int, int, int): If your data is not well aligned and there is "excess" image data you do not wish to examine, but are present in your images, offset is how you specify where your actual image starts. Offset is provided a pixel coordinate offset from origin which specifies the "actual" origin of the image. The offset is for X,Y,Z dimensions. timerange (int, int): Time Range is a parameter to support storage of Time Series data, so the value of the tuple is a 0 to X range of how many images over time were taken. It takes 2 inputs timeStepStart and timeStepStop. scalinglevels (int): Scaling levels is the number of levels the data is scalable to (how many zoom levels are present in the data). The highest resolution of the data is at scaling level 0, and for each level up the data is down sampled by 2x2 (per slice). To learn more about the sampling service used, visit the the propagation service page. scaling (int): Scaling is the scaling method of the data being stored. 0 corresponds to a Z-slice orientation (as in a collection of tiff images in which each tiff is a slice on the z plane) where data will be scaled only on the xy plane, not the z plane. 1 corresponds to an isotropic orientation (in which each tiff is a slice on the y plane) where data is scaled along all axis. 
Returns: None """ self.dataset = (dataset_name.strip().replace(" ", ""), imagesize, voxelres, offset, timerange, scalinglevels, scaling)
[ "Add", "a", "new", "dataset", "to", "the", "ingest", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/ndingest.py#L136-L180
[ "def", "add_dataset", "(", "self", ",", "dataset_name", ",", "imagesize", ",", "voxelres", ",", "offset", "=", "None", ",", "timerange", "=", "None", ",", "scalinglevels", "=", "None", ",", "scaling", "=", "None", ")", ":", "self", ".", "dataset", "=", "(", "dataset_name", ".", "strip", "(", ")", ".", "replace", "(", "\" \"", ",", "\"\"", ")", ",", "imagesize", ",", "voxelres", ",", "offset", ",", "timerange", ",", "scalinglevels", ",", "scaling", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
NDIngest.nd_json
Genarate ND json object.
ndio/remote/ndingest.py
def nd_json(self, dataset, project, channel_list, metadata): """ Genarate ND json object. """ nd_dict = {} nd_dict['dataset'] = self.dataset_dict(*dataset) nd_dict['project'] = self.project_dict(*project) nd_dict['metadata'] = metadata nd_dict['channels'] = {} for channel_name, value in channel_list.items(): nd_dict['channels'][channel_name] = self.channel_dict(*value) return json.dumps(nd_dict, sort_keys=True, indent=4)
def nd_json(self, dataset, project, channel_list, metadata): """ Genarate ND json object. """ nd_dict = {} nd_dict['dataset'] = self.dataset_dict(*dataset) nd_dict['project'] = self.project_dict(*project) nd_dict['metadata'] = metadata nd_dict['channels'] = {} for channel_name, value in channel_list.items(): nd_dict['channels'][channel_name] = self.channel_dict(*value) return json.dumps(nd_dict, sort_keys=True, indent=4)
[ "Genarate", "ND", "json", "object", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/ndingest.py#L192-L204
[ "def", "nd_json", "(", "self", ",", "dataset", ",", "project", ",", "channel_list", ",", "metadata", ")", ":", "nd_dict", "=", "{", "}", "nd_dict", "[", "'dataset'", "]", "=", "self", ".", "dataset_dict", "(", "*", "dataset", ")", "nd_dict", "[", "'project'", "]", "=", "self", ".", "project_dict", "(", "*", "project", ")", "nd_dict", "[", "'metadata'", "]", "=", "metadata", "nd_dict", "[", "'channels'", "]", "=", "{", "}", "for", "channel_name", ",", "value", "in", "channel_list", ".", "items", "(", ")", ":", "nd_dict", "[", "'channels'", "]", "[", "channel_name", "]", "=", "self", ".", "channel_dict", "(", "*", "value", ")", "return", "json", ".", "dumps", "(", "nd_dict", ",", "sort_keys", "=", "True", ",", "indent", "=", "4", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
NDIngest.dataset_dict
Generate the dataset dictionary
ndio/remote/ndingest.py
def dataset_dict( self, dataset_name, imagesize, voxelres, offset, timerange, scalinglevels, scaling): """Generate the dataset dictionary""" dataset_dict = {} dataset_dict['dataset_name'] = dataset_name dataset_dict['imagesize'] = imagesize dataset_dict['voxelres'] = voxelres if offset is not None: dataset_dict['offset'] = offset if timerange is not None: dataset_dict['timerange'] = timerange if scalinglevels is not None: dataset_dict['scalinglevels'] = scalinglevels if scaling is not None: dataset_dict['scaling'] = scaling return dataset_dict
def dataset_dict( self, dataset_name, imagesize, voxelres, offset, timerange, scalinglevels, scaling): """Generate the dataset dictionary""" dataset_dict = {} dataset_dict['dataset_name'] = dataset_name dataset_dict['imagesize'] = imagesize dataset_dict['voxelres'] = voxelres if offset is not None: dataset_dict['offset'] = offset if timerange is not None: dataset_dict['timerange'] = timerange if scalinglevels is not None: dataset_dict['scalinglevels'] = scalinglevels if scaling is not None: dataset_dict['scaling'] = scaling return dataset_dict
[ "Generate", "the", "dataset", "dictionary" ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/ndingest.py#L220-L236
[ "def", "dataset_dict", "(", "self", ",", "dataset_name", ",", "imagesize", ",", "voxelres", ",", "offset", ",", "timerange", ",", "scalinglevels", ",", "scaling", ")", ":", "dataset_dict", "=", "{", "}", "dataset_dict", "[", "'dataset_name'", "]", "=", "dataset_name", "dataset_dict", "[", "'imagesize'", "]", "=", "imagesize", "dataset_dict", "[", "'voxelres'", "]", "=", "voxelres", "if", "offset", "is", "not", "None", ":", "dataset_dict", "[", "'offset'", "]", "=", "offset", "if", "timerange", "is", "not", "None", ":", "dataset_dict", "[", "'timerange'", "]", "=", "timerange", "if", "scalinglevels", "is", "not", "None", ":", "dataset_dict", "[", "'scalinglevels'", "]", "=", "scalinglevels", "if", "scaling", "is", "not", "None", ":", "dataset_dict", "[", "'scaling'", "]", "=", "scaling", "return", "dataset_dict" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
NDIngest.channel_dict
Generate the project dictionary.
ndio/remote/ndingest.py
def channel_dict(self, channel_name, datatype, channel_type, data_url, file_format, file_type, exceptions, resolution, windowrange, readonly): """ Generate the project dictionary. """ channel_dict = {} channel_dict['channel_name'] = channel_name channel_dict['datatype'] = datatype channel_dict['channel_type'] = channel_type if exceptions is not None: channel_dict['exceptions'] = exceptions if resolution is not None: channel_dict['resolution'] = resolution if windowrange is not None: channel_dict['windowrange'] = windowrange if readonly is not None: channel_dict['readonly'] = readonly channel_dict['data_url'] = data_url channel_dict['file_format'] = file_format channel_dict['file_type'] = file_type return channel_dict
def channel_dict(self, channel_name, datatype, channel_type, data_url, file_format, file_type, exceptions, resolution, windowrange, readonly): """ Generate the project dictionary. """ channel_dict = {} channel_dict['channel_name'] = channel_name channel_dict['datatype'] = datatype channel_dict['channel_type'] = channel_type if exceptions is not None: channel_dict['exceptions'] = exceptions if resolution is not None: channel_dict['resolution'] = resolution if windowrange is not None: channel_dict['windowrange'] = windowrange if readonly is not None: channel_dict['readonly'] = readonly channel_dict['data_url'] = data_url channel_dict['file_format'] = file_format channel_dict['file_type'] = file_type return channel_dict
[ "Generate", "the", "project", "dictionary", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/ndingest.py#L238-L259
[ "def", "channel_dict", "(", "self", ",", "channel_name", ",", "datatype", ",", "channel_type", ",", "data_url", ",", "file_format", ",", "file_type", ",", "exceptions", ",", "resolution", ",", "windowrange", ",", "readonly", ")", ":", "channel_dict", "=", "{", "}", "channel_dict", "[", "'channel_name'", "]", "=", "channel_name", "channel_dict", "[", "'datatype'", "]", "=", "datatype", "channel_dict", "[", "'channel_type'", "]", "=", "channel_type", "if", "exceptions", "is", "not", "None", ":", "channel_dict", "[", "'exceptions'", "]", "=", "exceptions", "if", "resolution", "is", "not", "None", ":", "channel_dict", "[", "'resolution'", "]", "=", "resolution", "if", "windowrange", "is", "not", "None", ":", "channel_dict", "[", "'windowrange'", "]", "=", "windowrange", "if", "readonly", "is", "not", "None", ":", "channel_dict", "[", "'readonly'", "]", "=", "readonly", "channel_dict", "[", "'data_url'", "]", "=", "data_url", "channel_dict", "[", "'file_format'", "]", "=", "file_format", "channel_dict", "[", "'file_type'", "]", "=", "file_type", "return", "channel_dict" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
NDIngest.project_dict
Genarate the project dictionary.
ndio/remote/ndingest.py
def project_dict(self, project_name, token_name, public): """ Genarate the project dictionary. """ project_dict = {} project_dict['project_name'] = project_name if token_name is not None: if token_name == '': project_dict['token_name'] = project_name else: project_dict['token_name'] = token_name else: project_dict['token_name'] = project_name if public is not None: project_dict['public'] = public return project_dict
def project_dict(self, project_name, token_name, public): """ Genarate the project dictionary. """ project_dict = {} project_dict['project_name'] = project_name if token_name is not None: if token_name == '': project_dict['token_name'] = project_name else: project_dict['token_name'] = token_name else: project_dict['token_name'] = project_name if public is not None: project_dict['public'] = public return project_dict
[ "Genarate", "the", "project", "dictionary", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/ndingest.py#L261-L277
[ "def", "project_dict", "(", "self", ",", "project_name", ",", "token_name", ",", "public", ")", ":", "project_dict", "=", "{", "}", "project_dict", "[", "'project_name'", "]", "=", "project_name", "if", "token_name", "is", "not", "None", ":", "if", "token_name", "==", "''", ":", "project_dict", "[", "'token_name'", "]", "=", "project_name", "else", ":", "project_dict", "[", "'token_name'", "]", "=", "token_name", "else", ":", "project_dict", "[", "'token_name'", "]", "=", "project_name", "if", "public", "is", "not", "None", ":", "project_dict", "[", "'public'", "]", "=", "public", "return", "project_dict" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
NDIngest.identify_imagesize
Identify the image size using the data location and other parameters
ndio/remote/ndingest.py
def identify_imagesize(self, image_type, image_path='/tmp/img.'): """ Identify the image size using the data location and other parameters """ dims = () try: if (image_type.lower() == 'png'): dims = np.shape(ndpng.load('{}{}'.format( image_path, image_type ))) elif (image_type.lower() == 'tif' or image_type.lower() == 'tiff'): dims = np.shape(ndtiff.load('{}{}'.format( image_path, image_type ))) else: raise ValueError("Unsupported image type.") except: raise OSError('The file was not accessible at {}{}'.format( image_path, image_type )) return dims[::-1]
def identify_imagesize(self, image_type, image_path='/tmp/img.'): """ Identify the image size using the data location and other parameters """ dims = () try: if (image_type.lower() == 'png'): dims = np.shape(ndpng.load('{}{}'.format( image_path, image_type ))) elif (image_type.lower() == 'tif' or image_type.lower() == 'tiff'): dims = np.shape(ndtiff.load('{}{}'.format( image_path, image_type ))) else: raise ValueError("Unsupported image type.") except: raise OSError('The file was not accessible at {}{}'.format( image_path, image_type )) return dims[::-1]
[ "Identify", "the", "image", "size", "using", "the", "data", "location", "and", "other", "parameters" ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/ndingest.py#L279-L300
[ "def", "identify_imagesize", "(", "self", ",", "image_type", ",", "image_path", "=", "'/tmp/img.'", ")", ":", "dims", "=", "(", ")", "try", ":", "if", "(", "image_type", ".", "lower", "(", ")", "==", "'png'", ")", ":", "dims", "=", "np", ".", "shape", "(", "ndpng", ".", "load", "(", "'{}{}'", ".", "format", "(", "image_path", ",", "image_type", ")", ")", ")", "elif", "(", "image_type", ".", "lower", "(", ")", "==", "'tif'", "or", "image_type", ".", "lower", "(", ")", "==", "'tiff'", ")", ":", "dims", "=", "np", ".", "shape", "(", "ndtiff", ".", "load", "(", "'{}{}'", ".", "format", "(", "image_path", ",", "image_type", ")", ")", ")", "else", ":", "raise", "ValueError", "(", "\"Unsupported image type.\"", ")", "except", ":", "raise", "OSError", "(", "'The file was not accessible at {}{}'", ".", "format", "(", "image_path", ",", "image_type", ")", ")", "return", "dims", "[", ":", ":", "-", "1", "]" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
NDIngest.verify_path
Verify the path supplied.
ndio/remote/ndingest.py
def verify_path(self, data, verifytype): """ Verify the path supplied. """ # Insert try and catch blocks try: token_name = data["project"]["token_name"] except: token_name = data["project"]["project_name"] channel_names = list(data["channels"].copy().keys()) imgsz = data['dataset']['imagesize'] for i in range(0, len(channel_names)): channel_type = data["channels"][ channel_names[i]]["channel_type"] path = data["channels"][channel_names[i]]["data_url"] aws_pattern = re.compile("^(http:\/\/)(.+)(\.s3\.amazonaws\.com)") file_type = data["channels"][channel_names[i]]["file_type"] if "offset" in data["dataset"]: offset = data["dataset"]["offset"][0] else: offset = 0 if (aws_pattern.match(path)): verifytype = VERIFY_BY_SLICE if (channel_type == "timeseries"): timerange = data["dataset"]["timerange"] try: assert(timerange[0] != timerange[1]) except AssertionError: raise ValueError('Timeseries values are the same, did you\ specify the time steps?') for j in range(timerange[0], timerange[1] + 1): # Test for tifs or such? 
Currently test for just not # empty if (verifytype == VERIFY_BY_FOLDER): work_path = "{}/{}/{}/time{}/".format( path, token_name, channel_names[i], ("%04d" % j)) elif (verifytype == VERIFY_BY_SLICE): work_path = "{}/{}/{}/time{}/{}.{}".format( path, token_name, channel_names[i], ("%04d" % j), ("%04d" % offset), file_type) else: raise TypeError('Incorrect verify method') # Check for accessibility try: if (verifytype == VERIFY_BY_FOLDER): resp = requests.head(work_path) assert(resp.status_code == 200) elif (verifytype == VERIFY_BY_SLICE): resp = requests.get( work_path, stream=True, verify=False) with open('/tmp/img.{}'.format(file_type), 'wb') as out_file: shutil.copyfileobj(resp.raw, out_file) out_file.close() assert(resp.status_code == 200) resp.close() except AssertionError: raise OSError('Files are not http accessible: \ Error: {}, Path: {}'.format(resp.status_code, work_path)) # Attempt to Verify imagesize here try: if (verifytype == VERIFY_BY_SLICE): assert(list(self.identify_imagesize(file_type)) == imgsz[0:2]) except: raise ValueError('File image size does not match\ provided image size.') else: # Test for tifs or such? 
Currently test for just not empty if (verifytype == VERIFY_BY_FOLDER): work_path = "{}/{}/{}/".format( path, token_name, channel_names[i]) elif (verifytype == VERIFY_BY_SLICE): work_path = "{}/{}/{}/{}.{}".format( path, token_name, channel_names[i], ("%04d" % offset), file_type) else: raise TypeError('Incorrect verify method') # Check for accessibility if (verifytype == VERIFY_BY_FOLDER): resp = requests.head(work_path) elif (verifytype == VERIFY_BY_SLICE): resp = requests.get(work_path, stream=True, verify=False) with open('/tmp/img.{}'.format(file_type), 'wb') as out_file: shutil.copyfileobj(resp.raw, out_file) out_file.close() resp.close() if (resp.status_code >= 300): raise OSError('Files are not http accessible: \ Error: {}, Path: {}'.format(resp.status_code, work_path)) # Attempt to Verify imagesize here try: if (verifytype == VERIFY_BY_SLICE): assert(list(self.identify_imagesize(file_type)) == imgsz[0:2]) except: raise ValueError('File image size does not match\ provided image size.')
def verify_path(self, data, verifytype): """ Verify the path supplied. """ # Insert try and catch blocks try: token_name = data["project"]["token_name"] except: token_name = data["project"]["project_name"] channel_names = list(data["channels"].copy().keys()) imgsz = data['dataset']['imagesize'] for i in range(0, len(channel_names)): channel_type = data["channels"][ channel_names[i]]["channel_type"] path = data["channels"][channel_names[i]]["data_url"] aws_pattern = re.compile("^(http:\/\/)(.+)(\.s3\.amazonaws\.com)") file_type = data["channels"][channel_names[i]]["file_type"] if "offset" in data["dataset"]: offset = data["dataset"]["offset"][0] else: offset = 0 if (aws_pattern.match(path)): verifytype = VERIFY_BY_SLICE if (channel_type == "timeseries"): timerange = data["dataset"]["timerange"] try: assert(timerange[0] != timerange[1]) except AssertionError: raise ValueError('Timeseries values are the same, did you\ specify the time steps?') for j in range(timerange[0], timerange[1] + 1): # Test for tifs or such? 
Currently test for just not # empty if (verifytype == VERIFY_BY_FOLDER): work_path = "{}/{}/{}/time{}/".format( path, token_name, channel_names[i], ("%04d" % j)) elif (verifytype == VERIFY_BY_SLICE): work_path = "{}/{}/{}/time{}/{}.{}".format( path, token_name, channel_names[i], ("%04d" % j), ("%04d" % offset), file_type) else: raise TypeError('Incorrect verify method') # Check for accessibility try: if (verifytype == VERIFY_BY_FOLDER): resp = requests.head(work_path) assert(resp.status_code == 200) elif (verifytype == VERIFY_BY_SLICE): resp = requests.get( work_path, stream=True, verify=False) with open('/tmp/img.{}'.format(file_type), 'wb') as out_file: shutil.copyfileobj(resp.raw, out_file) out_file.close() assert(resp.status_code == 200) resp.close() except AssertionError: raise OSError('Files are not http accessible: \ Error: {}, Path: {}'.format(resp.status_code, work_path)) # Attempt to Verify imagesize here try: if (verifytype == VERIFY_BY_SLICE): assert(list(self.identify_imagesize(file_type)) == imgsz[0:2]) except: raise ValueError('File image size does not match\ provided image size.') else: # Test for tifs or such? 
Currently test for just not empty if (verifytype == VERIFY_BY_FOLDER): work_path = "{}/{}/{}/".format( path, token_name, channel_names[i]) elif (verifytype == VERIFY_BY_SLICE): work_path = "{}/{}/{}/{}.{}".format( path, token_name, channel_names[i], ("%04d" % offset), file_type) else: raise TypeError('Incorrect verify method') # Check for accessibility if (verifytype == VERIFY_BY_FOLDER): resp = requests.head(work_path) elif (verifytype == VERIFY_BY_SLICE): resp = requests.get(work_path, stream=True, verify=False) with open('/tmp/img.{}'.format(file_type), 'wb') as out_file: shutil.copyfileobj(resp.raw, out_file) out_file.close() resp.close() if (resp.status_code >= 300): raise OSError('Files are not http accessible: \ Error: {}, Path: {}'.format(resp.status_code, work_path)) # Attempt to Verify imagesize here try: if (verifytype == VERIFY_BY_SLICE): assert(list(self.identify_imagesize(file_type)) == imgsz[0:2]) except: raise ValueError('File image size does not match\ provided image size.')
[ "Verify", "the", "path", "supplied", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/ndingest.py#L302-L409
[ "def", "verify_path", "(", "self", ",", "data", ",", "verifytype", ")", ":", "# Insert try and catch blocks", "try", ":", "token_name", "=", "data", "[", "\"project\"", "]", "[", "\"token_name\"", "]", "except", ":", "token_name", "=", "data", "[", "\"project\"", "]", "[", "\"project_name\"", "]", "channel_names", "=", "list", "(", "data", "[", "\"channels\"", "]", ".", "copy", "(", ")", ".", "keys", "(", ")", ")", "imgsz", "=", "data", "[", "'dataset'", "]", "[", "'imagesize'", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "channel_names", ")", ")", ":", "channel_type", "=", "data", "[", "\"channels\"", "]", "[", "channel_names", "[", "i", "]", "]", "[", "\"channel_type\"", "]", "path", "=", "data", "[", "\"channels\"", "]", "[", "channel_names", "[", "i", "]", "]", "[", "\"data_url\"", "]", "aws_pattern", "=", "re", ".", "compile", "(", "\"^(http:\\/\\/)(.+)(\\.s3\\.amazonaws\\.com)\"", ")", "file_type", "=", "data", "[", "\"channels\"", "]", "[", "channel_names", "[", "i", "]", "]", "[", "\"file_type\"", "]", "if", "\"offset\"", "in", "data", "[", "\"dataset\"", "]", ":", "offset", "=", "data", "[", "\"dataset\"", "]", "[", "\"offset\"", "]", "[", "0", "]", "else", ":", "offset", "=", "0", "if", "(", "aws_pattern", ".", "match", "(", "path", ")", ")", ":", "verifytype", "=", "VERIFY_BY_SLICE", "if", "(", "channel_type", "==", "\"timeseries\"", ")", ":", "timerange", "=", "data", "[", "\"dataset\"", "]", "[", "\"timerange\"", "]", "try", ":", "assert", "(", "timerange", "[", "0", "]", "!=", "timerange", "[", "1", "]", ")", "except", "AssertionError", ":", "raise", "ValueError", "(", "'Timeseries values are the same, did you\\\nspecify the time steps?'", ")", "for", "j", "in", "range", "(", "timerange", "[", "0", "]", ",", "timerange", "[", "1", "]", "+", "1", ")", ":", "# Test for tifs or such? 
Currently test for just not", "# empty", "if", "(", "verifytype", "==", "VERIFY_BY_FOLDER", ")", ":", "work_path", "=", "\"{}/{}/{}/time{}/\"", ".", "format", "(", "path", ",", "token_name", ",", "channel_names", "[", "i", "]", ",", "(", "\"%04d\"", "%", "j", ")", ")", "elif", "(", "verifytype", "==", "VERIFY_BY_SLICE", ")", ":", "work_path", "=", "\"{}/{}/{}/time{}/{}.{}\"", ".", "format", "(", "path", ",", "token_name", ",", "channel_names", "[", "i", "]", ",", "(", "\"%04d\"", "%", "j", ")", ",", "(", "\"%04d\"", "%", "offset", ")", ",", "file_type", ")", "else", ":", "raise", "TypeError", "(", "'Incorrect verify method'", ")", "# Check for accessibility", "try", ":", "if", "(", "verifytype", "==", "VERIFY_BY_FOLDER", ")", ":", "resp", "=", "requests", ".", "head", "(", "work_path", ")", "assert", "(", "resp", ".", "status_code", "==", "200", ")", "elif", "(", "verifytype", "==", "VERIFY_BY_SLICE", ")", ":", "resp", "=", "requests", ".", "get", "(", "work_path", ",", "stream", "=", "True", ",", "verify", "=", "False", ")", "with", "open", "(", "'/tmp/img.{}'", ".", "format", "(", "file_type", ")", ",", "'wb'", ")", "as", "out_file", ":", "shutil", ".", "copyfileobj", "(", "resp", ".", "raw", ",", "out_file", ")", "out_file", ".", "close", "(", ")", "assert", "(", "resp", ".", "status_code", "==", "200", ")", "resp", ".", "close", "(", ")", "except", "AssertionError", ":", "raise", "OSError", "(", "'Files are not http accessible: \\\n Error: {}, Path: {}'", ".", "format", "(", "resp", ".", "status_code", ",", "work_path", ")", ")", "# Attempt to Verify imagesize here", "try", ":", "if", "(", "verifytype", "==", "VERIFY_BY_SLICE", ")", ":", "assert", "(", "list", "(", "self", ".", "identify_imagesize", "(", "file_type", ")", ")", "==", "imgsz", "[", "0", ":", "2", "]", ")", "except", ":", "raise", "ValueError", "(", "'File image size does not match\\\nprovided image size.'", ")", "else", ":", "# Test for tifs or such? 
Currently test for just not empty", "if", "(", "verifytype", "==", "VERIFY_BY_FOLDER", ")", ":", "work_path", "=", "\"{}/{}/{}/\"", ".", "format", "(", "path", ",", "token_name", ",", "channel_names", "[", "i", "]", ")", "elif", "(", "verifytype", "==", "VERIFY_BY_SLICE", ")", ":", "work_path", "=", "\"{}/{}/{}/{}.{}\"", ".", "format", "(", "path", ",", "token_name", ",", "channel_names", "[", "i", "]", ",", "(", "\"%04d\"", "%", "offset", ")", ",", "file_type", ")", "else", ":", "raise", "TypeError", "(", "'Incorrect verify method'", ")", "# Check for accessibility", "if", "(", "verifytype", "==", "VERIFY_BY_FOLDER", ")", ":", "resp", "=", "requests", ".", "head", "(", "work_path", ")", "elif", "(", "verifytype", "==", "VERIFY_BY_SLICE", ")", ":", "resp", "=", "requests", ".", "get", "(", "work_path", ",", "stream", "=", "True", ",", "verify", "=", "False", ")", "with", "open", "(", "'/tmp/img.{}'", ".", "format", "(", "file_type", ")", ",", "'wb'", ")", "as", "out_file", ":", "shutil", ".", "copyfileobj", "(", "resp", ".", "raw", ",", "out_file", ")", "out_file", ".", "close", "(", ")", "resp", ".", "close", "(", ")", "if", "(", "resp", ".", "status_code", ">=", "300", ")", ":", "raise", "OSError", "(", "'Files are not http accessible: \\\n Error: {}, Path: {}'", ".", "format", "(", "resp", ".", "status_code", ",", "work_path", ")", ")", "# Attempt to Verify imagesize here", "try", ":", "if", "(", "verifytype", "==", "VERIFY_BY_SLICE", ")", ":", "assert", "(", "list", "(", "self", ".", "identify_imagesize", "(", "file_type", ")", ")", "==", "imgsz", "[", "0", ":", "2", "]", ")", "except", ":", "raise", "ValueError", "(", "'File image size does not match\\\nprovided image size.'", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
NDIngest.put_data
Try to post data to the server.
ndio/remote/ndingest.py
def put_data(self, data): """ Try to post data to the server. """ URLPath = self.oo.url("autoIngest/") # URLPath = 'https://{}/ca/autoIngest/'.format(self.oo.site_host) try: response = requests.post(URLPath, data=json.dumps(data), verify=False) assert(response.status_code == 200) print("From ndio: {}".format(response.content)) except: raise OSError("Error in posting JSON file {}\ ".format(response.status_code))
def put_data(self, data): """ Try to post data to the server. """ URLPath = self.oo.url("autoIngest/") # URLPath = 'https://{}/ca/autoIngest/'.format(self.oo.site_host) try: response = requests.post(URLPath, data=json.dumps(data), verify=False) assert(response.status_code == 200) print("From ndio: {}".format(response.content)) except: raise OSError("Error in posting JSON file {}\ ".format(response.status_code))
[ "Try", "to", "post", "data", "to", "the", "server", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/ndingest.py#L452-L465
[ "def", "put_data", "(", "self", ",", "data", ")", ":", "URLPath", "=", "self", ".", "oo", ".", "url", "(", "\"autoIngest/\"", ")", "# URLPath = 'https://{}/ca/autoIngest/'.format(self.oo.site_host)", "try", ":", "response", "=", "requests", ".", "post", "(", "URLPath", ",", "data", "=", "json", ".", "dumps", "(", "data", ")", ",", "verify", "=", "False", ")", "assert", "(", "response", ".", "status_code", "==", "200", ")", "print", "(", "\"From ndio: {}\"", ".", "format", "(", "response", ".", "content", ")", ")", "except", ":", "raise", "OSError", "(", "\"Error in posting JSON file {}\\\n\"", ".", "format", "(", "response", ".", "status_code", ")", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
NDIngest.post_data
Arguments: file_name (str): The file name of the json file to post (optional). If this is left unspecified it is assumed the data is in the AutoIngest object. dev (bool): If pushing to a microns dev branch server set this to True, if not leave False. verifytype (enum): Set http verification type, by checking the first slice is accessible or by checking channel folder. NOTE: If verification occurs by folder there is NO image size or type verification. Enum: [Folder, Slice] Returns: None
ndio/remote/ndingest.py
def post_data(self, file_name=None, legacy=False, verifytype=VERIFY_BY_SLICE): """ Arguments: file_name (str): The file name of the json file to post (optional). If this is left unspecified it is assumed the data is in the AutoIngest object. dev (bool): If pushing to a microns dev branch server set this to True, if not leave False. verifytype (enum): Set http verification type, by checking the first slice is accessible or by checking channel folder. NOTE: If verification occurs by folder there is NO image size or type verification. Enum: [Folder, Slice] Returns: None """ if (file_name is None): complete_example = ( self.dataset, self.project, self.channels, self.metadata) data = json.loads(self.nd_json(*complete_example)) else: try: with open(file_name) as data_file: data = json.load(data_file) except: raise OSError("Error opening file") # self.verify_path(data, verifytype) # self.verify_json(data) self.put_data(data)
def post_data(self, file_name=None, legacy=False, verifytype=VERIFY_BY_SLICE): """ Arguments: file_name (str): The file name of the json file to post (optional). If this is left unspecified it is assumed the data is in the AutoIngest object. dev (bool): If pushing to a microns dev branch server set this to True, if not leave False. verifytype (enum): Set http verification type, by checking the first slice is accessible or by checking channel folder. NOTE: If verification occurs by folder there is NO image size or type verification. Enum: [Folder, Slice] Returns: None """ if (file_name is None): complete_example = ( self.dataset, self.project, self.channels, self.metadata) data = json.loads(self.nd_json(*complete_example)) else: try: with open(file_name) as data_file: data = json.load(data_file) except: raise OSError("Error opening file") # self.verify_path(data, verifytype) # self.verify_json(data) self.put_data(data)
[ "Arguments", ":", "file_name", "(", "str", ")", ":", "The", "file", "name", "of", "the", "json", "file", "to", "post", "(", "optional", ")", ".", "If", "this", "is", "left", "unspecified", "it", "is", "assumed", "the", "data", "is", "in", "the", "AutoIngest", "object", ".", "dev", "(", "bool", ")", ":", "If", "pushing", "to", "a", "microns", "dev", "branch", "server", "set", "this", "to", "True", "if", "not", "leave", "False", ".", "verifytype", "(", "enum", ")", ":", "Set", "http", "verification", "type", "by", "checking", "the", "first", "slice", "is", "accessible", "or", "by", "checking", "channel", "folder", ".", "NOTE", ":", "If", "verification", "occurs", "by", "folder", "there", "is", "NO", "image", "size", "or", "type", "verification", ".", "Enum", ":", "[", "Folder", "Slice", "]" ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/ndingest.py#L467-L498
[ "def", "post_data", "(", "self", ",", "file_name", "=", "None", ",", "legacy", "=", "False", ",", "verifytype", "=", "VERIFY_BY_SLICE", ")", ":", "if", "(", "file_name", "is", "None", ")", ":", "complete_example", "=", "(", "self", ".", "dataset", ",", "self", ".", "project", ",", "self", ".", "channels", ",", "self", ".", "metadata", ")", "data", "=", "json", ".", "loads", "(", "self", ".", "nd_json", "(", "*", "complete_example", ")", ")", "else", ":", "try", ":", "with", "open", "(", "file_name", ")", "as", "data_file", ":", "data", "=", "json", ".", "load", "(", "data_file", ")", "except", ":", "raise", "OSError", "(", "\"Error opening file\"", ")", "# self.verify_path(data, verifytype)", "# self.verify_json(data)", "self", ".", "put_data", "(", "data", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
NDIngest.output_json
Arguments: file_name(str : '/tmp/ND.json'): The file name to store the json to Returns: None
ndio/remote/ndingest.py
def output_json(self, file_name='/tmp/ND.json'): """ Arguments: file_name(str : '/tmp/ND.json'): The file name to store the json to Returns: None """ complete_example = ( self.dataset, self.project, self.channels, self.metadata) data = json.loads(self.nd_json(*complete_example)) # self.verify_json(data) self.verify_path(data, VERIFY_BY_SLICE) f = open(file_name, 'w') f.write(str(data)) f.close()
def output_json(self, file_name='/tmp/ND.json'): """ Arguments: file_name(str : '/tmp/ND.json'): The file name to store the json to Returns: None """ complete_example = ( self.dataset, self.project, self.channels, self.metadata) data = json.loads(self.nd_json(*complete_example)) # self.verify_json(data) self.verify_path(data, VERIFY_BY_SLICE) f = open(file_name, 'w') f.write(str(data)) f.close()
[ "Arguments", ":", "file_name", "(", "str", ":", "/", "tmp", "/", "ND", ".", "json", ")", ":", "The", "file", "name", "to", "store", "the", "json", "to" ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/ndingest.py#L500-L517
[ "def", "output_json", "(", "self", ",", "file_name", "=", "'/tmp/ND.json'", ")", ":", "complete_example", "=", "(", "self", ".", "dataset", ",", "self", ".", "project", ",", "self", ".", "channels", ",", "self", ".", "metadata", ")", "data", "=", "json", ".", "loads", "(", "self", ".", "nd_json", "(", "*", "complete_example", ")", ")", "# self.verify_json(data)", "self", ".", "verify_path", "(", "data", ",", "VERIFY_BY_SLICE", ")", "f", "=", "open", "(", "file_name", ",", "'w'", ")", "f", ".", "write", "(", "str", "(", "data", ")", ")", "f", ".", "close", "(", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
find_path
Find path for given workspace and|or repository.
yoda/__init__.py
def find_path(name, config, wsonly=False): """Find path for given workspace and|or repository.""" workspace = Workspace(config) config = config["workspaces"] path_list = {} if name.find('/') != -1: wsonly = False try: ws, repo = name.split('/') except ValueError: raise ValueError("There is too many / in `name` argument. " "Argument syntax: `workspace/repository`.") if (workspace.exists(ws)): if (repo in config[ws]["repositories"]): path_name = "%s/%s" % (ws, repo) path_list[path_name] = config[ws]["repositories"][repo] for ws_name, ws in sorted(config.items()): if (name == ws_name): if wsonly is True: return {ws_name: ws["path"]} repositories = sorted(config[ws_name]["repositories"].items()) for name, path in repositories: path_list["%s/%s" % (ws_name, name)] = path break for repo_name, repo_path in sorted(ws["repositories"].items()): if (repo_name == name): path_list["%s/%s" % (ws_name, repo_name)] = repo_path return path_list
def find_path(name, config, wsonly=False): """Find path for given workspace and|or repository.""" workspace = Workspace(config) config = config["workspaces"] path_list = {} if name.find('/') != -1: wsonly = False try: ws, repo = name.split('/') except ValueError: raise ValueError("There is too many / in `name` argument. " "Argument syntax: `workspace/repository`.") if (workspace.exists(ws)): if (repo in config[ws]["repositories"]): path_name = "%s/%s" % (ws, repo) path_list[path_name] = config[ws]["repositories"][repo] for ws_name, ws in sorted(config.items()): if (name == ws_name): if wsonly is True: return {ws_name: ws["path"]} repositories = sorted(config[ws_name]["repositories"].items()) for name, path in repositories: path_list["%s/%s" % (ws_name, name)] = path break for repo_name, repo_path in sorted(ws["repositories"].items()): if (repo_name == name): path_list["%s/%s" % (ws_name, repo_name)] = repo_path return path_list
[ "Find", "path", "for", "given", "workspace", "and|or", "repository", "." ]
Numergy/yoda
python
https://github.com/Numergy/yoda/blob/109f0e9441130488b0155f05883ef6531cf46ee9/yoda/__init__.py#L17-L49
[ "def", "find_path", "(", "name", ",", "config", ",", "wsonly", "=", "False", ")", ":", "workspace", "=", "Workspace", "(", "config", ")", "config", "=", "config", "[", "\"workspaces\"", "]", "path_list", "=", "{", "}", "if", "name", ".", "find", "(", "'/'", ")", "!=", "-", "1", ":", "wsonly", "=", "False", "try", ":", "ws", ",", "repo", "=", "name", ".", "split", "(", "'/'", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "\"There is too many / in `name` argument. \"", "\"Argument syntax: `workspace/repository`.\"", ")", "if", "(", "workspace", ".", "exists", "(", "ws", ")", ")", ":", "if", "(", "repo", "in", "config", "[", "ws", "]", "[", "\"repositories\"", "]", ")", ":", "path_name", "=", "\"%s/%s\"", "%", "(", "ws", ",", "repo", ")", "path_list", "[", "path_name", "]", "=", "config", "[", "ws", "]", "[", "\"repositories\"", "]", "[", "repo", "]", "for", "ws_name", ",", "ws", "in", "sorted", "(", "config", ".", "items", "(", ")", ")", ":", "if", "(", "name", "==", "ws_name", ")", ":", "if", "wsonly", "is", "True", ":", "return", "{", "ws_name", ":", "ws", "[", "\"path\"", "]", "}", "repositories", "=", "sorted", "(", "config", "[", "ws_name", "]", "[", "\"repositories\"", "]", ".", "items", "(", ")", ")", "for", "name", ",", "path", "in", "repositories", ":", "path_list", "[", "\"%s/%s\"", "%", "(", "ws_name", ",", "name", ")", "]", "=", "path", "break", "for", "repo_name", ",", "repo_path", "in", "sorted", "(", "ws", "[", "\"repositories\"", "]", ".", "items", "(", ")", ")", ":", "if", "(", "repo_name", "==", "name", ")", ":", "path_list", "[", "\"%s/%s\"", "%", "(", "ws_name", ",", "repo_name", ")", "]", "=", "repo_path", "return", "path_list" ]
109f0e9441130488b0155f05883ef6531cf46ee9
test
metadata.get_public_tokens
Get a list of public tokens available on this server. Arguments: None Returns: str[]: list of public tokens
ndio/remote/metadata.py
def get_public_tokens(self): """ Get a list of public tokens available on this server. Arguments: None Returns: str[]: list of public tokens """ r = self.remote_utils.get_url(self.url() + "public_tokens/") return r.json()
def get_public_tokens(self): """ Get a list of public tokens available on this server. Arguments: None Returns: str[]: list of public tokens """ r = self.remote_utils.get_url(self.url() + "public_tokens/") return r.json()
[ "Get", "a", "list", "of", "public", "tokens", "available", "on", "this", "server", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/metadata.py#L44-L55
[ "def", "get_public_tokens", "(", "self", ")", ":", "r", "=", "self", ".", "remote_utils", ".", "get_url", "(", "self", ".", "url", "(", ")", "+", "\"public_tokens/\"", ")", "return", "r", ".", "json", "(", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
metadata.get_public_datasets_and_tokens
NOTE: VERY SLOW! Get a dictionary relating key:dataset to value:[tokens] that rely on that dataset. Arguments: None Returns: dict: relating key:dataset to value:[tokens]
ndio/remote/metadata.py
def get_public_datasets_and_tokens(self): """ NOTE: VERY SLOW! Get a dictionary relating key:dataset to value:[tokens] that rely on that dataset. Arguments: None Returns: dict: relating key:dataset to value:[tokens] """ datasets = {} tokens = self.get_public_tokens() for t in tokens: dataset = self.get_token_dataset(t) if dataset in datasets: datasets[dataset].append(t) else: datasets[dataset] = [t] return datasets
def get_public_datasets_and_tokens(self): """ NOTE: VERY SLOW! Get a dictionary relating key:dataset to value:[tokens] that rely on that dataset. Arguments: None Returns: dict: relating key:dataset to value:[tokens] """ datasets = {} tokens = self.get_public_tokens() for t in tokens: dataset = self.get_token_dataset(t) if dataset in datasets: datasets[dataset].append(t) else: datasets[dataset] = [t] return datasets
[ "NOTE", ":", "VERY", "SLOW!", "Get", "a", "dictionary", "relating", "key", ":", "dataset", "to", "value", ":", "[", "tokens", "]", "that", "rely", "on", "that", "dataset", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/metadata.py#L70-L90
[ "def", "get_public_datasets_and_tokens", "(", "self", ")", ":", "datasets", "=", "{", "}", "tokens", "=", "self", ".", "get_public_tokens", "(", ")", "for", "t", "in", "tokens", ":", "dataset", "=", "self", ".", "get_token_dataset", "(", "t", ")", "if", "dataset", "in", "datasets", ":", "datasets", "[", "dataset", "]", ".", "append", "(", "t", ")", "else", ":", "datasets", "[", "dataset", "]", "=", "[", "t", "]", "return", "datasets" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
metadata.get_proj_info
Return the project info for a given token. Arguments: token (str): Token to return information for Returns: JSON: representation of proj_info
ndio/remote/metadata.py
def get_proj_info(self, token): """ Return the project info for a given token. Arguments: token (str): Token to return information for Returns: JSON: representation of proj_info """ r = self.remote_utils.get_url(self.url() + "{}/info/".format(token)) return r.json()
def get_proj_info(self, token): """ Return the project info for a given token. Arguments: token (str): Token to return information for Returns: JSON: representation of proj_info """ r = self.remote_utils.get_url(self.url() + "{}/info/".format(token)) return r.json()
[ "Return", "the", "project", "info", "for", "a", "given", "token", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/metadata.py#L104-L115
[ "def", "get_proj_info", "(", "self", ",", "token", ")", ":", "r", "=", "self", ".", "remote_utils", ".", "get_url", "(", "self", ".", "url", "(", ")", "+", "\"{}/info/\"", ".", "format", "(", "token", ")", ")", "return", "r", ".", "json", "(", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
metadata.get_image_size
Return the size of the volume (3D). Convenient for when you want to download the entirety of a dataset. Arguments: token (str): The token for which to find the dataset image bounds resolution (int : 0): The resolution at which to get image bounds. Defaults to 0, to get the largest area available. Returns: int[3]: The size of the bounds. Should == get_volume.shape Raises: RemoteDataNotFoundError: If the token is invalid, or if the metadata at that resolution is unavailable in projinfo.
ndio/remote/metadata.py
def get_image_size(self, token, resolution=0): """ Return the size of the volume (3D). Convenient for when you want to download the entirety of a dataset. Arguments: token (str): The token for which to find the dataset image bounds resolution (int : 0): The resolution at which to get image bounds. Defaults to 0, to get the largest area available. Returns: int[3]: The size of the bounds. Should == get_volume.shape Raises: RemoteDataNotFoundError: If the token is invalid, or if the metadata at that resolution is unavailable in projinfo. """ info = self.get_proj_info(token) res = str(resolution) if res not in info['dataset']['imagesize']: raise RemoteDataNotFoundError("Resolution " + res + " is not available.") return info['dataset']['imagesize'][str(resolution)]
def get_image_size(self, token, resolution=0): """ Return the size of the volume (3D). Convenient for when you want to download the entirety of a dataset. Arguments: token (str): The token for which to find the dataset image bounds resolution (int : 0): The resolution at which to get image bounds. Defaults to 0, to get the largest area available. Returns: int[3]: The size of the bounds. Should == get_volume.shape Raises: RemoteDataNotFoundError: If the token is invalid, or if the metadata at that resolution is unavailable in projinfo. """ info = self.get_proj_info(token) res = str(resolution) if res not in info['dataset']['imagesize']: raise RemoteDataNotFoundError("Resolution " + res + " is not available.") return info['dataset']['imagesize'][str(resolution)]
[ "Return", "the", "size", "of", "the", "volume", "(", "3D", ")", ".", "Convenient", "for", "when", "you", "want", "to", "download", "the", "entirety", "of", "a", "dataset", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/metadata.py#L136-L158
[ "def", "get_image_size", "(", "self", ",", "token", ",", "resolution", "=", "0", ")", ":", "info", "=", "self", ".", "get_proj_info", "(", "token", ")", "res", "=", "str", "(", "resolution", ")", "if", "res", "not", "in", "info", "[", "'dataset'", "]", "[", "'imagesize'", "]", ":", "raise", "RemoteDataNotFoundError", "(", "\"Resolution \"", "+", "res", "+", "\" is not available.\"", ")", "return", "info", "[", "'dataset'", "]", "[", "'imagesize'", "]", "[", "str", "(", "resolution", ")", "]" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
metadata.set_metadata
Insert new metadata into the OCP metadata database. Arguments: token (str): Token of the datum to set data (str): A dictionary to insert as metadata. Include `secret`. Returns: json: Info of the inserted ID (convenience) or an error message. Throws: RemoteDataUploadError: If the token is already populated, or if there is an issue with your specified `secret` key.
ndio/remote/metadata.py
def set_metadata(self, token, data): """ Insert new metadata into the OCP metadata database. Arguments: token (str): Token of the datum to set data (str): A dictionary to insert as metadata. Include `secret`. Returns: json: Info of the inserted ID (convenience) or an error message. Throws: RemoteDataUploadError: If the token is already populated, or if there is an issue with your specified `secret` key. """ req = requests.post(self.meta_url("metadata/ocp/set/" + token), json=data, verify=False) if req.status_code != 200: raise RemoteDataUploadError( "Could not upload metadata: " + req.json()['message'] ) return req.json()
def set_metadata(self, token, data): """ Insert new metadata into the OCP metadata database. Arguments: token (str): Token of the datum to set data (str): A dictionary to insert as metadata. Include `secret`. Returns: json: Info of the inserted ID (convenience) or an error message. Throws: RemoteDataUploadError: If the token is already populated, or if there is an issue with your specified `secret` key. """ req = requests.post(self.meta_url("metadata/ocp/set/" + token), json=data, verify=False) if req.status_code != 200: raise RemoteDataUploadError( "Could not upload metadata: " + req.json()['message'] ) return req.json()
[ "Insert", "new", "metadata", "into", "the", "OCP", "metadata", "database", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/metadata.py#L160-L182
[ "def", "set_metadata", "(", "self", ",", "token", ",", "data", ")", ":", "req", "=", "requests", ".", "post", "(", "self", ".", "meta_url", "(", "\"metadata/ocp/set/\"", "+", "token", ")", ",", "json", "=", "data", ",", "verify", "=", "False", ")", "if", "req", ".", "status_code", "!=", "200", ":", "raise", "RemoteDataUploadError", "(", "\"Could not upload metadata: \"", "+", "req", ".", "json", "(", ")", "[", "'message'", "]", ")", "return", "req", ".", "json", "(", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
metadata.add_subvolume
Adds a new subvolume to a token/channel. Arguments: token (str): The token to write to in LIMS channel (str): Channel to add in the subvolume. Can be `None` x_start (int): Start in x dimension x_stop (int): Stop in x dimension y_start (int): Start in y dimension y_stop (int): Stop in y dimension z_start (int): Start in z dimension z_stop (int): Stop in z dimension resolution (int): The resolution at which this subvolume is seen title (str): The title to set for the subvolume notes (str): Optional extra thoughts on the subvolume Returns: boolean: success
ndio/remote/metadata.py
def add_subvolume(self, token, channel, secret, x_start, x_stop, y_start, y_stop, z_start, z_stop, resolution, title, notes): """ Adds a new subvolume to a token/channel. Arguments: token (str): The token to write to in LIMS channel (str): Channel to add in the subvolume. Can be `None` x_start (int): Start in x dimension x_stop (int): Stop in x dimension y_start (int): Start in y dimension y_stop (int): Stop in y dimension z_start (int): Start in z dimension z_stop (int): Stop in z dimension resolution (int): The resolution at which this subvolume is seen title (str): The title to set for the subvolume notes (str): Optional extra thoughts on the subvolume Returns: boolean: success """ md = self.get_metadata(token)['metadata'] if 'subvolumes' in md: subvols = md['subvolumes'] else: subvols = [] subvols.append({ 'token': token, 'channel': channel, 'x_start': x_start, 'x_stop': x_stop, 'y_start': y_start, 'y_stop': y_stop, 'z_start': z_start, 'z_stop': z_stop, 'resolution': resolution, 'title': title, 'notes': notes }) return self.set_metadata(token, { 'secret': secret, 'subvolumes': subvols })
def add_subvolume(self, token, channel, secret, x_start, x_stop, y_start, y_stop, z_start, z_stop, resolution, title, notes): """ Adds a new subvolume to a token/channel. Arguments: token (str): The token to write to in LIMS channel (str): Channel to add in the subvolume. Can be `None` x_start (int): Start in x dimension x_stop (int): Stop in x dimension y_start (int): Start in y dimension y_stop (int): Stop in y dimension z_start (int): Start in z dimension z_stop (int): Stop in z dimension resolution (int): The resolution at which this subvolume is seen title (str): The title to set for the subvolume notes (str): Optional extra thoughts on the subvolume Returns: boolean: success """ md = self.get_metadata(token)['metadata'] if 'subvolumes' in md: subvols = md['subvolumes'] else: subvols = [] subvols.append({ 'token': token, 'channel': channel, 'x_start': x_start, 'x_stop': x_stop, 'y_start': y_start, 'y_stop': y_stop, 'z_start': z_start, 'z_stop': z_stop, 'resolution': resolution, 'title': title, 'notes': notes }) return self.set_metadata(token, { 'secret': secret, 'subvolumes': subvols })
[ "Adds", "a", "new", "subvolume", "to", "a", "token", "/", "channel", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/metadata.py#L200-L247
[ "def", "add_subvolume", "(", "self", ",", "token", ",", "channel", ",", "secret", ",", "x_start", ",", "x_stop", ",", "y_start", ",", "y_stop", ",", "z_start", ",", "z_stop", ",", "resolution", ",", "title", ",", "notes", ")", ":", "md", "=", "self", ".", "get_metadata", "(", "token", ")", "[", "'metadata'", "]", "if", "'subvolumes'", "in", "md", ":", "subvols", "=", "md", "[", "'subvolumes'", "]", "else", ":", "subvols", "=", "[", "]", "subvols", ".", "append", "(", "{", "'token'", ":", "token", ",", "'channel'", ":", "channel", ",", "'x_start'", ":", "x_start", ",", "'x_stop'", ":", "x_stop", ",", "'y_start'", ":", "y_start", ",", "'y_stop'", ":", "y_stop", ",", "'z_start'", ":", "z_start", ",", "'z_stop'", ":", "z_stop", ",", "'resolution'", ":", "resolution", ",", "'title'", ":", "title", ",", "'notes'", ":", "notes", "}", ")", "return", "self", ".", "set_metadata", "(", "token", ",", "{", "'secret'", ":", "secret", ",", "'subvolumes'", ":", "subvols", "}", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
remote_utils.get_url
Get a response object for a given url. Arguments: url (str): The url make a get to token (str): The authentication token Returns: obj: The response object
ndio/remote/remote_utils.py
def get_url(self, url): """ Get a response object for a given url. Arguments: url (str): The url make a get to token (str): The authentication token Returns: obj: The response object """ try: req = requests.get(url, headers={ 'Authorization': 'Token {}'.format(self._user_token) }, verify=False) if req.status_code is 403: raise ValueError("Access Denied") else: return req except requests.exceptions.ConnectionError as e: if str(e) == '403 Client Error: Forbidden': raise ValueError('Access Denied') else: raise e
def get_url(self, url): """ Get a response object for a given url. Arguments: url (str): The url make a get to token (str): The authentication token Returns: obj: The response object """ try: req = requests.get(url, headers={ 'Authorization': 'Token {}'.format(self._user_token) }, verify=False) if req.status_code is 403: raise ValueError("Access Denied") else: return req except requests.exceptions.ConnectionError as e: if str(e) == '403 Client Error: Forbidden': raise ValueError('Access Denied') else: raise e
[ "Get", "a", "response", "object", "for", "a", "given", "url", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/remote_utils.py#L19-L42
[ "def", "get_url", "(", "self", ",", "url", ")", ":", "try", ":", "req", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "{", "'Authorization'", ":", "'Token {}'", ".", "format", "(", "self", ".", "_user_token", ")", "}", ",", "verify", "=", "False", ")", "if", "req", ".", "status_code", "is", "403", ":", "raise", "ValueError", "(", "\"Access Denied\"", ")", "else", ":", "return", "req", "except", "requests", ".", "exceptions", ".", "ConnectionError", "as", "e", ":", "if", "str", "(", "e", ")", "==", "'403 Client Error: Forbidden'", ":", "raise", "ValueError", "(", "'Access Denied'", ")", "else", ":", "raise", "e" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
remote_utils.post_url
Returns a post resquest object taking in a url, user token, and possible json information. Arguments: url (str): The url to make post to token (str): The authentication token json (dict): json info to send Returns: obj: Post request object
ndio/remote/remote_utils.py
def post_url(self, url, token='', json=None, data=None, headers=None): """ Returns a post resquest object taking in a url, user token, and possible json information. Arguments: url (str): The url to make post to token (str): The authentication token json (dict): json info to send Returns: obj: Post request object """ if (token == ''): token = self._user_token if headers: headers.update({'Authorization': 'Token {}'.format(token)}) else: headers = {'Authorization': 'Token {}'.format(token)} if json: return requests.post(url, headers=headers, json=json, verify=False) if data: return requests.post(url, headers=headers, data=data, verify=False) return requests.post(url, headers=headers, verify=False)
def post_url(self, url, token='', json=None, data=None, headers=None): """ Returns a post resquest object taking in a url, user token, and possible json information. Arguments: url (str): The url to make post to token (str): The authentication token json (dict): json info to send Returns: obj: Post request object """ if (token == ''): token = self._user_token if headers: headers.update({'Authorization': 'Token {}'.format(token)}) else: headers = {'Authorization': 'Token {}'.format(token)} if json: return requests.post(url, headers=headers, json=json, verify=False) if data: return requests.post(url, headers=headers, data=data, verify=False) return requests.post(url, headers=headers, verify=False)
[ "Returns", "a", "post", "resquest", "object", "taking", "in", "a", "url", "user", "token", "and", "possible", "json", "information", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/remote_utils.py#L44-L78
[ "def", "post_url", "(", "self", ",", "url", ",", "token", "=", "''", ",", "json", "=", "None", ",", "data", "=", "None", ",", "headers", "=", "None", ")", ":", "if", "(", "token", "==", "''", ")", ":", "token", "=", "self", ".", "_user_token", "if", "headers", ":", "headers", ".", "update", "(", "{", "'Authorization'", ":", "'Token {}'", ".", "format", "(", "token", ")", "}", ")", "else", ":", "headers", "=", "{", "'Authorization'", ":", "'Token {}'", ".", "format", "(", "token", ")", "}", "if", "json", ":", "return", "requests", ".", "post", "(", "url", ",", "headers", "=", "headers", ",", "json", "=", "json", ",", "verify", "=", "False", ")", "if", "data", ":", "return", "requests", ".", "post", "(", "url", ",", "headers", "=", "headers", ",", "data", "=", "data", ",", "verify", "=", "False", ")", "return", "requests", ".", "post", "(", "url", ",", "headers", "=", "headers", ",", "verify", "=", "False", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
remote_utils.delete_url
Returns a delete resquest object taking in a url and user token. Arguments: url (str): The url to make post to token (str): The authentication token Returns: obj: Delete request object
ndio/remote/remote_utils.py
def delete_url(self, url, token=''): """ Returns a delete resquest object taking in a url and user token. Arguments: url (str): The url to make post to token (str): The authentication token Returns: obj: Delete request object """ if (token == ''): token = self._user_token return requests.delete(url, headers={ 'Authorization': 'Token {}'.format(token)}, verify=False,)
def delete_url(self, url, token=''): """ Returns a delete resquest object taking in a url and user token. Arguments: url (str): The url to make post to token (str): The authentication token Returns: obj: Delete request object """ if (token == ''): token = self._user_token return requests.delete(url, headers={ 'Authorization': 'Token {}'.format(token)}, verify=False,)
[ "Returns", "a", "delete", "resquest", "object", "taking", "in", "a", "url", "and", "user", "token", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/remote_utils.py#L80-L97
[ "def", "delete_url", "(", "self", ",", "url", ",", "token", "=", "''", ")", ":", "if", "(", "token", "==", "''", ")", ":", "token", "=", "self", ".", "_user_token", "return", "requests", ".", "delete", "(", "url", ",", "headers", "=", "{", "'Authorization'", ":", "'Token {}'", ".", "format", "(", "token", ")", "}", ",", "verify", "=", "False", ",", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
remote_utils.ping
Ping the server to make sure that you can access the base URL. Arguments: None Returns: `boolean` Successful access of server (or status code)
ndio/remote/remote_utils.py
def ping(self, url, endpoint=''): """ Ping the server to make sure that you can access the base URL. Arguments: None Returns: `boolean` Successful access of server (or status code) """ r = self.get_url(url + "/" + endpoint) return r.status_code
def ping(self, url, endpoint=''): """ Ping the server to make sure that you can access the base URL. Arguments: None Returns: `boolean` Successful access of server (or status code) """ r = self.get_url(url + "/" + endpoint) return r.status_code
[ "Ping", "the", "server", "to", "make", "sure", "that", "you", "can", "access", "the", "base", "URL", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/remote_utils.py#L99-L109
[ "def", "ping", "(", "self", ",", "url", ",", "endpoint", "=", "''", ")", ":", "r", "=", "self", ".", "get_url", "(", "url", "+", "\"/\"", "+", "endpoint", ")", "return", "r", ".", "status_code" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
load
Import a HDF5 file into a numpy array. Arguments: hdf5_filename: A string filename of a HDF5 datafile Returns: A numpy array with data from the HDF5 file
ndio/convert/hdf5.py
def load(hdf5_filename): """ Import a HDF5 file into a numpy array. Arguments: hdf5_filename: A string filename of a HDF5 datafile Returns: A numpy array with data from the HDF5 file """ # Expand filename to be absolute hdf5_filename = os.path.expanduser(hdf5_filename) try: f = h5py.File(hdf5_filename, "r") # neurodata stores data inside the 'cutout' h5 dataset data_layers = f.get('image').get('CUTOUT') except Exception as e: raise ValueError("Could not load file {0} for conversion. {}".format( hdf5_filename, e)) raise return numpy.array(data_layers)
def load(hdf5_filename): """ Import a HDF5 file into a numpy array. Arguments: hdf5_filename: A string filename of a HDF5 datafile Returns: A numpy array with data from the HDF5 file """ # Expand filename to be absolute hdf5_filename = os.path.expanduser(hdf5_filename) try: f = h5py.File(hdf5_filename, "r") # neurodata stores data inside the 'cutout' h5 dataset data_layers = f.get('image').get('CUTOUT') except Exception as e: raise ValueError("Could not load file {0} for conversion. {}".format( hdf5_filename, e)) raise return numpy.array(data_layers)
[ "Import", "a", "HDF5", "file", "into", "a", "numpy", "array", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/convert/hdf5.py#L7-L29
[ "def", "load", "(", "hdf5_filename", ")", ":", "# Expand filename to be absolute", "hdf5_filename", "=", "os", ".", "path", ".", "expanduser", "(", "hdf5_filename", ")", "try", ":", "f", "=", "h5py", ".", "File", "(", "hdf5_filename", ",", "\"r\"", ")", "# neurodata stores data inside the 'cutout' h5 dataset", "data_layers", "=", "f", ".", "get", "(", "'image'", ")", ".", "get", "(", "'CUTOUT'", ")", "except", "Exception", "as", "e", ":", "raise", "ValueError", "(", "\"Could not load file {0} for conversion. {}\"", ".", "format", "(", "hdf5_filename", ",", "e", ")", ")", "raise", "return", "numpy", ".", "array", "(", "data_layers", ")" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
save
Export a numpy array to a HDF5 file. Arguments: hdf5_filename (str): A filename to which to save the HDF5 data array (numpy.ndarray): The numpy array to save to HDF5 Returns: String. The expanded filename that now holds the HDF5 data
ndio/convert/hdf5.py
def save(hdf5_filename, array): """ Export a numpy array to a HDF5 file. Arguments: hdf5_filename (str): A filename to which to save the HDF5 data array (numpy.ndarray): The numpy array to save to HDF5 Returns: String. The expanded filename that now holds the HDF5 data """ # Expand filename to be absolute hdf5_filename = os.path.expanduser(hdf5_filename) try: h = h5py.File(hdf5_filename, "w") h.create_dataset('CUTOUT', data=array) h.close() except Exception as e: raise ValueError("Could not save HDF5 file {0}.".format(hdf5_filename)) return hdf5_filename
def save(hdf5_filename, array): """ Export a numpy array to a HDF5 file. Arguments: hdf5_filename (str): A filename to which to save the HDF5 data array (numpy.ndarray): The numpy array to save to HDF5 Returns: String. The expanded filename that now holds the HDF5 data """ # Expand filename to be absolute hdf5_filename = os.path.expanduser(hdf5_filename) try: h = h5py.File(hdf5_filename, "w") h.create_dataset('CUTOUT', data=array) h.close() except Exception as e: raise ValueError("Could not save HDF5 file {0}.".format(hdf5_filename)) return hdf5_filename
[ "Export", "a", "numpy", "array", "to", "a", "HDF5", "file", "." ]
neurodata/ndio
python
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/convert/hdf5.py#L32-L53
[ "def", "save", "(", "hdf5_filename", ",", "array", ")", ":", "# Expand filename to be absolute", "hdf5_filename", "=", "os", ".", "path", ".", "expanduser", "(", "hdf5_filename", ")", "try", ":", "h", "=", "h5py", ".", "File", "(", "hdf5_filename", ",", "\"w\"", ")", "h", ".", "create_dataset", "(", "'CUTOUT'", ",", "data", "=", "array", ")", "h", ".", "close", "(", ")", "except", "Exception", "as", "e", ":", "raise", "ValueError", "(", "\"Could not save HDF5 file {0}.\"", ".", "format", "(", "hdf5_filename", ")", ")", "return", "hdf5_filename" ]
792dd5816bc770b05a3db2f4327da42ff6253531
test
ProcessExecutor.run
return values of execute are set as result of the task returned by ensure_future(), obtainable via task.result()
ribosome/process.py
def run(self, job: Job) -> Future[Result]: ''' return values of execute are set as result of the task returned by ensure_future(), obtainable via task.result() ''' if not self.watcher_ready: self.log.error(f'child watcher unattached when executing {job}') job.cancel('unattached watcher') elif not self.can_execute(job): self.log.error('invalid execution job: {}'.format(job)) job.cancel('invalid') else: self.log.debug('executing {}'.format(job)) task = asyncio.ensure_future(self._execute(job), loop=self.loop) task.add_done_callback(job.finish) task.add_done_callback(L(self.job_done)(job, _)) self.current[job.client] = job return job.status
def run(self, job: Job) -> Future[Result]: ''' return values of execute are set as result of the task returned by ensure_future(), obtainable via task.result() ''' if not self.watcher_ready: self.log.error(f'child watcher unattached when executing {job}') job.cancel('unattached watcher') elif not self.can_execute(job): self.log.error('invalid execution job: {}'.format(job)) job.cancel('invalid') else: self.log.debug('executing {}'.format(job)) task = asyncio.ensure_future(self._execute(job), loop=self.loop) task.add_done_callback(job.finish) task.add_done_callback(L(self.job_done)(job, _)) self.current[job.client] = job return job.status
[ "return", "values", "of", "execute", "are", "set", "as", "result", "of", "the", "task", "returned", "by", "ensure_future", "()", "obtainable", "via", "task", ".", "result", "()" ]
tek/ribosome
python
https://github.com/tek/ribosome/blob/b2ce9e118faa46d93506cbbb5f27ecfbd4e8a1cc/ribosome/process.py#L170-L186
[ "def", "run", "(", "self", ",", "job", ":", "Job", ")", "->", "Future", "[", "Result", "]", ":", "if", "not", "self", ".", "watcher_ready", ":", "self", ".", "log", ".", "error", "(", "f'child watcher unattached when executing {job}'", ")", "job", ".", "cancel", "(", "'unattached watcher'", ")", "elif", "not", "self", ".", "can_execute", "(", "job", ")", ":", "self", ".", "log", ".", "error", "(", "'invalid execution job: {}'", ".", "format", "(", "job", ")", ")", "job", ".", "cancel", "(", "'invalid'", ")", "else", ":", "self", ".", "log", ".", "debug", "(", "'executing {}'", ".", "format", "(", "job", ")", ")", "task", "=", "asyncio", ".", "ensure_future", "(", "self", ".", "_execute", "(", "job", ")", ",", "loop", "=", "self", ".", "loop", ")", "task", ".", "add_done_callback", "(", "job", ".", "finish", ")", "task", ".", "add_done_callback", "(", "L", "(", "self", ".", "job_done", ")", "(", "job", ",", "_", ")", ")", "self", ".", "current", "[", "job", ".", "client", "]", "=", "job", "return", "job", ".", "status" ]
b2ce9e118faa46d93506cbbb5f27ecfbd4e8a1cc
test
infer_gaps_in_tree
Adds a character matrix to DendroPy tree and infers gaps using Fitch's algorithm. Infer gaps in sequences at ancestral nodes.
pyasr/gaps.py
def infer_gaps_in_tree(df_seq, tree, id_col='id', sequence_col='sequence'): """Adds a character matrix to DendroPy tree and infers gaps using Fitch's algorithm. Infer gaps in sequences at ancestral nodes. """ taxa = tree.taxon_namespace # Get alignment as fasta alignment = df_seq.phylo.to_fasta(id_col=id_col, id_only=True, sequence_col=sequence_col) # Build a Sequence data matrix from Dendropy data = dendropy.ProteinCharacterMatrix.get( data=alignment, schema="fasta", taxon_namespace=taxa) # Construct a map object between sequence data and tree data. taxon_state_sets_map = data.taxon_state_sets_map(gaps_as_missing=False) # Fitch algorithm to determine placement of gaps dendropy.model.parsimony.fitch_down_pass(tree.postorder_node_iter(), taxon_state_sets_map=taxon_state_sets_map) dendropy.model.parsimony.fitch_up_pass(tree.preorder_node_iter()) return tree
def infer_gaps_in_tree(df_seq, tree, id_col='id', sequence_col='sequence'): """Adds a character matrix to DendroPy tree and infers gaps using Fitch's algorithm. Infer gaps in sequences at ancestral nodes. """ taxa = tree.taxon_namespace # Get alignment as fasta alignment = df_seq.phylo.to_fasta(id_col=id_col, id_only=True, sequence_col=sequence_col) # Build a Sequence data matrix from Dendropy data = dendropy.ProteinCharacterMatrix.get( data=alignment, schema="fasta", taxon_namespace=taxa) # Construct a map object between sequence data and tree data. taxon_state_sets_map = data.taxon_state_sets_map(gaps_as_missing=False) # Fitch algorithm to determine placement of gaps dendropy.model.parsimony.fitch_down_pass(tree.postorder_node_iter(), taxon_state_sets_map=taxon_state_sets_map) dendropy.model.parsimony.fitch_up_pass(tree.preorder_node_iter()) return tree
[ "Adds", "a", "character", "matrix", "to", "DendroPy", "tree", "and", "infers", "gaps", "using", "Fitch", "s", "algorithm", "." ]
Zsailer/pyasr
python
https://github.com/Zsailer/pyasr/blob/f9a05912ae2409a4cb2d8bac878bb5230c8824b4/pyasr/gaps.py#L3-L28
[ "def", "infer_gaps_in_tree", "(", "df_seq", ",", "tree", ",", "id_col", "=", "'id'", ",", "sequence_col", "=", "'sequence'", ")", ":", "taxa", "=", "tree", ".", "taxon_namespace", "# Get alignment as fasta", "alignment", "=", "df_seq", ".", "phylo", ".", "to_fasta", "(", "id_col", "=", "id_col", ",", "id_only", "=", "True", ",", "sequence_col", "=", "sequence_col", ")", "# Build a Sequence data matrix from Dendropy", "data", "=", "dendropy", ".", "ProteinCharacterMatrix", ".", "get", "(", "data", "=", "alignment", ",", "schema", "=", "\"fasta\"", ",", "taxon_namespace", "=", "taxa", ")", "# Construct a map object between sequence data and tree data.", "taxon_state_sets_map", "=", "data", ".", "taxon_state_sets_map", "(", "gaps_as_missing", "=", "False", ")", "# Fitch algorithm to determine placement of gaps", "dendropy", ".", "model", ".", "parsimony", ".", "fitch_down_pass", "(", "tree", ".", "postorder_node_iter", "(", ")", ",", "taxon_state_sets_map", "=", "taxon_state_sets_map", ")", "dendropy", ".", "model", ".", "parsimony", ".", "fitch_up_pass", "(", "tree", ".", "preorder_node_iter", "(", ")", ")", "return", "tree" ]
f9a05912ae2409a4cb2d8bac878bb5230c8824b4