Dataset schema (column : type, observed value ranges):

id : int32 (0 to 252k)
repo : string (lengths 7 to 55)
path : string (lengths 4 to 127)
func_name : string (lengths 1 to 88)
original_string : string (lengths 75 to 19.8k)
language : string (1 class)
code : string (lengths 75 to 19.8k; identical to original_string in these records)
code_tokens : list
docstring : string (lengths 3 to 17.3k)
docstring_tokens : list
sha : string (length 40)
url : string (lengths 87 to 242)
12,300
thunder-project/thunder
thunder/images/images.py
Images.subtract
def subtract(self, val):
    """
    Subtract a constant value or an image from all images.

    Parameters
    ----------
    val : int, float, or ndarray
        Value to subtract.
    """
    if isinstance(val, ndarray):
        if val.shape != self.value_shape:
            raise Exception('Cannot subtract image with dimensions %s '
                            'from images with dimension %s' % (str(val.shape), str(self.value_shape)))

    return self.map(lambda x: x - val, value_shape=self.value_shape)
python
[ "def", "subtract", "(", "self", ",", "val", ")", ":", "if", "isinstance", "(", "val", ",", "ndarray", ")", ":", "if", "val", ".", "shape", "!=", "self", ".", "value_shape", ":", "raise", "Exception", "(", "'Cannot subtract image with dimensions %s '", "'from images with dimension %s'", "%", "(", "str", "(", "val", ".", "shape", ")", ",", "str", "(", "self", ".", "value_shape", ")", ")", ")", "return", "self", ".", "map", "(", "lambda", "x", ":", "x", "-", "val", ",", "value_shape", "=", "self", ".", "value_shape", ")" ]
Subtract a constant value or an image from all images.

Parameters
----------
val : int, float, or ndarray
    Value to subtract.
[ "Subtract", "a", "constant", "value", "or", "an", "image", "from", "all", "images", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/images.py#L456-L470
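A minimal usage sketch for the record above. The td.images.fromarray reader and the toarray collector are assumptions drawn from the broader thunder 1.x API, not shown in this record:

import numpy as np
import thunder as td  # td.images.fromarray assumed from thunder's reader API

# three 2x2 images stacked along the first axis
imgs = td.images.fromarray(np.arange(12).reshape(3, 2, 2))

# subtract a scalar from every image
shifted = imgs.subtract(5)

# subtract a single 2x2 background image from all images;
# a background with any other shape would trigger the Exception above
background = np.ones((2, 2))
cleaned = imgs.subtract(background)
print(cleaned.toarray().shape)  # (3, 2, 2)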
12,301
thunder-project/thunder
thunder/images/images.py
Images.topng
def topng(self, path, prefix='image', overwrite=False):
    """
    Write 2d images as PNG files.

    Files will be written into a newly-created directory.
    Three-dimensional data will be treated as RGB channels.

    Parameters
    ----------
    path : string
        Path to output directory, must be one level below an existing directory.

    prefix : string
        String to prepend to filenames.

    overwrite : bool
        If true, the directory given by path will first be deleted if it exists.
    """
    from thunder.images.writers import topng
    # TODO add back colormap and vmin/vmax
    topng(self, path, prefix=prefix, overwrite=overwrite)
python
[ "def", "topng", "(", "self", ",", "path", ",", "prefix", "=", "'image'", ",", "overwrite", "=", "False", ")", ":", "from", "thunder", ".", "images", ".", "writers", "import", "topng", "# TODO add back colormap and vmin/vmax", "topng", "(", "self", ",", "path", ",", "prefix", "=", "prefix", ",", "overwrite", "=", "overwrite", ")" ]
Write 2d images as PNG files.

Files will be written into a newly-created directory.
Three-dimensional data will be treated as RGB channels.

Parameters
----------
path : string
    Path to output directory, must be one level below an existing directory.

prefix : string
    String to prepend to filenames.

overwrite : bool
    If true, the directory given by path will first be deleted if it exists.
[ "Write", "2d", "images", "as", "PNG", "files", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/images.py#L472-L492
12,302
thunder-project/thunder
thunder/images/images.py
Images.map_as_series
def map_as_series(self, func, value_size=None, dtype=None, chunk_size='auto'):
    """
    Efficiently apply a function to images as series data.

    For images data that represent image sequences, this method
    applies a function to each pixel's series, and then returns to
    the images format, using an efficient intermediate block
    representation.

    Parameters
    ----------
    func : function
        Function to apply to each time series. Should take one-dimensional
        ndarray and return the transformed one-dimensional ndarray.

    value_size : int, optional, default = None
        Size of the one-dimensional ndarray resulting from application of
        func. If not supplied, will be automatically inferred for an extra
        computational cost.

    dtype : str, optional, default = None
        dtype of one-dimensional ndarray resulting from application of func.
        If not supplied it will be automatically inferred for an extra
        computational cost.

    chunk_size : str or tuple, size of image chunk used during conversion, default = 'auto'
        String interpreted as memory size (in kilobytes, e.g. '64').
        The exception is the string 'auto'. In spark mode, 'auto' will
        choose a chunk size to make the resulting blocks ~100 MB in size.
        In local mode, 'auto' will create a single block.
        Tuple of ints interpreted as 'pixels per dimension'.
    """
    blocks = self.toblocks(chunk_size=chunk_size)

    if value_size is not None:
        dims = list(blocks.blockshape)
        dims[0] = value_size
    else:
        dims = None

    def f(block):
        return apply_along_axis(func, 0, block)

    return blocks.map(f, value_shape=dims, dtype=dtype).toimages()
python
[ "def", "map_as_series", "(", "self", ",", "func", ",", "value_size", "=", "None", ",", "dtype", "=", "None", ",", "chunk_size", "=", "'auto'", ")", ":", "blocks", "=", "self", ".", "toblocks", "(", "chunk_size", "=", "chunk_size", ")", "if", "value_size", "is", "not", "None", ":", "dims", "=", "list", "(", "blocks", ".", "blockshape", ")", "dims", "[", "0", "]", "=", "value_size", "else", ":", "dims", "=", "None", "def", "f", "(", "block", ")", ":", "return", "apply_along_axis", "(", "func", ",", "0", ",", "block", ")", "return", "blocks", ".", "map", "(", "f", ",", "value_shape", "=", "dims", ",", "dtype", "=", "dtype", ")", ".", "toimages", "(", ")" ]
Efficiently apply a function to images as series data.

For images data that represent image sequences, this method
applies a function to each pixel's series, and then returns to
the images format, using an efficient intermediate block
representation.

Parameters
----------
func : function
    Function to apply to each time series. Should take one-dimensional
    ndarray and return the transformed one-dimensional ndarray.

value_size : int, optional, default = None
    Size of the one-dimensional ndarray resulting from application of
    func. If not supplied, will be automatically inferred for an extra
    computational cost.

dtype : str, optional, default = None
    dtype of one-dimensional ndarray resulting from application of func.
    If not supplied it will be automatically inferred for an extra
    computational cost.

chunk_size : str or tuple, size of image chunk used during conversion, default = 'auto'
    String interpreted as memory size (in kilobytes, e.g. '64').
    The exception is the string 'auto'. In spark mode, 'auto' will
    choose a chunk size to make the resulting blocks ~100 MB in size.
    In local mode, 'auto' will create a single block.
    Tuple of ints interpreted as 'pixels per dimension'.
[ "Efficiently", "apply", "a", "function", "to", "images", "as", "series", "data", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/images.py#L536-L577
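A short sketch of map_as_series, again assuming the td.images.fromarray reader. It centers each pixel's time series, so the output series length equals the input length and value_size can be passed explicitly to skip the inference step:

import numpy as np
import thunder as td  # td.images.fromarray assumed from thunder's reader API

# ten 4x4 images, i.e. a length-10 time series at each of the 16 pixels
imgs = td.images.fromarray(np.random.rand(10, 4, 4))

# subtract each pixel's temporal mean; the series length is unchanged
centered = imgs.map_as_series(lambda ts: ts - ts.mean(), value_size=10)
print(centered.toarray().shape)  # (10, 4, 4)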
12,303
thunder-project/thunder
thunder/blocks/blocks.py
Blocks.count
def count(self):
    """
    Explicit count of the number of items.

    For lazy or distributed data, will force a computation.
    """
    if self.mode == 'spark':
        return self.tordd().count()

    if self.mode == 'local':
        return prod(self.values.values.shape)
python
[ "def", "count", "(", "self", ")", ":", "if", "self", ".", "mode", "==", "'spark'", ":", "return", "self", ".", "tordd", "(", ")", ".", "count", "(", ")", "if", "self", ".", "mode", "==", "'local'", ":", "return", "prod", "(", "self", ".", "values", ".", "values", ".", "shape", ")" ]
Explicit count of the number of items. For lazy or distributed data, will force a computation.
[ "Explicit", "count", "of", "the", "number", "of", "items", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/blocks/blocks.py#L30-L40
12,304
thunder-project/thunder
thunder/blocks/blocks.py
Blocks.collect_blocks
def collect_blocks(self):
    """
    Collect the blocks in a list
    """
    if self.mode == 'spark':
        return self.values.tordd().sortByKey().values().collect()

    if self.mode == 'local':
        return self.values.values.flatten().tolist()
python
[ "def", "collect_blocks", "(", "self", ")", ":", "if", "self", ".", "mode", "==", "'spark'", ":", "return", "self", ".", "values", ".", "tordd", "(", ")", ".", "sortByKey", "(", ")", ".", "values", "(", ")", ".", "collect", "(", ")", "if", "self", ".", "mode", "==", "'local'", ":", "return", "self", ".", "values", ".", "values", ".", "flatten", "(", ")", ".", "tolist", "(", ")" ]
Collect the blocks in a list
[ "Collect", "the", "blocks", "in", "a", "list" ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/blocks/blocks.py#L42-L50
12,305
thunder-project/thunder
thunder/blocks/blocks.py
Blocks.map
def map(self, func, value_shape=None, dtype=None):
    """
    Apply an array -> array function to each block
    """
    mapped = self.values.map(func, value_shape=value_shape, dtype=dtype)
    return self._constructor(mapped).__finalize__(self, noprop=('dtype',))
python
[ "def", "map", "(", "self", ",", "func", ",", "value_shape", "=", "None", ",", "dtype", "=", "None", ")", ":", "mapped", "=", "self", ".", "values", ".", "map", "(", "func", ",", "value_shape", "=", "value_shape", ",", "dtype", "=", "dtype", ")", "return", "self", ".", "_constructor", "(", "mapped", ")", ".", "__finalize__", "(", "self", ",", "noprop", "=", "(", "'dtype'", ",", ")", ")" ]
Apply an array -> array function to each block
[ "Apply", "an", "array", "-", ">", "array", "function", "to", "each", "block" ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/blocks/blocks.py#L52-L57
12,306
thunder-project/thunder
thunder/blocks/blocks.py
Blocks.toimages
def toimages(self):
    """
    Convert blocks to images.
    """
    from thunder.images.images import Images

    if self.mode == 'spark':
        values = self.values.values_to_keys((0,)).unchunk()

    if self.mode == 'local':
        values = self.values.unchunk()

    return Images(values)
python
[ "def", "toimages", "(", "self", ")", ":", "from", "thunder", ".", "images", ".", "images", "import", "Images", "if", "self", ".", "mode", "==", "'spark'", ":", "values", "=", "self", ".", "values", ".", "values_to_keys", "(", "(", "0", ",", ")", ")", ".", "unchunk", "(", ")", "if", "self", ".", "mode", "==", "'local'", ":", "values", "=", "self", ".", "values", ".", "unchunk", "(", ")", "return", "Images", "(", "values", ")" ]
Convert blocks to images.
[ "Convert", "blocks", "to", "images", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/blocks/blocks.py#L75-L87
12,307
thunder-project/thunder
thunder/blocks/blocks.py
Blocks.toseries
def toseries(self):
    """
    Converts blocks to series.
    """
    from thunder.series.series import Series

    if self.mode == 'spark':
        values = self.values.values_to_keys(tuple(range(1, len(self.shape)))).unchunk()

    if self.mode == 'local':
        values = self.values.unchunk()
        values = rollaxis(values, 0, values.ndim)

    return Series(values)
python
[ "def", "toseries", "(", "self", ")", ":", "from", "thunder", ".", "series", ".", "series", "import", "Series", "if", "self", ".", "mode", "==", "'spark'", ":", "values", "=", "self", ".", "values", ".", "values_to_keys", "(", "tuple", "(", "range", "(", "1", ",", "len", "(", "self", ".", "shape", ")", ")", ")", ")", ".", "unchunk", "(", ")", "if", "self", ".", "mode", "==", "'local'", ":", "values", "=", "self", ".", "values", ".", "unchunk", "(", ")", "values", "=", "rollaxis", "(", "values", ",", "0", ",", "values", ".", "ndim", ")", "return", "Series", "(", "values", ")" ]
Converts blocks to series.
[ "Converts", "blocks", "to", "series", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/blocks/blocks.py#L89-L102
12,308
thunder-project/thunder
thunder/blocks/blocks.py
Blocks.toarray
def toarray(self):
    """
    Convert blocks to local ndarray
    """
    if self.mode == 'spark':
        return self.values.unchunk().toarray()

    if self.mode == 'local':
        return self.values.unchunk()
python
[ "def", "toarray", "(", "self", ")", ":", "if", "self", ".", "mode", "==", "'spark'", ":", "return", "self", ".", "values", ".", "unchunk", "(", ")", ".", "toarray", "(", ")", "if", "self", ".", "mode", "==", "'local'", ":", "return", "self", ".", "values", ".", "unchunk", "(", ")" ]
Convert blocks to local ndarray
[ "Convert", "blocks", "to", "local", "ndarray" ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/blocks/blocks.py#L104-L112
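The Blocks records above (count, collect_blocks, map, toimages, toseries, toarray) compose into a chunk-process-unchunk pipeline. A hedged sketch, assuming the td.images.fromarray reader and the Images.toblocks converter referenced in the map_as_series record:

import numpy as np
import thunder as td  # td.images.fromarray assumed from thunder's reader API

imgs = td.images.fromarray(np.random.rand(8, 16, 16))

# chunk into blocks ('auto' yields a single block in local mode),
# transform each block, then convert back to images or series
blocks = imgs.toblocks(chunk_size='auto')
doubled = blocks.map(lambda b: b * 2)

print(blocks.count())                      # number of blocks
print(doubled.toimages().toarray().shape)  # (8, 16, 16)
print(doubled.toseries().toarray().shape)  # (16, 16, 8)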
12,309
thunder-project/thunder
thunder/series/series.py
Series.flatten
def flatten(self):
    """
    Reshape all dimensions but the last into a single dimension
    """
    size = prod(self.shape[:-1])
    return self.reshape(size, self.shape[-1])
python
[ "def", "flatten", "(", "self", ")", ":", "size", "=", "prod", "(", "self", ".", "shape", "[", ":", "-", "1", "]", ")", "return", "self", ".", "reshape", "(", "size", ",", "self", ".", "shape", "[", "-", "1", "]", ")" ]
Reshape all dimensions but the last into a single dimension
[ "Reshape", "all", "dimensions", "but", "the", "last", "into", "a", "single", "dimension" ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L81-L86
12,310
thunder-project/thunder
thunder/series/series.py
Series.tospark
def tospark(self, engine=None):
    """
    Convert to spark mode.
    """
    from thunder.series.readers import fromarray

    if self.mode == 'spark':
        logging.getLogger('thunder').warn('series already in spark mode')
        pass

    if engine is None:
        raise ValueError('Must provide SparkContext')

    return fromarray(self.toarray(), index=self.index, labels=self.labels, engine=engine)
python
[ "def", "tospark", "(", "self", ",", "engine", "=", "None", ")", ":", "from", "thunder", ".", "series", ".", "readers", "import", "fromarray", "if", "self", ".", "mode", "==", "'spark'", ":", "logging", ".", "getLogger", "(", "'thunder'", ")", ".", "warn", "(", "'series already in spark mode'", ")", "pass", "if", "engine", "is", "None", ":", "raise", "ValueError", "(", "'Must provide SparkContext'", ")", "return", "fromarray", "(", "self", ".", "toarray", "(", ")", ",", "index", "=", "self", ".", "index", ",", "labels", "=", "self", ".", "labels", ",", "engine", "=", "engine", ")" ]
Convert to spark mode.
[ "Convert", "to", "spark", "mode", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L122-L135
12,311
thunder-project/thunder
thunder/series/series.py
Series.sample
def sample(self, n=100, seed=None):
    """
    Extract random sample of records.

    Parameters
    ----------
    n : int, optional, default = 100
        The number of data points to sample.

    seed : int, optional, default = None
        Random seed.
    """
    if n < 1:
        raise ValueError("Number of samples must be larger than 0, got '%g'" % n)

    if seed is None:
        seed = random.randint(0, 2 ** 32)

    if self.mode == 'spark':
        result = asarray(self.values.tordd().values().takeSample(False, n, seed))

    else:
        basedims = [self.shape[d] for d in self.baseaxes]
        inds = [unravel_index(int(k), basedims) for k in random.rand(n) * prod(basedims)]
        result = asarray([self.values[tupleize(i) + (slice(None, None),)] for i in inds])

    return self._constructor(result, index=self.index)
python
[ "def", "sample", "(", "self", ",", "n", "=", "100", ",", "seed", "=", "None", ")", ":", "if", "n", "<", "1", ":", "raise", "ValueError", "(", "\"Number of samples must be larger than 0, got '%g'\"", "%", "n", ")", "if", "seed", "is", "None", ":", "seed", "=", "random", ".", "randint", "(", "0", ",", "2", "**", "32", ")", "if", "self", ".", "mode", "==", "'spark'", ":", "result", "=", "asarray", "(", "self", ".", "values", ".", "tordd", "(", ")", ".", "values", "(", ")", ".", "takeSample", "(", "False", ",", "n", ",", "seed", ")", ")", "else", ":", "basedims", "=", "[", "self", ".", "shape", "[", "d", "]", "for", "d", "in", "self", ".", "baseaxes", "]", "inds", "=", "[", "unravel_index", "(", "int", "(", "k", ")", ",", "basedims", ")", "for", "k", "in", "random", ".", "rand", "(", "n", ")", "*", "prod", "(", "basedims", ")", "]", "result", "=", "asarray", "(", "[", "self", ".", "values", "[", "tupleize", "(", "i", ")", "+", "(", "slice", "(", "None", ",", "None", ")", ",", ")", "]", "for", "i", "in", "inds", "]", ")", "return", "self", ".", "_constructor", "(", "result", ",", "index", "=", "self", ".", "index", ")" ]
Extract random sample of records.

Parameters
----------
n : int, optional, default = 100
    The number of data points to sample.

seed : int, optional, default = None
    Random seed.
[ "Extract", "random", "sample", "of", "records", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L137-L163
12,312
thunder-project/thunder
thunder/series/series.py
Series.map
def map(self, func, index=None, value_shape=None, dtype=None, with_keys=False):
    """
    Map an array -> array function over each record.

    Parameters
    ----------
    func : function
        A function of a single record.

    index : array-like, optional, default = None
        If known, the index to be used following function evaluation.

    value_shape : int, optional, default = None
        Known shape of values resulting from operation. Only valid in spark mode.

    dtype : numpy.dtype, optional, default = None
        If known, the type of the data following function evaluation.

    with_keys : boolean, optional, default = False
        If true, function should be of both tuple indices and series values.
    """
    # if new index is given, can infer missing value_shape
    if value_shape is None and index is not None:
        value_shape = len(index)

    if isinstance(value_shape, int):
        value_shape = (value_shape, )

    new = super(Series, self).map(func, value_shape=value_shape, dtype=dtype, with_keys=with_keys)

    if index is not None:
        new.index = index
    # if series shape did not change and no index was supplied, propagate original index
    else:
        if len(new.index) == len(self.index):
            new.index = self.index

    return new
python
[ "def", "map", "(", "self", ",", "func", ",", "index", "=", "None", ",", "value_shape", "=", "None", ",", "dtype", "=", "None", ",", "with_keys", "=", "False", ")", ":", "# if new index is given, can infer missing value_shape", "if", "value_shape", "is", "None", "and", "index", "is", "not", "None", ":", "value_shape", "=", "len", "(", "index", ")", "if", "isinstance", "(", "value_shape", ",", "int", ")", ":", "value_shape", "=", "(", "value_shape", ",", ")", "new", "=", "super", "(", "Series", ",", "self", ")", ".", "map", "(", "func", ",", "value_shape", "=", "value_shape", ",", "dtype", "=", "dtype", ",", "with_keys", "=", "with_keys", ")", "if", "index", "is", "not", "None", ":", "new", ".", "index", "=", "index", "# if series shape did not change and no index was supplied, propagate original index", "else", ":", "if", "len", "(", "new", ".", "index", ")", "==", "len", "(", "self", ".", "index", ")", ":", "new", ".", "index", "=", "self", ".", "index", "return", "new" ]
Map an array -> array function over each record.

Parameters
----------
func : function
    A function of a single record.

index : array-like, optional, default = None
    If known, the index to be used following function evaluation.

value_shape : int, optional, default = None
    Known shape of values resulting from operation. Only valid in spark mode.

dtype : numpy.dtype, optional, default = None
    If known, the type of the data following function evaluation.

with_keys : boolean, optional, default = False
    If true, function should be of both tuple indices and series values.
[ "Map", "an", "array", "-", ">", "array", "function", "over", "each", "record", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L165-L202
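A sketch of Series.map with a new index. The td.series.fromarray reader is an assumption, mirroring the fromarray import used inside tospark above; supplying index lets map infer the new value_shape:

import numpy as np
import thunder as td  # td.series.fromarray assumed from thunder's reader API

# four records, each a length-5 series
series = td.series.fromarray(np.random.rand(4, 5))

# reduce each record to two summary values; the new index
# supplies the inferred value_shape of (2,)
stats = series.map(lambda x: np.array([x.min(), x.max()]), index=['min', 'max'])
print(stats.toarray().shape)  # (4, 2)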
12,313
thunder-project/thunder
thunder/series/series.py
Series.mean
def mean(self):
    """
    Compute the mean across records
    """
    return self._constructor(self.values.mean(axis=self.baseaxes, keepdims=True))
python
[ "def", "mean", "(", "self", ")", ":", "return", "self", ".", "_constructor", "(", "self", ".", "values", ".", "mean", "(", "axis", "=", "self", ".", "baseaxes", ",", "keepdims", "=", "True", ")", ")" ]
Compute the mean across records
[ "Compute", "the", "mean", "across", "records" ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L215-L219
12,314
thunder-project/thunder
thunder/series/series.py
Series.sum
def sum(self):
    """
    Compute the sum across records.
    """
    return self._constructor(self.values.sum(axis=self.baseaxes, keepdims=True))
python
[ "def", "sum", "(", "self", ")", ":", "return", "self", ".", "_constructor", "(", "self", ".", "values", ".", "sum", "(", "axis", "=", "self", ".", "baseaxes", ",", "keepdims", "=", "True", ")", ")" ]
Compute the sum across records.
[ "Compute", "the", "sum", "across", "records", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L233-L237
12,315
thunder-project/thunder
thunder/series/series.py
Series.max
def max(self):
    """
    Compute the max across records.
    """
    return self._constructor(self.values.max(axis=self.baseaxes, keepdims=True))
python
[ "def", "max", "(", "self", ")", ":", "return", "self", ".", "_constructor", "(", "self", ".", "values", ".", "max", "(", "axis", "=", "self", ".", "baseaxes", ",", "keepdims", "=", "True", ")", ")" ]
Compute the max across records.
[ "Compute", "the", "max", "across", "records", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L239-L243
12,316
thunder-project/thunder
thunder/series/series.py
Series.min
def min(self):
    """
    Compute the min across records.
    """
    return self._constructor(self.values.min(axis=self.baseaxes, keepdims=True))
python
[ "def", "min", "(", "self", ")", ":", "return", "self", ".", "_constructor", "(", "self", ".", "values", ".", "min", "(", "axis", "=", "self", ".", "baseaxes", ",", "keepdims", "=", "True", ")", ")" ]
Compute the min across records.
[ "Compute", "the", "min", "across", "records", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L245-L249
12,317
thunder-project/thunder
thunder/series/series.py
Series.reshape
def reshape(self, *shape):
    """
    Reshape the Series object

    Cannot change the last dimension.

    Parameters
    ----------
    shape: one or more ints
        New shape
    """
    if prod(self.shape) != prod(shape):
        raise ValueError("Reshaping must leave the number of elements unchanged")

    if self.shape[-1] != shape[-1]:
        raise ValueError("Reshaping cannot change the size of the constituent series (last dimension)")

    if self.labels is not None:
        newlabels = self.labels.reshape(*shape[:-1])
    else:
        newlabels = None

    return self._constructor(self.values.reshape(shape), labels=newlabels).__finalize__(self, noprop=('labels',))
python
[ "def", "reshape", "(", "self", ",", "*", "shape", ")", ":", "if", "prod", "(", "self", ".", "shape", ")", "!=", "prod", "(", "shape", ")", ":", "raise", "ValueError", "(", "\"Reshaping must leave the number of elements unchanged\"", ")", "if", "self", ".", "shape", "[", "-", "1", "]", "!=", "shape", "[", "-", "1", "]", ":", "raise", "ValueError", "(", "\"Reshaping cannot change the size of the constituent series (last dimension)\"", ")", "if", "self", ".", "labels", "is", "not", "None", ":", "newlabels", "=", "self", ".", "labels", ".", "reshape", "(", "*", "shape", "[", ":", "-", "1", "]", ")", "else", ":", "newlabels", "=", "None", "return", "self", ".", "_constructor", "(", "self", ".", "values", ".", "reshape", "(", "shape", ")", ",", "labels", "=", "newlabels", ")", ".", "__finalize__", "(", "self", ",", "noprop", "=", "(", "'labels'", ",", ")", ")" ]
Reshape the Series object

Cannot change the last dimension.

Parameters
----------
shape: one or more ints
    New shape
[ "Reshape", "the", "Series", "object" ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L251-L273
12,318
thunder-project/thunder
thunder/series/series.py
Series.between
def between(self, left, right):
    """
    Select subset of values within the given index range.

    Inclusive on the left; exclusive on the right.

    Parameters
    ----------
    left : int
        Left-most index in the desired range.

    right: int
        Right-most index in the desired range.
    """
    crit = lambda x: left <= x < right
    return self.select(crit)
python
[ "def", "between", "(", "self", ",", "left", ",", "right", ")", ":", "crit", "=", "lambda", "x", ":", "left", "<=", "x", "<", "right", "return", "self", ".", "select", "(", "crit", ")" ]
Select subset of values within the given index range.

Inclusive on the left; exclusive on the right.

Parameters
----------
left : int
    Left-most index in the desired range.

right: int
    Right-most index in the desired range.
[ "Select", "subset", "of", "values", "within", "the", "given", "index", "range", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L275-L290
12,319
thunder-project/thunder
thunder/series/series.py
Series.select
def select(self, crit):
    """
    Select subset of values that match a given index criterion.

    Parameters
    ----------
    crit : function, list, str, int
        Criterion function to map to indices, specific index value,
        or list of indices.
    """
    import types

    # handle lists, strings, and ints
    if not isinstance(crit, types.FunctionType):
        # set("foo") -> {"f", "o"}; wrap strings in a list to prevent this
        if isinstance(crit, string_types):
            critlist = set([crit])
        else:
            try:
                critlist = set(crit)
            except TypeError:
                # typically means crit is not an iterable type; for instance, crit is an int
                critlist = set([crit])
        crit = lambda x: x in critlist

    # if only one index, return it directly or throw an error
    index = self.index
    if size(index) == 1:
        if crit(index[0]):
            return self
        else:
            raise Exception('No indices found matching criterion')

    # determine new index and check the result
    newindex = [i for i in index if crit(i)]
    if len(newindex) == 0:
        raise Exception('No indices found matching criterion')
    if array(newindex == index).all():
        return self

    # use fast logical indexing to get the new values
    subinds = where([crit(i) for i in index])
    new = self.map(lambda x: x[subinds], index=newindex)

    # if singleton, need to check whether it's an array or a scalar/int
    # if array, recompute a new set of indices
    if len(newindex) == 1:
        new = new.map(lambda x: x[0], index=newindex)
        val = new.first()
        if size(val) == 1:
            newindex = [newindex[0]]
        else:
            newindex = arange(0, size(val))
        new._index = newindex

    return new
python
[ "def", "select", "(", "self", ",", "crit", ")", ":", "import", "types", "# handle lists, strings, and ints", "if", "not", "isinstance", "(", "crit", ",", "types", ".", "FunctionType", ")", ":", "# set(\"foo\") -> {\"f\", \"o\"}; wrap in list to prevent:", "if", "isinstance", "(", "crit", ",", "string_types", ")", ":", "critlist", "=", "set", "(", "[", "crit", "]", ")", "else", ":", "try", ":", "critlist", "=", "set", "(", "crit", ")", "except", "TypeError", ":", "# typically means crit is not an iterable type; for instance, crit is an int", "critlist", "=", "set", "(", "[", "crit", "]", ")", "crit", "=", "lambda", "x", ":", "x", "in", "critlist", "# if only one index, return it directly or throw an error", "index", "=", "self", ".", "index", "if", "size", "(", "index", ")", "==", "1", ":", "if", "crit", "(", "index", "[", "0", "]", ")", ":", "return", "self", "else", ":", "raise", "Exception", "(", "'No indices found matching criterion'", ")", "# determine new index and check the result", "newindex", "=", "[", "i", "for", "i", "in", "index", "if", "crit", "(", "i", ")", "]", "if", "len", "(", "newindex", ")", "==", "0", ":", "raise", "Exception", "(", "'No indices found matching criterion'", ")", "if", "array", "(", "newindex", "==", "index", ")", ".", "all", "(", ")", ":", "return", "self", "# use fast logical indexing to get the new values", "subinds", "=", "where", "(", "[", "crit", "(", "i", ")", "for", "i", "in", "index", "]", ")", "new", "=", "self", ".", "map", "(", "lambda", "x", ":", "x", "[", "subinds", "]", ",", "index", "=", "newindex", ")", "# if singleton, need to check whether it's an array or a scalar/int", "# if array, recompute a new set of indices", "if", "len", "(", "newindex", ")", "==", "1", ":", "new", "=", "new", ".", "map", "(", "lambda", "x", ":", "x", "[", "0", "]", ",", "index", "=", "newindex", ")", "val", "=", "new", ".", "first", "(", ")", "if", "size", "(", "val", ")", "==", "1", ":", "newindex", "=", "[", "newindex", "[", "0", "]", "]", "else", ":", "newindex", "=", "arange", "(", "0", ",", "size", "(", "val", ")", ")", "new", ".", "_index", "=", "newindex", "return", "new" ]
Select subset of values that match a given index criterion.

Parameters
----------
crit : function, list, str, int
    Criterion function to map to indices, specific index value,
    or list of indices.
[ "Select", "subset", "of", "values", "that", "match", "a", "given", "index", "criterion", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L292-L348
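A sketch of select and between together, assuming td.series.fromarray with an explicit index (matching the fromarray signature used by tospark):

import numpy as np
import thunder as td  # td.series.fromarray assumed from thunder's reader API

series = td.series.fromarray(np.random.rand(3, 6), index=[0, 1, 2, 3, 4, 5])

# keep only the values at indices 1 and 3
subset = series.select([1, 3])
print(subset.index)  # indices 1 and 3

# between is inclusive on the left, exclusive on the right
window = series.between(2, 5)
print(window.index)  # indices 2, 3, 4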
12,320
thunder-project/thunder
thunder/series/series.py
Series.center
def center(self, axis=1):
    """
    Subtract the mean either within or across records.

    Parameters
    ----------
    axis : int, optional, default = 1
        Which axis to center along, within (1) or across (0) records.
    """
    if axis == 1:
        return self.map(lambda x: x - mean(x))
    elif axis == 0:
        meanval = self.mean().toarray()
        return self.map(lambda x: x - meanval)
    else:
        raise Exception('Axis must be 0 or 1')
python
[ "def", "center", "(", "self", ",", "axis", "=", "1", ")", ":", "if", "axis", "==", "1", ":", "return", "self", ".", "map", "(", "lambda", "x", ":", "x", "-", "mean", "(", "x", ")", ")", "elif", "axis", "==", "0", ":", "meanval", "=", "self", ".", "mean", "(", ")", ".", "toarray", "(", ")", "return", "self", ".", "map", "(", "lambda", "x", ":", "x", "-", "meanval", ")", "else", ":", "raise", "Exception", "(", "'Axis must be 0 or 1'", ")" ]
Subtract the mean either within or across records.

Parameters
----------
axis : int, optional, default = 1
    Which axis to center along, within (1) or across (0) records.
[ "Subtract", "the", "mean", "either", "within", "or", "across", "records", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L350-L365
12,321
thunder-project/thunder
thunder/series/series.py
Series.standardize
def standardize(self, axis=1):
    """
    Divide by standard deviation either within or across records.

    Parameters
    ----------
    axis : int, optional, default = 1
        Which axis to standardize along, within (1) or across (0) records
    """
    if axis == 1:
        return self.map(lambda x: x / std(x))
    elif axis == 0:
        stdval = self.std().toarray()
        return self.map(lambda x: x / stdval)
    else:
        raise Exception('Axis must be 0 or 1')
python
[ "def", "standardize", "(", "self", ",", "axis", "=", "1", ")", ":", "if", "axis", "==", "1", ":", "return", "self", ".", "map", "(", "lambda", "x", ":", "x", "/", "std", "(", "x", ")", ")", "elif", "axis", "==", "0", ":", "stdval", "=", "self", ".", "std", "(", ")", ".", "toarray", "(", ")", "return", "self", ".", "map", "(", "lambda", "x", ":", "x", "/", "stdval", ")", "else", ":", "raise", "Exception", "(", "'Axis must be 0 or 1'", ")" ]
Divide by standard deviation either within or across records.

Parameters
----------
axis : int, optional, default = 1
    Which axis to standardize along, within (1) or across (0) records
[ "Divide", "by", "standard", "deviation", "either", "within", "or", "across", "records", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L367-L382
12,322
thunder-project/thunder
thunder/series/series.py
Series.zscore
def zscore(self, axis=1):
    """
    Subtract the mean and divide by standard deviation within or across records.

    Parameters
    ----------
    axis : int, optional, default = 1
        Which axis to zscore along, within (1) or across (0) records
    """
    if axis == 1:
        return self.map(lambda x: (x - mean(x)) / std(x))
    elif axis == 0:
        meanval = self.mean().toarray()
        stdval = self.std().toarray()
        return self.map(lambda x: (x - meanval) / stdval)
    else:
        raise Exception('Axis must be 0 or 1')
python
[ "def", "zscore", "(", "self", ",", "axis", "=", "1", ")", ":", "if", "axis", "==", "1", ":", "return", "self", ".", "map", "(", "lambda", "x", ":", "(", "x", "-", "mean", "(", "x", ")", ")", "/", "std", "(", "x", ")", ")", "elif", "axis", "==", "0", ":", "meanval", "=", "self", ".", "mean", "(", ")", ".", "toarray", "(", ")", "stdval", "=", "self", ".", "std", "(", ")", ".", "toarray", "(", ")", "return", "self", ".", "map", "(", "lambda", "x", ":", "(", "x", "-", "meanval", ")", "/", "stdval", ")", "else", ":", "raise", "Exception", "(", "'Axis must be 0 or 1'", ")" ]
Subtract the mean and divide by standard deviation within or across records.

Parameters
----------
axis : int, optional, default = 1
    Which axis to zscore along, within (1) or across (0) records
[ "Subtract", "the", "mean", "and", "divide", "by", "standard", "deviation", "within", "or", "across", "records", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L384-L400
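center, standardize, and zscore differ only in which statistic they remove. A sketch (td.series.fromarray assumed) showing that across-record zscore matches centering followed by standardizing:

import numpy as np
import thunder as td  # td.series.fromarray assumed from thunder's reader API

series = td.series.fromarray(np.random.rand(4, 10))

# within-record: each record normalized by its own mean and std
within = series.zscore(axis=1)

# across-record: each time point normalized across records;
# centering first leaves the std unchanged, so the two paths agree
across = series.zscore(axis=0)
manual = series.center(axis=0).standardize(axis=0)
print(np.allclose(across.toarray(), manual.toarray()))  # True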
12,323
thunder-project/thunder
thunder/series/series.py
Series.squelch
def squelch(self, threshold):
    """
    Set all records that do not exceed the given threshold to 0.

    Parameters
    ----------
    threshold : scalar
        Level below which to set records to zero
    """
    func = lambda x: zeros(x.shape) if max(x) < threshold else x
    return self.map(func)
python
[ "def", "squelch", "(", "self", ",", "threshold", ")", ":", "func", "=", "lambda", "x", ":", "zeros", "(", "x", ".", "shape", ")", "if", "max", "(", "x", ")", "<", "threshold", "else", "x", "return", "self", ".", "map", "(", "func", ")" ]
Set all records that do not exceed the given threshold to 0.

Parameters
----------
threshold : scalar
    Level below which to set records to zero
[ "Set", "all", "records", "that", "do", "not", "exceed", "the", "given", "threshold", "to", "0", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L402-L412
12,324
thunder-project/thunder
thunder/series/series.py
Series.correlate
def correlate(self, signal):
    """
    Correlate records against one or many one-dimensional arrays.

    Parameters
    ----------
    signal : array-like
        One or more signals to correlate against.
    """
    s = asarray(signal)

    if s.ndim == 1:
        if size(s) != self.shape[-1]:
            raise ValueError("Length of signal '%g' does not match record length '%g'"
                             % (size(s), self.shape[-1]))

        return self.map(lambda x: corrcoef(x, s)[0, 1], index=[1])

    elif s.ndim == 2:
        if s.shape[1] != self.shape[-1]:
            raise ValueError("Length of signal '%g' does not match record length '%g'"
                             % (s.shape[1], self.shape[-1]))
        newindex = arange(0, s.shape[0])
        return self.map(lambda x: array([corrcoef(x, y)[0, 1] for y in s]), index=newindex)

    else:
        raise Exception('Signal to correlate with must have 1 or 2 dimensions')
python
[ "def", "correlate", "(", "self", ",", "signal", ")", ":", "s", "=", "asarray", "(", "signal", ")", "if", "s", ".", "ndim", "==", "1", ":", "if", "size", "(", "s", ")", "!=", "self", ".", "shape", "[", "-", "1", "]", ":", "raise", "ValueError", "(", "\"Length of signal '%g' does not match record length '%g'\"", "%", "(", "size", "(", "s", ")", ",", "self", ".", "shape", "[", "-", "1", "]", ")", ")", "return", "self", ".", "map", "(", "lambda", "x", ":", "corrcoef", "(", "x", ",", "s", ")", "[", "0", ",", "1", "]", ",", "index", "=", "[", "1", "]", ")", "elif", "s", ".", "ndim", "==", "2", ":", "if", "s", ".", "shape", "[", "1", "]", "!=", "self", ".", "shape", "[", "-", "1", "]", ":", "raise", "ValueError", "(", "\"Length of signal '%g' does not match record length '%g'\"", "%", "(", "s", ".", "shape", "[", "1", "]", ",", "self", ".", "shape", "[", "-", "1", "]", ")", ")", "newindex", "=", "arange", "(", "0", ",", "s", ".", "shape", "[", "0", "]", ")", "return", "self", ".", "map", "(", "lambda", "x", ":", "array", "(", "[", "corrcoef", "(", "x", ",", "y", ")", "[", "0", ",", "1", "]", "for", "y", "in", "s", "]", ")", ",", "index", "=", "newindex", ")", "else", ":", "raise", "Exception", "(", "'Signal to correlate with must have 1 or 2 dimensions'", ")" ]
Correlate records against one or many one-dimensional arrays.

Parameters
----------
signal : array-like
    One or more signals to correlate against.
[ "Correlate", "records", "against", "one", "or", "many", "one", "-", "dimensional", "arrays", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L414-L440
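A sketch of correlate with one and with several signals (td.series.fromarray assumed):

import numpy as np
import thunder as td  # td.series.fromarray assumed from thunder's reader API

series = td.series.fromarray(np.random.rand(5, 20))

# a single signal yields one correlation per record
sig = np.random.rand(20)
print(series.correlate(sig).toarray())

# a 2d array of signals yields one correlation per signal per record
sigs = np.random.rand(2, 20)
print(series.correlate(sigs).toarray().shape)  # (5, 2)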
12,325
thunder-project/thunder
thunder/series/series.py
Series._check_panel
def _check_panel(self, length):
    """
    Check that given fixed panel length evenly divides index.

    Parameters
    ----------
    length : int
        Fixed length with which to subdivide index
    """
    n = len(self.index)
    if divmod(n, length)[1] != 0:
        raise ValueError("Panel length '%g' must evenly divide length of series '%g'"
                         % (length, n))
    if n == length:
        raise ValueError("Panel length '%g' cannot be length of series '%g'"
                         % (length, n))
python
[ "def", "_check_panel", "(", "self", ",", "length", ")", ":", "n", "=", "len", "(", "self", ".", "index", ")", "if", "divmod", "(", "n", ",", "length", ")", "[", "1", "]", "!=", "0", ":", "raise", "ValueError", "(", "\"Panel length '%g' must evenly divide length of series '%g'\"", "%", "(", "length", ",", "n", ")", ")", "if", "n", "==", "length", ":", "raise", "ValueError", "(", "\"Panel length '%g' cannot be length of series '%g'\"", "%", "(", "length", ",", "n", ")", ")" ]
Check that given fixed panel length evenly divides index.

Parameters
----------
length : int
    Fixed length with which to subdivide index
[ "Check", "that", "given", "fixed", "panel", "length", "evenly", "divides", "index", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L442-L457
12,326
thunder-project/thunder
thunder/series/series.py
Series.mean_by_panel
def mean_by_panel(self, length):
    """
    Compute the mean across fixed sized panels of each record.

    Splits each record into panels of size `length`,
    and then computes the mean across panels.
    Panel length must subdivide record exactly.

    Parameters
    ----------
    length : int
        Fixed length with which to subdivide.
    """
    self._check_panel(length)
    func = lambda v: v.reshape(-1, length).mean(axis=0)
    newindex = arange(length)
    return self.map(func, index=newindex)
python
[ "def", "mean_by_panel", "(", "self", ",", "length", ")", ":", "self", ".", "_check_panel", "(", "length", ")", "func", "=", "lambda", "v", ":", "v", ".", "reshape", "(", "-", "1", ",", "length", ")", ".", "mean", "(", "axis", "=", "0", ")", "newindex", "=", "arange", "(", "length", ")", "return", "self", ".", "map", "(", "func", ",", "index", "=", "newindex", ")" ]
Compute the mean across fixed sized panels of each record.

Splits each record into panels of size `length`,
and then computes the mean across panels.
Panel length must subdivide record exactly.

Parameters
----------
length : int
    Fixed length with which to subdivide.
[ "Compute", "the", "mean", "across", "fixed", "sized", "panels", "of", "each", "record", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L459-L475
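A sketch of mean_by_panel (td.series.fromarray assumed): each length-12 record is split into three length-4 panels, which are then averaged:

import numpy as np
import thunder as td  # td.series.fromarray assumed from thunder's reader API

series = td.series.fromarray(np.arange(24).reshape(2, 12))

panels = series.mean_by_panel(4)
print(panels.index)            # [0 1 2 3]
print(panels.toarray().shape)  # (2, 4)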
12,327
thunder-project/thunder
thunder/series/series.py
Series._makemasks
def _makemasks(self, index=None, level=0):
    """
    Internal function for generating masks for selecting values based on multi-index values.

    As all other multi-index functions will call this function, basic type-checking is also
    performed at this stage.
    """
    if index is None:
        index = self.index

    try:
        dims = len(array(index).shape)
        if dims == 1:
            index = array(index, ndmin=2).T
    except:
        raise TypeError('A multi-index must be convertible to a numpy ndarray')

    try:
        index = index[:, level]
    except:
        raise ValueError("Levels must be indices into individual elements of the index")

    lenIdx = index.shape[0]
    nlevels = index.shape[1]

    combs = product(*[unique(index.T[i, :]) for i in range(nlevels)])
    combs = array([l for l in combs])

    masks = array([[array_equal(index[i], c) for i in range(lenIdx)] for c in combs])

    return zip(*[(masks[x], combs[x]) for x in range(len(masks)) if masks[x].any()])
python
[ "def", "_makemasks", "(", "self", ",", "index", "=", "None", ",", "level", "=", "0", ")", ":", "if", "index", "is", "None", ":", "index", "=", "self", ".", "index", "try", ":", "dims", "=", "len", "(", "array", "(", "index", ")", ".", "shape", ")", "if", "dims", "==", "1", ":", "index", "=", "array", "(", "index", ",", "ndmin", "=", "2", ")", ".", "T", "except", ":", "raise", "TypeError", "(", "'A multi-index must be convertible to a numpy ndarray'", ")", "try", ":", "index", "=", "index", "[", ":", ",", "level", "]", "except", ":", "raise", "ValueError", "(", "\"Levels must be indices into individual elements of the index\"", ")", "lenIdx", "=", "index", ".", "shape", "[", "0", "]", "nlevels", "=", "index", ".", "shape", "[", "1", "]", "combs", "=", "product", "(", "*", "[", "unique", "(", "index", ".", "T", "[", "i", ",", ":", "]", ")", "for", "i", "in", "range", "(", "nlevels", ")", "]", ")", "combs", "=", "array", "(", "[", "l", "for", "l", "in", "combs", "]", ")", "masks", "=", "array", "(", "[", "[", "array_equal", "(", "index", "[", "i", "]", ",", "c", ")", "for", "i", "in", "range", "(", "lenIdx", ")", "]", "for", "c", "in", "combs", "]", ")", "return", "zip", "(", "*", "[", "(", "masks", "[", "x", "]", ",", "combs", "[", "x", "]", ")", "for", "x", "in", "range", "(", "len", "(", "masks", ")", ")", "if", "masks", "[", "x", "]", ".", "any", "(", ")", "]", ")" ]
Internal function for generating masks for selecting values based on multi-index values. As all other multi-index functions will call this function, basic type-checking is also performed at this stage.
[ "Internal", "function", "for", "generating", "masks", "for", "selecting", "values", "based", "on", "multi", "-", "index", "values", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L477-L507
12,328
thunder-project/thunder
thunder/series/series.py
Series._map_by_index
def _map_by_index(self, function, level=0): """ An internal function for mapping a function to groups of values based on a multi-index. Elements of each record are grouped according to unique value combinations of the multi-index across the given levels of the multi-index. Then the given function is applied to each of these groups separately. If this function is many-to-one, the result can be recast as a Series indexed by the unique index values used for grouping. """ if type(level) is int: level = [level] masks, ind = self._makemasks(index=self.index, level=level) nMasks = len(masks) newindex = array(ind) if len(newindex[0]) == 1: newindex = ravel(newindex) return self.map(lambda v: asarray([array(function(v[masks[x]])) for x in range(nMasks)]), index=newindex)
python
def _map_by_index(self, function, level=0): """ An internal function for mapping a function to groups of values based on a multi-index. Elements of each record are grouped according to unique value combinations of the multi-index across the given levels of the multi-index. Then the given function is applied to each of these groups separately. If this function is many-to-one, the result can be recast as a Series indexed by the unique index values used for grouping. """ if type(level) is int: level = [level] masks, ind = self._makemasks(index=self.index, level=level) nMasks = len(masks) newindex = array(ind) if len(newindex[0]) == 1: newindex = ravel(newindex) return self.map(lambda v: asarray([array(function(v[masks[x]])) for x in range(nMasks)]), index=newindex)
[ "def", "_map_by_index", "(", "self", ",", "function", ",", "level", "=", "0", ")", ":", "if", "type", "(", "level", ")", "is", "int", ":", "level", "=", "[", "level", "]", "masks", ",", "ind", "=", "self", ".", "_makemasks", "(", "index", "=", "self", ".", "index", ",", "level", "=", "level", ")", "nMasks", "=", "len", "(", "masks", ")", "newindex", "=", "array", "(", "ind", ")", "if", "len", "(", "newindex", "[", "0", "]", ")", "==", "1", ":", "newindex", "=", "ravel", "(", "newindex", ")", "return", "self", ".", "map", "(", "lambda", "v", ":", "asarray", "(", "[", "array", "(", "function", "(", "v", "[", "masks", "[", "x", "]", "]", ")", ")", "for", "x", "in", "range", "(", "nMasks", ")", "]", ")", ",", "index", "=", "newindex", ")" ]
An internal function for mapping a function to groups of values based on a multi-index. Elements of each record are grouped according to unique value combinations of the multi-index across the given levels of the multi-index. Then the given function is applied to each of these groups separately. If this function is many-to-one, the result can be recast as a Series indexed by the unique index values used for grouping.
[ "An", "internal", "function", "for", "maping", "a", "function", "to", "groups", "of", "values", "based", "on", "a", "multi", "-", "index" ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L509-L528
12,329
thunder-project/thunder
thunder/series/series.py
Series.aggregate_by_index
def aggregate_by_index(self, function, level=0): """ Aggregate data in each record, grouping by index values. For each unique value of the index, applies a function to the group indexed by that value. Returns a Series indexed by those unique values. For the result to be a valid Series object, the aggregating function should return a simple numeric type. Also allows selection of levels within a multi-index. See select_by_index for more info on indices and multi-indices. Parameters ---------- function : function Aggregating function to map to Series values. Should take a list or ndarray as input and return a simple numeric value. level : list of ints, optional, default=0 Specifies the levels of the multi-index to use when determining unique index values. If only a single level is desired, can be an int. """ result = self._map_by_index(function, level=level) return result.map(lambda v: array(v), index=result.index)
python
def aggregate_by_index(self, function, level=0): """ Aggregate data in each record, grouping by index values. For each unique value of the index, applies a function to the group indexed by that value. Returns a Series indexed by those unique values. For the result to be a valid Series object, the aggregating function should return a simple numeric type. Also allows selection of levels within a multi-index. See select_by_index for more info on indices and multi-indices. Parameters ---------- function : function Aggregating function to map to Series values. Should take a list or ndarray as input and return a simple numeric value. level : list of ints, optional, default=0 Specifies the levels of the multi-index to use when determining unique index values. If only a single level is desired, can be an int. """ result = self._map_by_index(function, level=level) return result.map(lambda v: array(v), index=result.index)
[ "def", "aggregate_by_index", "(", "self", ",", "function", ",", "level", "=", "0", ")", ":", "result", "=", "self", ".", "_map_by_index", "(", "function", ",", "level", "=", "level", ")", "return", "result", ".", "map", "(", "lambda", "v", ":", "array", "(", "v", ")", ",", "index", "=", "result", ".", "index", ")" ]
Aggregate data in each record, grouping by index values. For each unique value of the index, applies a function to the group indexed by that value. Returns a Series indexed by those unique values. For the result to be a valid Series object, the aggregating function should return a simple numeric type. Also allows selection of levels within a multi-index. See select_by_index for more info on indices and multi-indices. Parameters ---------- function : function Aggregating function to map to Series values. Should take a list or ndarray as input and return a simple numeric value. level : list of ints, optional, default=0 Specifies the levels of the multi-index to use when determining unique index values. If only a single level is desired, can be an int.
[ "Aggregrate", "data", "in", "each", "record", "grouping", "by", "index", "values", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L628-L649
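To make the grouping concrete, here is a minimal numpy-only sketch of what aggregate_by_index computes for a single record: values sharing an index label are grouped and reduced (sum is used here purely as an illustrative aggregator).

    import numpy as np

    # one record's values and their index labels
    index = np.array([0, 0, 1, 1, 2])
    values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])

    # group by unique index value and reduce each group
    unique = np.unique(index)
    aggregated = np.array([values[index == u].sum() for u in unique])
    print(unique)      # [0 1 2]
    print(aggregated)  # [3. 7. 5.]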
12,330
thunder-project/thunder
thunder/series/series.py
Series.gramian
def gramian(self): """ Compute gramian of a distributed matrix. The gramian is defined as the product of the matrix with its transpose, i.e. A^T * A. """ if self.mode == 'spark': rdd = self.values.tordd() from pyspark.accumulators import AccumulatorParam class MatrixAccumulator(AccumulatorParam): def zero(self, value): return zeros(shape(value)) def addInPlace(self, val1, val2): val1 += val2 return val1 global mat init = zeros((self.shape[1], self.shape[1])) mat = rdd.context.accumulator(init, MatrixAccumulator()) def outer_sum(x): global mat mat += outer(x, x) rdd.values().foreach(outer_sum) return self._constructor(mat.value, index=self.index) if self.mode == 'local': return self._constructor(dot(self.values.T, self.values), index=self.index)
python
def gramian(self): """ Compute gramian of a distributed matrix. The gramian is defined as the product of the matrix with its transpose, i.e. A^T * A. """ if self.mode == 'spark': rdd = self.values.tordd() from pyspark.accumulators import AccumulatorParam class MatrixAccumulator(AccumulatorParam): def zero(self, value): return zeros(shape(value)) def addInPlace(self, val1, val2): val1 += val2 return val1 global mat init = zeros((self.shape[1], self.shape[1])) mat = rdd.context.accumulator(init, MatrixAccumulator()) def outer_sum(x): global mat mat += outer(x, x) rdd.values().foreach(outer_sum) return self._constructor(mat.value, index=self.index) if self.mode == 'local': return self._constructor(dot(self.values.T, self.values), index=self.index)
[ "def", "gramian", "(", "self", ")", ":", "if", "self", ".", "mode", "==", "'spark'", ":", "rdd", "=", "self", ".", "values", ".", "tordd", "(", ")", "from", "pyspark", ".", "accumulators", "import", "AccumulatorParam", "class", "MatrixAccumulator", "(", "AccumulatorParam", ")", ":", "def", "zero", "(", "self", ",", "value", ")", ":", "return", "zeros", "(", "shape", "(", "value", ")", ")", "def", "addInPlace", "(", "self", ",", "val1", ",", "val2", ")", ":", "val1", "+=", "val2", "return", "val1", "global", "mat", "init", "=", "zeros", "(", "(", "self", ".", "shape", "[", "1", "]", ",", "self", ".", "shape", "[", "1", "]", ")", ")", "mat", "=", "rdd", ".", "context", ".", "accumulator", "(", "init", ",", "MatrixAccumulator", "(", ")", ")", "def", "outer_sum", "(", "x", ")", ":", "global", "mat", "mat", "+=", "outer", "(", "x", ",", "x", ")", "rdd", ".", "values", "(", ")", ".", "foreach", "(", "outer_sum", ")", "return", "self", ".", "_constructor", "(", "mat", ".", "value", ",", "index", "=", "self", ".", "index", ")", "if", "self", ".", "mode", "==", "'local'", ":", "return", "self", ".", "_constructor", "(", "dot", "(", "self", ".", "values", ".", "T", ",", "self", ".", "values", ")", ",", "index", "=", "self", ".", "index", ")" ]
Compute gramian of a distributed matrix. The gramian is defined as the product of the matrix with its transpose, i.e. A^T * A.
[ "Compute", "gramian", "of", "a", "distributed", "matrix", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L731-L763
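The spark branch above accumulates outer(x, x) over the rows; that is algebraically the same quantity the local branch computes with dot(self.values.T, self.values). A quick numpy check of that equivalence:

    import numpy as np

    A = np.random.randn(10, 3)
    acc = np.zeros((3, 3))
    for row in A:
        acc += np.outer(row, row)   # what the accumulator sums per row
    assert np.allclose(acc, np.dot(A.T, A))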
12,331
thunder-project/thunder
thunder/series/series.py
Series.times
def times(self, other): """ Multiply a matrix by another one. Other matrix must be a numpy array, a scalar, or another matrix in local mode. Parameters ---------- other : Matrix, scalar, or numpy array A matrix to multiply with """ if isinstance(other, ScalarType): other = asarray(other) index = self.index else: if isinstance(other, list): other = asarray(other) if isinstance(other, ndarray) and other.ndim < 2: other = expand_dims(other, 1) if not self.shape[1] == other.shape[0]: raise ValueError('shapes %s and %s are not aligned' % (self.shape, other.shape)) index = arange(other.shape[1]) if self.mode == 'local' and isinstance(other, Series) and other.mode == 'spark': raise NotImplementedError if self.mode == 'spark' and isinstance(other, Series) and other.mode == 'spark': raise NotImplementedError if self.mode == 'local' and isinstance(other, (ndarray, ScalarType)): return self._constructor(dot(self.values, other), index=index) if self.mode == 'local' and isinstance(other, Series): return self._constructor(dot(self.values, other.values), index=index) if self.mode == 'spark' and isinstance(other, (ndarray, ScalarType)): return self.map(lambda x: dot(x, other), index=index) if self.mode == 'spark' and isinstance(other, Series): return self.map(lambda x: dot(x, other.values), index=index)
python
def times(self, other): """ Multiply a matrix by another one. Other matrix must be a numpy array, a scalar, or another matrix in local mode. Parameters ---------- other : Matrix, scalar, or numpy array A matrix to multiply with """ if isinstance(other, ScalarType): other = asarray(other) index = self.index else: if isinstance(other, list): other = asarray(other) if isinstance(other, ndarray) and other.ndim < 2: other = expand_dims(other, 1) if not self.shape[1] == other.shape[0]: raise ValueError('shapes %s and %s are not aligned' % (self.shape, other.shape)) index = arange(other.shape[1]) if self.mode == 'local' and isinstance(other, Series) and other.mode == 'spark': raise NotImplementedError if self.mode == 'spark' and isinstance(other, Series) and other.mode == 'spark': raise NotImplementedError if self.mode == 'local' and isinstance(other, (ndarray, ScalarType)): return self._constructor(dot(self.values, other), index=index) if self.mode == 'local' and isinstance(other, Series): return self._constructor(dot(self.values, other.values), index=index) if self.mode == 'spark' and isinstance(other, (ndarray, ScalarType)): return self.map(lambda x: dot(x, other), index=index) if self.mode == 'spark' and isinstance(other, Series): return self.map(lambda x: dot(x, other.values), index=index)
[ "def", "times", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "other", ",", "ScalarType", ")", ":", "other", "=", "asarray", "(", "other", ")", "index", "=", "self", ".", "index", "else", ":", "if", "isinstance", "(", "other", ",", "list", ")", ":", "other", "=", "asarray", "(", "other", ")", "if", "isinstance", "(", "other", ",", "ndarray", ")", "and", "other", ".", "ndim", "<", "2", ":", "other", "=", "expand_dims", "(", "other", ",", "1", ")", "if", "not", "self", ".", "shape", "[", "1", "]", "==", "other", ".", "shape", "[", "0", "]", ":", "raise", "ValueError", "(", "'shapes %s and %s are not aligned'", "%", "(", "self", ".", "shape", ",", "other", ".", "shape", ")", ")", "index", "=", "arange", "(", "other", ".", "shape", "[", "1", "]", ")", "if", "self", ".", "mode", "==", "'local'", "and", "isinstance", "(", "other", ",", "Series", ")", "and", "other", ".", "mode", "==", "'spark'", ":", "raise", "NotImplementedError", "if", "self", ".", "mode", "==", "'spark'", "and", "isinstance", "(", "other", ",", "Series", ")", "and", "other", ".", "mode", "==", "'spark'", ":", "raise", "NotImplementedError", "if", "self", ".", "mode", "==", "'local'", "and", "isinstance", "(", "other", ",", "(", "ndarray", ",", "ScalarType", ")", ")", ":", "return", "self", ".", "_constructor", "(", "dot", "(", "self", ".", "values", ",", "other", ")", ",", "index", "=", "index", ")", "if", "self", ".", "mode", "==", "'local'", "and", "isinstance", "(", "other", ",", "Series", ")", ":", "return", "self", ".", "_constructor", "(", "dot", "(", "self", ".", "values", ",", "other", ".", "values", ")", ",", "index", "=", "index", ")", "if", "self", ".", "mode", "==", "'spark'", "and", "isinstance", "(", "other", ",", "(", "ndarray", ",", "ScalarType", ")", ")", ":", "return", "self", ".", "map", "(", "lambda", "x", ":", "dot", "(", "x", ",", "other", ")", ",", "index", "=", "index", ")", "if", "self", ".", "mode", "==", "'spark'", "and", "isinstance", "(", "other", ",", "Series", ")", ":", "return", "self", ".", "map", "(", "lambda", "x", ":", "dot", "(", "x", ",", "other", ".", "values", ")", ",", "index", "=", "index", ")" ]
Multiply a matrix by another one. Other matrix must be a numpy array, a scalar, or another matrix in local mode. Parameters ---------- other : Matrix, scalar, or numpy array A matrix to multiply with
[ "Multiply", "a", "matrix", "by", "another", "one", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L765-L805
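The shape check in times mirrors ordinary matrix multiplication: the record length of the left operand must equal the row count of the right operand, and the new index spans the right operand's columns. A plain numpy sketch of the local-mode arithmetic:

    import numpy as np

    A = np.random.randn(4, 3)   # four records, each of length 3
    B = np.random.randn(3, 2)   # rows must match the record length
    out = np.dot(A, B)
    print(out.shape)            # (4, 2); the new index is arange(2)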
12,332
thunder-project/thunder
thunder/series/series.py
Series._makewindows
def _makewindows(self, indices, window): """ Make masks used by windowing functions Given a list of indices specifying window centers, and a window size, construct a list of index arrays, one per window, that index into the target array Parameters ---------- indices : array-like List of times specifying window centers window : int Window size """ div = divmod(window, 2) before = div[0] after = div[0] + div[1] index = asarray(self.index) indices = asarray(indices) if where(index == max(indices))[0][0] + after > len(index): raise ValueError("Maximum requested index %g, with window %g, exceeds length %g" % (max(indices), window, len(index))) if where(index == min(indices))[0][0] - before < 0: raise ValueError("Minimum requested index %g, with window %g, is less than 0" % (min(indices), window)) masks = [arange(where(index == i)[0][0]-before, where(index == i)[0][0]+after, dtype='int') for i in indices] return masks
python
def _makewindows(self, indices, window): """ Make masks used by windowing functions Given a list of indices specifying window centers, and a window size, construct a list of index arrays, one per window, that index into the target array Parameters ---------- indices : array-like List of times specifying window centers window : int Window size """ div = divmod(window, 2) before = div[0] after = div[0] + div[1] index = asarray(self.index) indices = asarray(indices) if where(index == max(indices))[0][0] + after > len(index): raise ValueError("Maximum requested index %g, with window %g, exceeds length %g" % (max(indices), window, len(index))) if where(index == min(indices))[0][0] - before < 0: raise ValueError("Minimum requested index %g, with window %g, is less than 0" % (min(indices), window)) masks = [arange(where(index == i)[0][0]-before, where(index == i)[0][0]+after, dtype='int') for i in indices] return masks
[ "def", "_makewindows", "(", "self", ",", "indices", ",", "window", ")", ":", "div", "=", "divmod", "(", "window", ",", "2", ")", "before", "=", "div", "[", "0", "]", "after", "=", "div", "[", "0", "]", "+", "div", "[", "1", "]", "index", "=", "asarray", "(", "self", ".", "index", ")", "indices", "=", "asarray", "(", "indices", ")", "if", "where", "(", "index", "==", "max", "(", "indices", ")", ")", "[", "0", "]", "[", "0", "]", "+", "after", ">", "len", "(", "index", ")", ":", "raise", "ValueError", "(", "\"Maximum requested index %g, with window %g, exceeds length %g\"", "%", "(", "max", "(", "indices", ")", ",", "window", ",", "len", "(", "index", ")", ")", ")", "if", "where", "(", "index", "==", "min", "(", "indices", ")", ")", "[", "0", "]", "[", "0", "]", "-", "before", "<", "0", ":", "raise", "ValueError", "(", "\"Minimum requested index %g, with window %g, is less than 0\"", "%", "(", "min", "(", "indices", ")", ",", "window", ")", ")", "masks", "=", "[", "arange", "(", "where", "(", "index", "==", "i", ")", "[", "0", "]", "[", "0", "]", "-", "before", ",", "where", "(", "index", "==", "i", ")", "[", "0", "]", "[", "0", "]", "+", "after", ",", "dtype", "=", "'int'", ")", "for", "i", "in", "indices", "]", "return", "masks" ]
Make masks used by windowing functions Given a list of indices specifying window centers, and a window size, construct a list of index arrays, one per window, that index into the target array Parameters ---------- indices : array-like List of times specifying window centers window : int Window size
[ "Make", "masks", "used", "by", "windowing", "functions" ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L807-L835
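The divmod split means an even window is centered symmetrically, while an odd window places the extra sample after the center. A small sketch of the index arithmetic:

    import numpy as np

    window = 5
    before, rem = divmod(window, 2)
    after = before + rem
    center = 10
    mask = np.arange(center - before, center + after)
    print(mask)  # [ 8  9 10 11 12] -- five indices centered on 10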
12,333
thunder-project/thunder
thunder/series/series.py
Series.mean_by_window
def mean_by_window(self, indices, window): """ Average series across multiple windows specified by their centers. Parameters ---------- indices : array-like List of times specifying window centers window : int Window size """ masks = self._makewindows(indices, window) newindex = arange(0, len(masks[0])) return self.map(lambda x: mean([x[m] for m in masks], axis=0), index=newindex)
python
def mean_by_window(self, indices, window): """ Average series across multiple windows specified by their centers. Parameters ---------- indices : array-like List of times specifying window centers window : int Window size """ masks = self._makewindows(indices, window) newindex = arange(0, len(masks[0])) return self.map(lambda x: mean([x[m] for m in masks], axis=0), index=newindex)
[ "def", "mean_by_window", "(", "self", ",", "indices", ",", "window", ")", ":", "masks", "=", "self", ".", "_makewindows", "(", "indices", ",", "window", ")", "newindex", "=", "arange", "(", "0", ",", "len", "(", "masks", "[", "0", "]", ")", ")", "return", "self", ".", "map", "(", "lambda", "x", ":", "mean", "(", "[", "x", "[", "m", "]", "for", "m", "in", "masks", "]", ",", "axis", "=", "0", ")", ",", "index", "=", "newindex", ")" ]
Average series across multiple windows specified by their centers. Parameters ---------- indices : array-like List of times specifying window centers window : int Window size
[ "Average", "series", "across", "multiple", "windows", "specified", "by", "their", "centers", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L837-L851
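A numpy-only sketch of the per-record computation: windows of equal length are cut around each center and averaged elementwise across windows.

    import numpy as np

    y = np.arange(20, dtype=float)
    centers = [5, 10, 15]
    # window = 4 gives before = after = 2
    masks = [np.arange(c - 2, c + 2) for c in centers]
    print(np.mean([y[m] for m in masks], axis=0))  # [ 8.  9. 10. 11.]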
12,334
thunder-project/thunder
thunder/series/series.py
Series.subsample
def subsample(self, sample_factor=2): """ Subsample series by an integer factor. Parameters ---------- sample_factor : positive integer, optional, default=2 Factor for subsampling. """ if sample_factor < 0: raise Exception('Factor for subsampling must be positive, got %g' % sample_factor) s = slice(0, len(self.index), sample_factor) newindex = self.index[s] return self.map(lambda v: v[s], index=newindex)
python
def subsample(self, sample_factor=2): """ Subsample series by an integer factor. Parameters ---------- sample_factor : positive integer, optional, default=2 Factor for subsampling. """ if sample_factor < 0: raise Exception('Factor for subsampling must be positive, got %g' % sample_factor) s = slice(0, len(self.index), sample_factor) newindex = self.index[s] return self.map(lambda v: v[s], index=newindex)
[ "def", "subsample", "(", "self", ",", "sample_factor", "=", "2", ")", ":", "if", "sample_factor", "<", "0", ":", "raise", "Exception", "(", "'Factor for subsampling must be postive, got %g'", "%", "sample_factor", ")", "s", "=", "slice", "(", "0", ",", "len", "(", "self", ".", "index", ")", ",", "sample_factor", ")", "newindex", "=", "self", ".", "index", "[", "s", "]", "return", "self", ".", "map", "(", "lambda", "v", ":", "v", "[", "s", "]", ",", "index", "=", "newindex", ")" ]
Subsample series by an integer factor. Parameters ---------- sample_factor : positive integer, optional, default=2 Factor for subsampling.
[ "Subsample", "series", "by", "an", "integer", "factor", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L853-L866
12,335
thunder-project/thunder
thunder/series/series.py
Series.downsample
def downsample(self, sample_factor=2): """ Downsample series by an integer factor by averaging. Parameters ---------- sample_factor : positive integer, optional, default=2 Factor for downsampling. """ if sample_factor < 0: raise Exception('Factor for downsampling must be positive, got %g' % sample_factor) newlength = floor(len(self.index) / sample_factor) func = lambda v: v[0:int(newlength * sample_factor)].reshape(-1, sample_factor).mean(axis=1) newindex = arange(newlength) return self.map(func, index=newindex)
python
def downsample(self, sample_factor=2): """ Downsample series by an integer factor by averaging. Parameters ---------- sample_factor : positive integer, optional, default=2 Factor for downsampling. """ if sample_factor < 0: raise Exception('Factor for downsampling must be positive, got %g' % sample_factor) newlength = floor(len(self.index) / sample_factor) func = lambda v: v[0:int(newlength * sample_factor)].reshape(-1, sample_factor).mean(axis=1) newindex = arange(newlength) return self.map(func, index=newindex)
[ "def", "downsample", "(", "self", ",", "sample_factor", "=", "2", ")", ":", "if", "sample_factor", "<", "0", ":", "raise", "Exception", "(", "'Factor for subsampling must be postive, got %g'", "%", "sample_factor", ")", "newlength", "=", "floor", "(", "len", "(", "self", ".", "index", ")", "/", "sample_factor", ")", "func", "=", "lambda", "v", ":", "v", "[", "0", ":", "int", "(", "newlength", "*", "sample_factor", ")", "]", ".", "reshape", "(", "-", "1", ",", "sample_factor", ")", ".", "mean", "(", "axis", "=", "1", ")", "newindex", "=", "arange", "(", "newlength", ")", "return", "self", ".", "map", "(", "func", ",", "index", "=", "newindex", ")" ]
Downsample series by an integer factor by averaging. Parameters ---------- sample_factor : positive integer, optional, default=2 Factor for downsampling.
[ "Downsample", "series", "by", "an", "integer", "factor", "by", "averaging", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L868-L882
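The reshape trick above drops any trailing samples that do not fill a complete bin and averages within each bin. For example:

    import numpy as np

    v = np.arange(10, dtype=float)
    factor = 3
    newlength = len(v) // factor   # 3 bins; the 10th sample is dropped
    print(v[:newlength * factor].reshape(-1, factor).mean(axis=1))  # [1. 4. 7.]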
12,336
thunder-project/thunder
thunder/series/series.py
Series.fourier
def fourier(self, freq=None): """ Compute statistics of a Fourier decomposition on series data. Parameters ---------- freq : int Digital frequency at which to compute coherence and phase """ def get(y, freq): y = y - mean(y) nframes = len(y) ft = fft.fft(y) ft = ft[0:int(fix(nframes/2))] ampFt = 2*abs(ft)/nframes amp = ampFt[freq] ampSum = sqrt(sum(ampFt**2)) co = amp / ampSum ph = -(pi/2) - angle(ft[freq]) if ph < 0: ph += pi * 2 return array([co, ph]) if freq >= int(fix(size(self.index)/2)): raise Exception('Requested frequency, %g, is too high, ' 'must be less than half the series duration' % freq) index = ['coherence', 'phase'] return self.map(lambda x: get(x, freq), index=index)
python
def fourier(self, freq=None): """ Compute statistics of a Fourier decomposition on series data. Parameters ---------- freq : int Digital frequency at which to compute coherence and phase """ def get(y, freq): y = y - mean(y) nframes = len(y) ft = fft.fft(y) ft = ft[0:int(fix(nframes/2))] ampFt = 2*abs(ft)/nframes amp = ampFt[freq] ampSum = sqrt(sum(ampFt**2)) co = amp / ampSum ph = -(pi/2) - angle(ft[freq]) if ph < 0: ph += pi * 2 return array([co, ph]) if freq >= int(fix(size(self.index)/2)): raise Exception('Requested frequency, %g, is too high, ' 'must be less than half the series duration' % freq) index = ['coherence', 'phase'] return self.map(lambda x: get(x, freq), index=index)
[ "def", "fourier", "(", "self", ",", "freq", "=", "None", ")", ":", "def", "get", "(", "y", ",", "freq", ")", ":", "y", "=", "y", "-", "mean", "(", "y", ")", "nframes", "=", "len", "(", "y", ")", "ft", "=", "fft", ".", "fft", "(", "y", ")", "ft", "=", "ft", "[", "0", ":", "int", "(", "fix", "(", "nframes", "/", "2", ")", ")", "]", "ampFt", "=", "2", "*", "abs", "(", "ft", ")", "/", "nframes", "amp", "=", "ampFt", "[", "freq", "]", "ampSum", "=", "sqrt", "(", "sum", "(", "ampFt", "**", "2", ")", ")", "co", "=", "amp", "/", "ampSum", "ph", "=", "-", "(", "pi", "/", "2", ")", "-", "angle", "(", "ft", "[", "freq", "]", ")", "if", "ph", "<", "0", ":", "ph", "+=", "pi", "*", "2", "return", "array", "(", "[", "co", ",", "ph", "]", ")", "if", "freq", ">=", "int", "(", "fix", "(", "size", "(", "self", ".", "index", ")", "/", "2", ")", ")", ":", "raise", "Exception", "(", "'Requested frequency, %g, is too high, '", "'must be less than half the series duration'", "%", "freq", ")", "index", "=", "[", "'coherence'", ",", "'phase'", "]", "return", "self", ".", "map", "(", "lambda", "x", ":", "get", "(", "x", ",", "freq", ")", ",", "index", "=", "index", ")" ]
Compute statistics of a Fourier decomposition on series data. Parameters ---------- freq : int Digital frequency at which to compute coherence and phase
[ "Compute", "statistics", "of", "a", "Fourier", "decomposition", "on", "series", "data", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L884-L912
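For a pure sinusoid at the requested frequency, the coherence defined above approaches 1, since essentially all spectral amplitude sits in a single bin. A self-contained numpy check of the statistic:

    import numpy as np

    n = 100
    t = np.arange(n)
    y = np.sin(2 * np.pi * 5 * t / n)   # exactly 5 cycles
    y = y - y.mean()
    ft = np.fft.fft(y)[:n // 2]
    amp = 2 * np.abs(ft) / n
    print(round(amp[5] / np.sqrt(np.sum(amp ** 2)), 3))  # ~1.0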
12,337
thunder-project/thunder
thunder/series/series.py
Series.convolve
def convolve(self, signal, mode='full'): """ Convolve series data against another signal. Parameters ---------- signal : array Signal to convolve with (must be 1D) mode : str, optional, default='full' Mode of convolution, options are 'full', 'same', and 'valid' """ from numpy import convolve s = asarray(signal) n = size(self.index) m = size(s) # use expected lengths to make a new index if mode == 'same': newmax = max(n, m) elif mode == 'valid': newmax = max(m, n) - min(m, n) + 1 else: newmax = n+m-1 newindex = arange(0, newmax) return self.map(lambda x: convolve(x, signal, mode), index=newindex)
python
def convolve(self, signal, mode='full'): """ Convolve series data against another signal. Parameters ---------- signal : array Signal to convolve with (must be 1D) mode : str, optional, default='full' Mode of convolution, options are 'full', 'same', and 'valid' """ from numpy import convolve s = asarray(signal) n = size(self.index) m = size(s) # use expected lengths to make a new index if mode == 'same': newmax = max(n, m) elif mode == 'valid': newmax = max(m, n) - min(m, n) + 1 else: newmax = n+m-1 newindex = arange(0, newmax) return self.map(lambda x: convolve(x, signal, mode), index=newindex)
[ "def", "convolve", "(", "self", ",", "signal", ",", "mode", "=", "'full'", ")", ":", "from", "numpy", "import", "convolve", "s", "=", "asarray", "(", "signal", ")", "n", "=", "size", "(", "self", ".", "index", ")", "m", "=", "size", "(", "s", ")", "# use expected lengths to make a new index", "if", "mode", "==", "'same'", ":", "newmax", "=", "max", "(", "n", ",", "m", ")", "elif", "mode", "==", "'valid'", ":", "newmax", "=", "max", "(", "m", ",", "n", ")", "-", "min", "(", "m", ",", "n", ")", "+", "1", "else", ":", "newmax", "=", "n", "+", "m", "-", "1", "newindex", "=", "arange", "(", "0", ",", "newmax", ")", "return", "self", ".", "map", "(", "lambda", "x", ":", "convolve", "(", "x", ",", "signal", ",", "mode", ")", ",", "index", "=", "newindex", ")" ]
Convolve series data against another signal. Parameters ---------- signal : array Signal to convolve with (must be 1D) mode : str, optional, default='full' Mode of convolution, options are 'full', 'same', and 'valid'
[ "Convolve", "series", "data", "against", "another", "signal", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L914-L943
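The new index lengths chosen above match numpy's convolution modes, which can be verified directly:

    import numpy as np

    x, k = np.ones(6), np.ones(3)
    for mode, expected in [('full', 6 + 3 - 1), ('same', 6), ('valid', 6 - 3 + 1)]:
        assert len(np.convolve(x, k, mode)) == expected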
12,338
thunder-project/thunder
thunder/series/series.py
Series.crosscorr
def crosscorr(self, signal, lag=0): """ Cross correlate series data against another signal. Parameters ---------- signal : array Signal to correlate against (must be 1D). lag : int Range of lags to consider, will cover (-lag, +lag). """ from scipy.linalg import norm s = asarray(signal) s = s - mean(s) s = s / norm(s) if size(s) != size(self.index): raise Exception('Size of signal to cross correlate with, %g, ' 'does not match size of series' % size(s)) # create a matrix of lagged signals if lag != 0: shifts = range(-lag, lag+1) d = len(s) m = len(shifts) sshifted = zeros((m, d)) for i in range(0, len(shifts)): tmp = roll(s, shifts[i]) if shifts[i] < 0: tmp[(d+shifts[i]):] = 0 if shifts[i] > 0: tmp[:shifts[i]] = 0 sshifted[i, :] = tmp s = sshifted else: shifts = [0] def get(y, s): y = y - mean(y) n = norm(y) if n == 0: b = zeros((s.shape[0],)) else: y /= n b = dot(s, y) return b return self.map(lambda x: get(x, s), index=shifts)
python
def crosscorr(self, signal, lag=0): """ Cross correlate series data against another signal. Parameters ---------- signal : array Signal to correlate against (must be 1D). lag : int Range of lags to consider, will cover (-lag, +lag). """ from scipy.linalg import norm s = asarray(signal) s = s - mean(s) s = s / norm(s) if size(s) != size(self.index): raise Exception('Size of signal to cross correlate with, %g, ' 'does not match size of series' % size(s)) # create a matrix of lagged signals if lag != 0: shifts = range(-lag, lag+1) d = len(s) m = len(shifts) sshifted = zeros((m, d)) for i in range(0, len(shifts)): tmp = roll(s, shifts[i]) if shifts[i] < 0: tmp[(d+shifts[i]):] = 0 if shifts[i] > 0: tmp[:shifts[i]] = 0 sshifted[i, :] = tmp s = sshifted else: shifts = [0] def get(y, s): y = y - mean(y) n = norm(y) if n == 0: b = zeros((s.shape[0],)) else: y /= n b = dot(s, y) return b return self.map(lambda x: get(x, s), index=shifts)
[ "def", "crosscorr", "(", "self", ",", "signal", ",", "lag", "=", "0", ")", ":", "from", "scipy", ".", "linalg", "import", "norm", "s", "=", "asarray", "(", "signal", ")", "s", "=", "s", "-", "mean", "(", "s", ")", "s", "=", "s", "/", "norm", "(", "s", ")", "if", "size", "(", "s", ")", "!=", "size", "(", "self", ".", "index", ")", ":", "raise", "Exception", "(", "'Size of signal to cross correlate with, %g, '", "'does not match size of series'", "%", "size", "(", "s", ")", ")", "# created a matrix with lagged signals", "if", "lag", "is", "not", "0", ":", "shifts", "=", "range", "(", "-", "lag", ",", "lag", "+", "1", ")", "d", "=", "len", "(", "s", ")", "m", "=", "len", "(", "shifts", ")", "sshifted", "=", "zeros", "(", "(", "m", ",", "d", ")", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "shifts", ")", ")", ":", "tmp", "=", "roll", "(", "s", ",", "shifts", "[", "i", "]", ")", "if", "shifts", "[", "i", "]", "<", "0", ":", "tmp", "[", "(", "d", "+", "shifts", "[", "i", "]", ")", ":", "]", "=", "0", "if", "shifts", "[", "i", "]", ">", "0", ":", "tmp", "[", ":", "shifts", "[", "i", "]", "]", "=", "0", "sshifted", "[", "i", ",", ":", "]", "=", "tmp", "s", "=", "sshifted", "else", ":", "shifts", "=", "[", "0", "]", "def", "get", "(", "y", ",", "s", ")", ":", "y", "=", "y", "-", "mean", "(", "y", ")", "n", "=", "norm", "(", "y", ")", "if", "n", "==", "0", ":", "b", "=", "zeros", "(", "(", "s", ".", "shape", "[", "0", "]", ",", ")", ")", "else", ":", "y", "/=", "n", "b", "=", "dot", "(", "s", ",", "y", ")", "return", "b", "return", "self", ".", "map", "(", "lambda", "x", ":", "get", "(", "x", ",", "s", ")", ",", "index", "=", "shifts", ")" ]
Cross correlate series data against another signal. Parameters ---------- signal : array Signal to correlate against (must be 1D). lag : int Range of lags to consider, will cover (-lag, +lag).
[ "Cross", "correlate", "series", "data", "against", "another", "signal", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L945-L994
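At lag 0 the statistic reduces to the dot product of mean-subtracted, unit-norm copies of the two signals, i.e. Pearson correlation. A minimal numpy/scipy sketch:

    import numpy as np
    from scipy.linalg import norm

    s = np.random.randn(50)
    y = 2 * s + 1                       # scaled and shifted copy of s
    sn = (s - s.mean()) / norm(s - s.mean())
    yn = (y - y.mean()) / norm(y - y.mean())
    print(round(np.dot(sn, yn), 3))     # 1.0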
12,339
thunder-project/thunder
thunder/series/series.py
Series.detrend
def detrend(self, method='linear', order=5): """ Detrend series data with linear or nonlinear detrending. Preserve intercept so that subsequent operations can adjust the baseline. Parameters ---------- method : str, optional, default = 'linear' Detrending method order : int, optional, default = 5 Order of polynomial, for non-linear detrending only """ check_options(method, ['linear', 'nonlinear']) if method == 'linear': order = 1 def func(y): x = arange(len(y)) p = polyfit(x, y, order) p[-1] = 0 yy = polyval(p, x) return y - yy return self.map(func)
python
def detrend(self, method='linear', order=5): """ Detrend series data with linear or nonlinear detrending. Preserve intercept so that subsequent operations can adjust the baseline. Parameters ---------- method : str, optional, default = 'linear' Detrending method order : int, optional, default = 5 Order of polynomial, for non-linear detrending only """ check_options(method, ['linear', 'nonlinear']) if method == 'linear': order = 1 def func(y): x = arange(len(y)) p = polyfit(x, y, order) p[-1] = 0 yy = polyval(p, x) return y - yy return self.map(func)
[ "def", "detrend", "(", "self", ",", "method", "=", "'linear'", ",", "order", "=", "5", ")", ":", "check_options", "(", "method", ",", "[", "'linear'", ",", "'nonlinear'", "]", ")", "if", "method", "==", "'linear'", ":", "order", "=", "1", "def", "func", "(", "y", ")", ":", "x", "=", "arange", "(", "len", "(", "y", ")", ")", "p", "=", "polyfit", "(", "x", ",", "y", ",", "order", ")", "p", "[", "-", "1", "]", "=", "0", "yy", "=", "polyval", "(", "p", ",", "x", ")", "return", "y", "-", "yy", "return", "self", ".", "map", "(", "func", ")" ]
Detrend series data with linear or nonlinear detrending. Preserve intercept so that subsequent operations can adjust the baseline. Parameters ---------- method : str, optional, default = 'linear' Detrending method order : int, optional, default = 5 Order of polynomial, for non-linear detrending only
[ "Detrend", "series", "data", "with", "linear", "or", "nonlinear", "detrending", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L996-L1022
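Zeroing the constant term of the fitted polynomial (p[-1] = 0) is what preserves the intercept: the fitted trend is subtracted but the baseline offset survives. A small numpy check:

    import numpy as np

    x = np.arange(100, dtype=float)
    y = 0.5 * x + 10                    # slope 0.5, intercept 10
    p = np.polyfit(x, y, 1)
    p[-1] = 0                           # keep the intercept out of the subtraction
    print((y - np.polyval(p, x))[:3])   # [10. 10. 10.] -- baseline preserved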
12,340
thunder-project/thunder
thunder/series/series.py
Series.normalize
def normalize(self, method='percentile', window=None, perc=20, offset=0.1): """ Normalize by subtracting and dividing by a baseline. Baseline can be derived from a global mean or percentile, or a smoothed percentile estimated within a rolling window. Windowed baselines may only be well-defined for temporal series data. Parameters ---------- method : str, optional, default = 'percentile' Quantity to use as the baseline, options are 'mean', 'percentile', 'window', or 'window-exact'. window : int, optional, default = None Size of window for baseline estimation, for 'window' and 'window-exact' baseline only. perc : int, optional, default = 20 Percentile value to use, for 'percentile', 'window', or 'window-exact' baseline only. offset : float, optional, default = 0.1 Scalar added to baseline during division to avoid division by 0. """ check_options(method, ['mean', 'percentile', 'window', 'window-exact']) from warnings import warn if not (method == 'window' or method == 'window-exact') and window is not None: warn('Setting window without using method "window" has no effect') if method == 'mean': baseFunc = mean if method == 'percentile': baseFunc = lambda x: percentile(x, perc) if method == 'window': from scipy.ndimage.filters import percentile_filter baseFunc = lambda x: percentile_filter(x.astype(float64), perc, window, mode='nearest') if method == 'window-exact': if window & 0x1: left, right = (ceil(window/2), ceil(window/2) + 1) else: left, right = (window/2, window/2) n = len(self.index) baseFunc = lambda x: asarray([percentile(x[max(ix-left, 0):min(ix+right+1, n)], perc) for ix in arange(0, n)]) def get(y): b = baseFunc(y) return (y - b) / (b + offset) return self.map(get)
python
def normalize(self, method='percentile', window=None, perc=20, offset=0.1): """ Normalize by subtracting and dividing by a baseline. Baseline can be derived from a global mean or percentile, or a smoothed percentile estimated within a rolling window. Windowed baselines may only be well-defined for temporal series data. Parameters ---------- method : str, optional, default = 'percentile' Quantity to use as the baseline, options are 'mean', 'percentile', 'window', or 'window-exact'. window : int, optional, default = None Size of window for baseline estimation, for 'window' and 'window-exact' baseline only. perc : int, optional, default = 20 Percentile value to use, for 'percentile', 'window', or 'window-exact' baseline only. offset : float, optional, default = 0.1 Scalar added to baseline during division to avoid division by 0. """ check_options(method, ['mean', 'percentile', 'window', 'window-exact']) from warnings import warn if not (method == 'window' or method == 'window-exact') and window is not None: warn('Setting window without using method "window" has no effect') if method == 'mean': baseFunc = mean if method == 'percentile': baseFunc = lambda x: percentile(x, perc) if method == 'window': from scipy.ndimage.filters import percentile_filter baseFunc = lambda x: percentile_filter(x.astype(float64), perc, window, mode='nearest') if method == 'window-exact': if window & 0x1: left, right = (ceil(window/2), ceil(window/2) + 1) else: left, right = (window/2, window/2) n = len(self.index) baseFunc = lambda x: asarray([percentile(x[max(ix-left, 0):min(ix+right+1, n)], perc) for ix in arange(0, n)]) def get(y): b = baseFunc(y) return (y - b) / (b + offset) return self.map(get)
[ "def", "normalize", "(", "self", ",", "method", "=", "'percentile'", ",", "window", "=", "None", ",", "perc", "=", "20", ",", "offset", "=", "0.1", ")", ":", "check_options", "(", "method", ",", "[", "'mean'", ",", "'percentile'", ",", "'window'", ",", "'window-exact'", "]", ")", "from", "warnings", "import", "warn", "if", "not", "(", "method", "==", "'window'", "or", "method", "==", "'window-exact'", ")", "and", "window", "is", "not", "None", ":", "warn", "(", "'Setting window without using method \"window\" has no effect'", ")", "if", "method", "==", "'mean'", ":", "baseFunc", "=", "mean", "if", "method", "==", "'percentile'", ":", "baseFunc", "=", "lambda", "x", ":", "percentile", "(", "x", ",", "perc", ")", "if", "method", "==", "'window'", ":", "from", "scipy", ".", "ndimage", ".", "filters", "import", "percentile_filter", "baseFunc", "=", "lambda", "x", ":", "percentile_filter", "(", "x", ".", "astype", "(", "float64", ")", ",", "perc", ",", "window", ",", "mode", "=", "'nearest'", ")", "if", "method", "==", "'window-exact'", ":", "if", "window", "&", "0x1", ":", "left", ",", "right", "=", "(", "ceil", "(", "window", "/", "2", ")", ",", "ceil", "(", "window", "/", "2", ")", "+", "1", ")", "else", ":", "left", ",", "right", "=", "(", "window", "/", "2", ",", "window", "/", "2", ")", "n", "=", "len", "(", "self", ".", "index", ")", "baseFunc", "=", "lambda", "x", ":", "asarray", "(", "[", "percentile", "(", "x", "[", "max", "(", "ix", "-", "left", ",", "0", ")", ":", "min", "(", "ix", "+", "right", "+", "1", ",", "n", ")", "]", ",", "perc", ")", "for", "ix", "in", "arange", "(", "0", ",", "n", ")", "]", ")", "def", "get", "(", "y", ")", ":", "b", "=", "baseFunc", "(", "y", ")", "return", "(", "y", "-", "b", ")", "/", "(", "b", "+", "offset", ")", "return", "self", ".", "map", "(", "get", ")" ]
Normalize by subtracting and dividing by a baseline. Baseline can be derived from a global mean or percentile, or a smoothed percentile estimated within a rolling window. Windowed baselines may only be well-defined for temporal series data. Parameters ---------- method : str, optional, default = 'percentile' Quantity to use as the baseline, options are 'mean', 'percentile', 'window', or 'window-exact'. window : int, optional, default = None Size of window for baseline estimation, for 'window' and 'window-exact' baseline only. perc : int, optional, default = 20 Percentile value to use, for 'percentile', 'window', or 'window-exact' baseline only. offset : float, optional, default = 0.1 Scalar added to baseline during division to avoid division by 0.
[ "Normalize", "by", "subtracting", "and", "dividing", "by", "a", "baseline", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L1024-L1081
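A numpy-only sketch of the 'percentile' method for one record, showing the df/f-style arithmetic with the default perc and offset:

    import numpy as np

    y = np.array([10., 12., 11., 30., 10.])
    b = np.percentile(y, 20)            # global 20th-percentile baseline
    print(np.round((y - b) / (b + 0.1), 2))  # baseline samples map to ~0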
12,341
thunder-project/thunder
thunder/series/series.py
Series.toimages
def toimages(self, chunk_size='auto'): """ Converts to images data. This method is equivalent to series.toblocks(size).toimages(). Parameters ---------- chunk_size : str or tuple, size of series chunk used during conversion, default = 'auto' String interpreted as memory size (in kilobytes, e.g. '64'). The exception is the string 'auto', which will choose a chunk size to make the resulting blocks ~100 MB in size. Int interpreted as 'number of elements'. Only valid in spark mode. """ from thunder.images.images import Images if chunk_size == 'auto': chunk_size = str(max([int(1e5/prod(self.baseshape)), 1])) n = len(self.shape) - 1 if self.mode == 'spark': return Images(self.values.swap(tuple(range(n)), (0,), size=chunk_size)) if self.mode == 'local': return Images(self.values.transpose((n,) + tuple(range(0, n))))
python
def toimages(self, chunk_size='auto'): """ Converts to images data. This method is equivalent to series.toblocks(size).toimages(). Parameters ---------- chunk_size : str or tuple, size of series chunk used during conversion, default = 'auto' String interpreted as memory size (in kilobytes, e.g. '64'). The exception is the string 'auto', which will choose a chunk size to make the resulting blocks ~100 MB in size. Int interpreted as 'number of elements'. Only valid in spark mode. """ from thunder.images.images import Images if chunk_size == 'auto': chunk_size = str(max([int(1e5/prod(self.baseshape)), 1])) n = len(self.shape) - 1 if self.mode == 'spark': return Images(self.values.swap(tuple(range(n)), (0,), size=chunk_size)) if self.mode == 'local': return Images(self.values.transpose((n,) + tuple(range(0, n))))
[ "def", "toimages", "(", "self", ",", "chunk_size", "=", "'auto'", ")", ":", "from", "thunder", ".", "images", ".", "images", "import", "Images", "if", "chunk_size", "is", "'auto'", ":", "chunk_size", "=", "str", "(", "max", "(", "[", "int", "(", "1e5", "/", "prod", "(", "self", ".", "baseshape", ")", ")", ",", "1", "]", ")", ")", "n", "=", "len", "(", "self", ".", "shape", ")", "-", "1", "if", "self", ".", "mode", "==", "'spark'", ":", "return", "Images", "(", "self", ".", "values", ".", "swap", "(", "tuple", "(", "range", "(", "n", ")", ")", ",", "(", "0", ",", ")", ",", "size", "=", "chunk_size", ")", ")", "if", "self", ".", "mode", "==", "'local'", ":", "return", "Images", "(", "self", ".", "values", ".", "transpose", "(", "(", "n", ",", ")", "+", "tuple", "(", "range", "(", "0", ",", "n", ")", ")", ")", ")" ]
Converts to images data. This method is equivalent to series.toblocks(size).toimages(). Parameters ---------- chunk_size : str or tuple, size of series chunk used during conversion, default = 'auto' String interpreted as memory size (in kilobytes, e.g. '64'). The exception is the string 'auto', which will choose a chunk size to make the resulting blocks ~100 MB in size. Int interpreted as 'number of elements'. Only valid in spark mode.
[ "Converts", "to", "images", "data", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L1083-L1108
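In local mode the conversion is just an axis permutation: the record axis (last) becomes the leading image axis. For example:

    import numpy as np

    series = np.random.randn(4, 5, 10)      # two spatial dims, 10 time points
    images = series.transpose((2, 0, 1))    # n = 2, so the permutation is (2, 0, 1)
    print(images.shape)                     # (10, 4, 5)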
12,342
thunder-project/thunder
thunder/series/series.py
Series.tobinary
def tobinary(self, path, prefix='series', overwrite=False, credentials=None): """ Write data to binary files. Parameters ---------- path : string path or URI to directory to be created Output files will be written underneath path. Directory will be created as a result of this call. prefix : str, optional, default = 'series' String prefix for files. overwrite : bool If true, path and all its contents will be deleted and recreated as part of this call. """ from thunder.series.writers import tobinary tobinary(self, path, prefix=prefix, overwrite=overwrite, credentials=credentials)
python
def tobinary(self, path, prefix='series', overwrite=False, credentials=None): """ Write data to binary files. Parameters ---------- path : string path or URI to directory to be created Output files will be written underneath path. Directory will be created as a result of this call. prefix : str, optional, default = 'series' String prefix for files. overwrite : bool If true, path and all its contents will be deleted and recreated as part of this call. """ from thunder.series.writers import tobinary tobinary(self, path, prefix=prefix, overwrite=overwrite, credentials=credentials)
[ "def", "tobinary", "(", "self", ",", "path", ",", "prefix", "=", "'series'", ",", "overwrite", "=", "False", ",", "credentials", "=", "None", ")", ":", "from", "thunder", ".", "series", ".", "writers", "import", "tobinary", "tobinary", "(", "self", ",", "path", ",", "prefix", "=", "prefix", ",", "overwrite", "=", "overwrite", ",", "credentials", "=", "credentials", ")" ]
Write data to binary files. Parameters ---------- path : string path or URI to directory to be created Output files will be written underneath path. Directory will be created as a result of this call. prefix : str, optional, default = 'series' String prefix for files. overwrite : bool If true, path and all its contents will be deleted and recreated as part of this call.
[ "Write", "data", "to", "binary", "files", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L1110-L1128
12,343
thunder-project/thunder
thunder/readers.py
addextension
def addextension(path, ext=None): """ Helper function for handling paths when file extensions are passed separately. """ if ext: if '*' in path: return path elif os.path.splitext(path)[1]: return path else: if not ext.startswith('.'): ext = '.'+ext if not path.endswith(ext): if not path.endswith(os.path.sep): path += os.path.sep return path + '*' + ext else: return path else: return path
python
def addextension(path, ext=None): """ Helper function for handling paths when file extensions are passed separately. """ if ext: if '*' in path: return path elif os.path.splitext(path)[1]: return path else: if not ext.startswith('.'): ext = '.'+ext if not path.endswith(ext): if not path.endswith(os.path.sep): path += os.path.sep return path + '*' + ext else: return path else: return path
[ "def", "addextension", "(", "path", ",", "ext", "=", "None", ")", ":", "if", "ext", ":", "if", "'*'", "in", "path", ":", "return", "path", "elif", "os", ".", "path", ".", "splitext", "(", "path", ")", "[", "1", "]", ":", "return", "path", "else", ":", "if", "not", "ext", ".", "startswith", "(", "'.'", ")", ":", "ext", "=", "'.'", "+", "ext", "if", "not", "path", ".", "endswith", "(", "ext", ")", ":", "if", "not", "path", ".", "endswith", "(", "os", ".", "path", ".", "sep", ")", ":", "path", "+=", "os", ".", "path", ".", "sep", "return", "path", "+", "'*'", "+", "ext", "else", ":", "return", "path", "else", ":", "return", "path" ]
Helper function for handling paths when file extensions are passed separately.
[ "Helper", "function", "for", "handling", "of", "paths", "given", "separately", "passed", "file", "extensions", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/readers.py#L21-L40
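Hypothetical calls illustrating the three branches, assuming addextension is importable from thunder.readers and a POSIX path separator:

    from thunder.readers import addextension

    print(addextension('data/*', 'bin'))         # 'data/*'        -- wildcard wins
    print(addextension('data/file.bin', 'bin'))  # 'data/file.bin' -- extension present
    print(addextension('data', 'bin'))           # 'data/*.bin'    -- glob appended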
12,344
thunder-project/thunder
thunder/readers.py
select
def select(files, start, stop): """ Helper function for handling start and stop indices """ if start or stop: if start is None: start = 0 if stop is None: stop = len(files) files = files[start:stop] return files
python
def select(files, start, stop): """ Helper function for handling start and stop indices """ if start or stop: if start is None: start = 0 if stop is None: stop = len(files) files = files[start:stop] return files
[ "def", "select", "(", "files", ",", "start", ",", "stop", ")", ":", "if", "start", "or", "stop", ":", "if", "start", "is", "None", ":", "start", "=", "0", "if", "stop", "is", "None", ":", "stop", "=", "len", "(", "files", ")", "files", "=", "files", "[", "start", ":", "stop", "]", "return", "files" ]
Helper function for handling start and stop indices
[ "Helper", "function", "for", "handling", "start", "and", "stop", "indices" ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/readers.py#L42-L52
12,345
thunder-project/thunder
thunder/readers.py
listrecursive
def listrecursive(path, ext=None): """ List files recursively """ filenames = set() for root, dirs, files in os.walk(path): if ext: if ext == 'tif' or ext == 'tiff': tmp = fnmatch.filter(files, '*.' + 'tiff') files = tmp + fnmatch.filter(files, '*.' + 'tif') else: files = fnmatch.filter(files, '*.' + ext) for filename in files: filenames.add(os.path.join(root, filename)) filenames = list(filenames) filenames.sort() return sorted(filenames)
python
def listrecursive(path, ext=None): """ List files recursively """ filenames = set() for root, dirs, files in os.walk(path): if ext: if ext == 'tif' or ext == 'tiff': tmp = fnmatch.filter(files, '*.' + 'tiff') files = tmp + fnmatch.filter(files, '*.' + 'tif') else: files = fnmatch.filter(files, '*.' + ext) for filename in files: filenames.add(os.path.join(root, filename)) filenames = list(filenames) filenames.sort() return sorted(filenames)
[ "def", "listrecursive", "(", "path", ",", "ext", "=", "None", ")", ":", "filenames", "=", "set", "(", ")", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "path", ")", ":", "if", "ext", ":", "if", "ext", "==", "'tif'", "or", "ext", "==", "'tiff'", ":", "tmp", "=", "fnmatch", ".", "filter", "(", "files", ",", "'*.'", "+", "'tiff'", ")", "files", "=", "tmp", "+", "fnmatch", ".", "filter", "(", "files", ",", "'*.'", "+", "'tif'", ")", "else", ":", "files", "=", "fnmatch", ".", "filter", "(", "files", ",", "'*.'", "+", "ext", ")", "for", "filename", "in", "files", ":", "filenames", ".", "add", "(", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", ")", "filenames", "=", "list", "(", "filenames", ")", "filenames", ".", "sort", "(", ")", "return", "sorted", "(", "filenames", ")" ]
List files recursively
[ "List", "files", "recurisvely" ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/readers.py#L72-L88
12,346
thunder-project/thunder
thunder/readers.py
listflat
def listflat(path, ext=None): """ List files without recursion """ if os.path.isdir(path): if ext: if ext == 'tif' or ext == 'tiff': files = glob.glob(os.path.join(path, '*.tif')) files = files + glob.glob(os.path.join(path, '*.tiff')) else: files = glob.glob(os.path.join(path, '*.' + ext)) else: files = [os.path.join(path, fname) for fname in os.listdir(path)] else: files = glob.glob(path) # filter out directories files = [fpath for fpath in files if not isinstance(fpath, list) and not os.path.isdir(fpath)] return sorted(files)
python
def listflat(path, ext=None): """ List files without recursion """ if os.path.isdir(path): if ext: if ext == 'tif' or ext == 'tiff': files = glob.glob(os.path.join(path, '*.tif')) files = files + glob.glob(os.path.join(path, '*.tiff')) else: files = glob.glob(os.path.join(path, '*.' + ext)) else: files = [os.path.join(path, fname) for fname in os.listdir(path)] else: files = glob.glob(path) # filter out directories files = [fpath for fpath in files if not isinstance(fpath, list) and not os.path.isdir(fpath)] return sorted(files)
[ "def", "listflat", "(", "path", ",", "ext", "=", "None", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "if", "ext", ":", "if", "ext", "==", "'tif'", "or", "ext", "==", "'tiff'", ":", "files", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "path", ",", "'*.tif'", ")", ")", "files", "=", "files", "+", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "path", ",", "'*.tiff'", ")", ")", "else", ":", "files", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "path", ",", "'*.'", "+", "ext", ")", ")", "else", ":", "files", "=", "[", "os", ".", "path", ".", "join", "(", "path", ",", "fname", ")", "for", "fname", "in", "os", ".", "listdir", "(", "path", ")", "]", "else", ":", "files", "=", "glob", ".", "glob", "(", "path", ")", "# filter out directories", "files", "=", "[", "fpath", "for", "fpath", "in", "files", "if", "not", "isinstance", "(", "fpath", ",", "list", ")", "and", "not", "os", ".", "path", ".", "isdir", "(", "fpath", ")", "]", "return", "sorted", "(", "files", ")" ]
List files without recursion
[ "List", "files", "without", "recursion" ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/readers.py#L90-L107
12,347
thunder-project/thunder
thunder/readers.py
normalize_scheme
def normalize_scheme(path, ext): """ Normalize scheme for paths related to hdfs """ path = addextension(path, ext) parsed = urlparse(path) if parsed.scheme: # this appears to already be a fully-qualified URI return path else: # this looks like a local path spec import os dirname, filename = os.path.split(path) if not os.path.isabs(dirname): # need to make relative local paths absolute dirname = os.path.abspath(dirname) path = os.path.join(dirname, filename) return "file://" + path
python
def normalize_scheme(path, ext): """ Normalize scheme for paths related to hdfs """ path = addextension(path, ext) parsed = urlparse(path) if parsed.scheme: # this appears to already be a fully-qualified URI return path else: # this looks like a local path spec import os dirname, filename = os.path.split(path) if not os.path.isabs(dirname): # need to make relative local paths absolute dirname = os.path.abspath(dirname) path = os.path.join(dirname, filename) return "file://" + path
[ "def", "normalize_scheme", "(", "path", ",", "ext", ")", ":", "path", "=", "addextension", "(", "path", ",", "ext", ")", "parsed", "=", "urlparse", "(", "path", ")", "if", "parsed", ".", "scheme", ":", "# this appears to already be a fully-qualified URI", "return", "path", "else", ":", "# this looks like a local path spec", "import", "os", "dirname", ",", "filename", "=", "os", ".", "path", ".", "split", "(", "path", ")", "if", "not", "os", ".", "path", ".", "isabs", "(", "dirname", ")", ":", "# need to make relative local paths absolute", "dirname", "=", "os", ".", "path", ".", "abspath", "(", "dirname", ")", "path", "=", "os", ".", "path", ".", "join", "(", "dirname", ",", "filename", ")", "return", "\"file://\"", "+", "path" ]
Normalize scheme for paths related to hdfs: return fully-qualified URIs unchanged, and turn bare local paths into absolute file:// URIs
[ "Normalize", "scheme", "for", "paths", "related", "to", "hdfs" ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/readers.py#L620-L638
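Behavioral sketch for normalize_scheme; the exact output depends on the addextension helper, which is not shown here, so the arguments below are illustrative assumptions:

normalize_scheme('s3://bucket/images', 'bin')
# parsed.scheme is 's3' -> the URI is returned as-is (after addextension)
normalize_scheme('images', 'bin')
# no scheme -> made absolute and returned as 'file:///<cwd>/images...'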
12,348
thunder-project/thunder
thunder/readers.py
LocalParallelReader.list
def list(path, ext=None, start=None, stop=None, recursive=False): """ Get sorted list of file paths matching path and extension """ files = listflat(path, ext) if not recursive else listrecursive(path, ext) if len(files) < 1: raise FileNotFoundError('Cannot find files of type "%s" in %s' % (ext if ext else '*', path)) files = select(files, start, stop) return files
python
def list(path, ext=None, start=None, stop=None, recursive=False): """ Get sorted list of file paths matching path and extension """ files = listflat(path, ext) if not recursive else listrecursive(path, ext) if len(files) < 1: raise FileNotFoundError('Cannot find files of type "%s" in %s' % (ext if ext else '*', path)) files = select(files, start, stop) return files
[ "def", "list", "(", "path", ",", "ext", "=", "None", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "recursive", "=", "False", ")", ":", "files", "=", "listflat", "(", "path", ",", "ext", ")", "if", "not", "recursive", "else", "listrecursive", "(", "path", ",", "ext", ")", "if", "len", "(", "files", ")", "<", "1", ":", "raise", "FileNotFoundError", "(", "'Cannot find files of type \"%s\" in %s'", "%", "(", "ext", "if", "ext", "else", "'*'", ",", "path", ")", ")", "files", "=", "select", "(", "files", ",", "start", ",", "stop", ")", "return", "files" ]
Get sorted list of file paths matching path and extension
[ "Get", "sorted", "list", "of", "file", "paths", "matching", "path", "and", "extension" ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/readers.py#L133-L143
12,349
thunder-project/thunder
thunder/readers.py
LocalParallelReader.read
def read(self, path, ext=None, start=None, stop=None, recursive=False, npartitions=None): """ Sets up Spark RDD across files specified by dataPath on local filesystem. Returns an RDD of (integer file index, string buffer, file path) tuples, or a plain list when no Spark engine is active. """ path = uri_to_path(path) files = self.list(path, ext=ext, start=start, stop=stop, recursive=recursive) nfiles = len(files) self.nfiles = nfiles if spark and isinstance(self.engine, spark): npartitions = min(npartitions, nfiles) if npartitions else nfiles rdd = self.engine.parallelize(enumerate(files), npartitions) return rdd.map(lambda kv: (kv[0], readlocal(kv[1]), kv[1])) else: return [(k, readlocal(v), v) for k, v in enumerate(files)]
python
def read(self, path, ext=None, start=None, stop=None, recursive=False, npartitions=None): """ Sets up Spark RDD across files specified by dataPath on local filesystem. Returns an RDD of (integer file index, string buffer, file path) tuples, or a plain list when no Spark engine is active. """ path = uri_to_path(path) files = self.list(path, ext=ext, start=start, stop=stop, recursive=recursive) nfiles = len(files) self.nfiles = nfiles if spark and isinstance(self.engine, spark): npartitions = min(npartitions, nfiles) if npartitions else nfiles rdd = self.engine.parallelize(enumerate(files), npartitions) return rdd.map(lambda kv: (kv[0], readlocal(kv[1]), kv[1])) else: return [(k, readlocal(v), v) for k, v in enumerate(files)]
[ "def", "read", "(", "self", ",", "path", ",", "ext", "=", "None", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "recursive", "=", "False", ",", "npartitions", "=", "None", ")", ":", "path", "=", "uri_to_path", "(", "path", ")", "files", "=", "self", ".", "list", "(", "path", ",", "ext", "=", "ext", ",", "start", "=", "start", ",", "stop", "=", "stop", ",", "recursive", "=", "recursive", ")", "nfiles", "=", "len", "(", "files", ")", "self", ".", "nfiles", "=", "nfiles", "if", "spark", "and", "isinstance", "(", "self", ".", "engine", ",", "spark", ")", ":", "npartitions", "=", "min", "(", "npartitions", ",", "nfiles", ")", "if", "npartitions", "else", "nfiles", "rdd", "=", "self", ".", "engine", ".", "parallelize", "(", "enumerate", "(", "files", ")", ",", "npartitions", ")", "return", "rdd", ".", "map", "(", "lambda", "kv", ":", "(", "kv", "[", "0", "]", ",", "readlocal", "(", "kv", "[", "1", "]", ")", ",", "kv", "[", "1", "]", ")", ")", "else", ":", "return", "[", "(", "k", ",", "readlocal", "(", "v", ")", ",", "v", ")", "for", "k", ",", "v", "in", "enumerate", "(", "files", ")", "]" ]
Sets up Spark RDD across files specified by dataPath on local filesystem. Returns an RDD of (integer file index, string buffer, file path) tuples, or a plain list when no Spark engine is active.
[ "Sets", "up", "Spark", "RDD", "across", "files", "specified", "by", "dataPath", "on", "local", "filesystem", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/readers.py#L145-L162
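A minimal sketch of consuming LocalParallelReader.read; the constructor signature (engine as the first argument) is inferred from the self.engine attribute and not confirmed by this excerpt:

reader = LocalParallelReader(None)          # None: no Spark engine, run serially
records = reader.read('file:///data/frames', ext='bin', npartitions=4)
for index, buf, fname in records:           # (file index, raw bytes, file path)
    print(index, fname, len(buf))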
12,350
thunder-project/thunder
thunder/readers.py
LocalFileReader.list
def list(path, filename=None, start=None, stop=None, recursive=False, directories=False): """ List files specified by dataPath. Datapath may include a single wildcard ('*') in the filename specifier. Returns sorted list of absolute path strings. """ path = uri_to_path(path) if not filename and recursive: return listrecursive(path) if filename: if os.path.isdir(path): path = os.path.join(path, filename) else: path = os.path.join(os.path.dirname(path), filename) else: if os.path.isdir(path) and not directories: path = os.path.join(path, "*") files = glob.glob(path) if not directories: files = [fpath for fpath in files if not os.path.isdir(fpath)] files.sort() files = select(files, start, stop) return files
python
def list(path, filename=None, start=None, stop=None, recursive=False, directories=False): """ List files specified by dataPath. Datapath may include a single wildcard ('*') in the filename specifier. Returns sorted list of absolute path strings. """ path = uri_to_path(path) if not filename and recursive: return listrecursive(path) if filename: if os.path.isdir(path): path = os.path.join(path, filename) else: path = os.path.join(os.path.dirname(path), filename) else: if os.path.isdir(path) and not directories: path = os.path.join(path, "*") files = glob.glob(path) if not directories: files = [fpath for fpath in files if not os.path.isdir(fpath)] files.sort() files = select(files, start, stop) return files
[ "def", "list", "(", "path", ",", "filename", "=", "None", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "recursive", "=", "False", ",", "directories", "=", "False", ")", ":", "path", "=", "uri_to_path", "(", "path", ")", "if", "not", "filename", "and", "recursive", ":", "return", "listrecursive", "(", "path", ")", "if", "filename", ":", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "filename", ")", "else", ":", "path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "path", ")", ",", "filename", ")", "else", ":", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", "and", "not", "directories", ":", "path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "\"*\"", ")", "files", "=", "glob", ".", "glob", "(", "path", ")", "if", "not", "directories", ":", "files", "=", "[", "fpath", "for", "fpath", "in", "files", "if", "not", "os", ".", "path", ".", "isdir", "(", "fpath", ")", "]", "files", ".", "sort", "(", ")", "files", "=", "select", "(", "files", ",", "start", ",", "stop", ")", "return", "files" ]
List files specified by dataPath. Datapath may include a single wildcard ('*') in the filename specifier. Returns sorted list of absolute path strings.
[ "List", "files", "specified", "by", "dataPath", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/readers.py#L173-L202
12,351
thunder-project/thunder
thunder/readers.py
BotoClient.parse_query
def parse_query(query, delim='/'): """ Parse a boto query """ key = '' prefix = '' postfix = '' parsed = urlparse(query) query = parsed.path.lstrip(delim) bucket = parsed.netloc if not parsed.scheme.lower() in ('', "gs", "s3", "s3n"): raise ValueError("Query scheme must be one of '', 'gs', 's3', or 's3n'; " "got: '%s'" % parsed.scheme) storage = parsed.scheme.lower() if not bucket.strip() and query: toks = query.split(delim, 1) bucket = toks[0] if len(toks) == 2: key = toks[1] else: key = '' if not bucket.strip(): raise ValueError("Could not parse bucket name from query string '%s'" % query) tokens = query.split("*") n = len(tokens) if n == 0: pass elif n == 1: key = tokens[0] elif n == 2: index = tokens[0].rfind(delim) if index >= 0: key = tokens[0][:(index + 1)] prefix = tokens[0][(index + 1):] if len(tokens[0]) > (index + 1) else '' else: prefix = tokens[0] postfix = tokens[1] else: raise ValueError("Only one wildcard ('*') allowed in query string, got: '%s'" % query) return storage, bucket, key, prefix, postfix
python
def parse_query(query, delim='/'): """ Parse a boto query """ key = '' prefix = '' postfix = '' parsed = urlparse(query) query = parsed.path.lstrip(delim) bucket = parsed.netloc if not parsed.scheme.lower() in ('', "gs", "s3", "s3n"): raise ValueError("Query scheme must be one of '', 'gs', 's3', or 's3n'; " "got: '%s'" % parsed.scheme) storage = parsed.scheme.lower() if not bucket.strip() and query: toks = query.split(delim, 1) bucket = toks[0] if len(toks) == 2: key = toks[1] else: key = '' if not bucket.strip(): raise ValueError("Could not parse bucket name from query string '%s'" % query) tokens = query.split("*") n = len(tokens) if n == 0: pass elif n == 1: key = tokens[0] elif n == 2: index = tokens[0].rfind(delim) if index >= 0: key = tokens[0][:(index + 1)] prefix = tokens[0][(index + 1):] if len(tokens[0]) > (index + 1) else '' else: prefix = tokens[0] postfix = tokens[1] else: raise ValueError("Only one wildcard ('*') allowed in query string, got: '%s'" % query) return storage, bucket, key, prefix, postfix
[ "def", "parse_query", "(", "query", ",", "delim", "=", "'/'", ")", ":", "key", "=", "''", "prefix", "=", "''", "postfix", "=", "''", "parsed", "=", "urlparse", "(", "query", ")", "query", "=", "parsed", ".", "path", ".", "lstrip", "(", "delim", ")", "bucket", "=", "parsed", ".", "netloc", "if", "not", "parsed", ".", "scheme", ".", "lower", "(", ")", "in", "(", "''", ",", "\"gs\"", ",", "\"s3\"", ",", "\"s3n\"", ")", ":", "raise", "ValueError", "(", "\"Query scheme must be one of '', 'gs', 's3', or 's3n'; \"", "\"got: '%s'\"", "%", "parsed", ".", "scheme", ")", "storage", "=", "parsed", ".", "scheme", ".", "lower", "(", ")", "if", "not", "bucket", ".", "strip", "(", ")", "and", "query", ":", "toks", "=", "query", ".", "split", "(", "delim", ",", "1", ")", "bucket", "=", "toks", "[", "0", "]", "if", "len", "(", "toks", ")", "==", "2", ":", "key", "=", "toks", "[", "1", "]", "else", ":", "key", "=", "''", "if", "not", "bucket", ".", "strip", "(", ")", ":", "raise", "ValueError", "(", "\"Could not parse bucket name from query string '%s'\"", "%", "query", ")", "tokens", "=", "query", ".", "split", "(", "\"*\"", ")", "n", "=", "len", "(", "tokens", ")", "if", "n", "==", "0", ":", "pass", "elif", "n", "==", "1", ":", "key", "=", "tokens", "[", "0", "]", "elif", "n", "==", "2", ":", "index", "=", "tokens", "[", "0", "]", ".", "rfind", "(", "delim", ")", "if", "index", ">=", "0", ":", "key", "=", "tokens", "[", "0", "]", "[", ":", "(", "index", "+", "1", ")", "]", "prefix", "=", "tokens", "[", "0", "]", "[", "(", "index", "+", "1", ")", ":", "]", "if", "len", "(", "tokens", "[", "0", "]", ")", ">", "(", "index", "+", "1", ")", "else", "''", "else", ":", "prefix", "=", "tokens", "[", "0", "]", "postfix", "=", "tokens", "[", "1", "]", "else", ":", "raise", "ValueError", "(", "\"Only one wildcard ('*') allowed in query string, got: '%s'\"", "%", "query", ")", "return", "storage", ",", "bucket", ",", "key", ",", "prefix", ",", "postfix" ]
Parse a boto query
[ "Parse", "a", "boto", "query" ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/readers.py#L233-L278
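Worked examples traced directly from the parsing logic above:

BotoClient.parse_query('s3://mybucket/data/image*.tif')
# -> ('s3', 'mybucket', 'data/', 'image', '.tif')
#    (storage, bucket,   key,     prefix,  postfix)
BotoClient.parse_query('gs://mybucket/data/')
# -> ('gs', 'mybucket', 'data/', '', '')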
12,352
thunder-project/thunder
thunder/readers.py
BotoClient.retrieve_keys
def retrieve_keys(bucket, key, prefix='', postfix='', delim='/', directories=False, recursive=False): """ Retrieve keys from a bucket """ if key and prefix: assert key.endswith(delim) key += prefix # check whether key is a directory if not key.endswith(delim) and key: # check for matching prefix if BotoClient.check_prefix(bucket, key + delim, delim=delim): # found a directory key += delim listdelim = delim if not recursive else None results = bucket.list(prefix=key, delimiter=listdelim) if postfix: func = lambda k_: BotoClient.filter_predicate(k_, postfix, inclusive=True) return filter(func, results) elif not directories: func = lambda k_: BotoClient.filter_predicate(k_, delim, inclusive=False) return filter(func, results) else: return results
python
def retrieve_keys(bucket, key, prefix='', postfix='', delim='/', directories=False, recursive=False): """ Retrieve keys from a bucket """ if key and prefix: assert key.endswith(delim) key += prefix # check whether key is a directory if not key.endswith(delim) and key: # check for matching prefix if BotoClient.check_prefix(bucket, key + delim, delim=delim): # found a directory key += delim listdelim = delim if not recursive else None results = bucket.list(prefix=key, delimiter=listdelim) if postfix: func = lambda k_: BotoClient.filter_predicate(k_, postfix, inclusive=True) return filter(func, results) elif not directories: func = lambda k_: BotoClient.filter_predicate(k_, delim, inclusive=False) return filter(func, results) else: return results
[ "def", "retrieve_keys", "(", "bucket", ",", "key", ",", "prefix", "=", "''", ",", "postfix", "=", "''", ",", "delim", "=", "'/'", ",", "directories", "=", "False", ",", "recursive", "=", "False", ")", ":", "if", "key", "and", "prefix", ":", "assert", "key", ".", "endswith", "(", "delim", ")", "key", "+=", "prefix", "# check whether key is a directory", "if", "not", "key", ".", "endswith", "(", "delim", ")", "and", "key", ":", "# check for matching prefix", "if", "BotoClient", ".", "check_prefix", "(", "bucket", ",", "key", "+", "delim", ",", "delim", "=", "delim", ")", ":", "# found a directory", "key", "+=", "delim", "listdelim", "=", "delim", "if", "not", "recursive", "else", "None", "results", "=", "bucket", ".", "list", "(", "prefix", "=", "key", ",", "delimiter", "=", "listdelim", ")", "if", "postfix", ":", "func", "=", "lambda", "k_", ":", "BotoClient", ".", "filter_predicate", "(", "k_", ",", "postfix", ",", "inclusive", "=", "True", ")", "return", "filter", "(", "func", ",", "results", ")", "elif", "not", "directories", ":", "func", "=", "lambda", "k_", ":", "BotoClient", ".", "filter_predicate", "(", "k_", ",", "delim", ",", "inclusive", "=", "False", ")", "return", "filter", "(", "func", ",", "results", ")", "else", ":", "return", "results" ]
Retrieve keys from a bucket
[ "Retrieve", "keys", "from", "a", "bucket" ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/readers.py#L291-L316
12,353
thunder-project/thunder
thunder/readers.py
BotoParallelReader.getfiles
def getfiles(self, path, ext=None, start=None, stop=None, recursive=False): """ Get scheme, bucket, and keys for a set of files """ from .utils import connection_with_anon, connection_with_gs parse = BotoClient.parse_query(path) scheme = parse[0] bucket_name = parse[1] if scheme == 's3' or scheme == 's3n': conn = connection_with_anon(self.credentials) bucket = conn.get_bucket(parse[1]) elif scheme == 'gs': conn = connection_with_gs(bucket_name) bucket = conn.get_bucket() else: raise NotImplementedError("No file reader implementation for URL scheme " + scheme) keys = BotoClient.retrieve_keys( bucket, parse[2], prefix=parse[3], postfix=parse[4], recursive=recursive) keylist = [key.name for key in keys] if ext: if ext == 'tif' or ext == 'tiff': # match both .tif and .tiff extensions keylist = [keyname for keyname in keylist if keyname.endswith('tif') or keyname.endswith('tiff')] else: keylist = [keyname for keyname in keylist if keyname.endswith(ext)] keylist.sort() keylist = select(keylist, start, stop) return scheme, bucket.name, keylist
python
def getfiles(self, path, ext=None, start=None, stop=None, recursive=False): """ Get scheme, bucket, and keys for a set of files """ from .utils import connection_with_anon, connection_with_gs parse = BotoClient.parse_query(path) scheme = parse[0] bucket_name = parse[1] if scheme == 's3' or scheme == 's3n': conn = connection_with_anon(self.credentials) bucket = conn.get_bucket(parse[1]) elif scheme == 'gs': conn = connection_with_gs(bucket_name) bucket = conn.get_bucket() else: raise NotImplementedError("No file reader implementation for URL scheme " + scheme) keys = BotoClient.retrieve_keys( bucket, parse[2], prefix=parse[3], postfix=parse[4], recursive=recursive) keylist = [key.name for key in keys] if ext: if ext == 'tif' or ext == 'tiff': # match both .tif and .tiff extensions keylist = [keyname for keyname in keylist if keyname.endswith('tif') or keyname.endswith('tiff')] else: keylist = [keyname for keyname in keylist if keyname.endswith(ext)] keylist.sort() keylist = select(keylist, start, stop) return scheme, bucket.name, keylist
[ "def", "getfiles", "(", "self", ",", "path", ",", "ext", "=", "None", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "recursive", "=", "False", ")", ":", "from", ".", "utils", "import", "connection_with_anon", ",", "connection_with_gs", "parse", "=", "BotoClient", ".", "parse_query", "(", "path", ")", "scheme", "=", "parse", "[", "0", "]", "bucket_name", "=", "parse", "[", "1", "]", "if", "scheme", "==", "'s3'", "or", "scheme", "==", "'s3n'", ":", "conn", "=", "connection_with_anon", "(", "self", ".", "credentials", ")", "bucket", "=", "conn", ".", "get_bucket", "(", "parse", "[", "1", "]", ")", "elif", "scheme", "==", "'gs'", ":", "conn", "=", "connection_with_gs", "(", "bucket_name", ")", "bucket", "=", "conn", ".", "get_bucket", "(", ")", "else", ":", "raise", "NotImplementedError", "(", "\"No file reader implementation for URL scheme \"", "+", "scheme", ")", "keys", "=", "BotoClient", ".", "retrieve_keys", "(", "bucket", ",", "parse", "[", "2", "]", ",", "prefix", "=", "parse", "[", "3", "]", ",", "postfix", "=", "parse", "[", "4", "]", ",", "recursive", "=", "recursive", ")", "keylist", "=", "[", "key", ".", "name", "for", "key", "in", "keys", "]", "if", "ext", ":", "if", "ext", "==", "'tif'", "or", "ext", "==", "'tiff'", ":", "keylist", "=", "[", "keyname", "for", "keyname", "in", "keylist", "if", "keyname", ".", "endswith", "(", "'tif'", ")", "]", "keylist", ".", "append", "(", "[", "keyname", "for", "keyname", "in", "keylist", "if", "keyname", ".", "endswith", "(", "'tiff'", ")", "]", ")", "else", ":", "keylist", "=", "[", "keyname", "for", "keyname", "in", "keylist", "if", "keyname", ".", "endswith", "(", "ext", ")", "]", "keylist", ".", "sort", "(", ")", "keylist", "=", "select", "(", "keylist", ",", "start", ",", "stop", ")", "return", "scheme", ",", "bucket", ".", "name", ",", "keylist" ]
Get scheme, bucket, and keys for a set of files
[ "Get", "scheme", "bucket", "and", "keys", "for", "a", "set", "of", "files" ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/readers.py#L328-L361
12,354
thunder-project/thunder
thunder/readers.py
BotoParallelReader.list
def list(self, dataPath, ext=None, start=None, stop=None, recursive=False): """ List files from remote storage """ scheme, bucket_name, keylist = self.getfiles( dataPath, ext=ext, start=start, stop=stop, recursive=recursive) return ["%s:///%s/%s" % (scheme, bucket_name, key) for key in keylist]
python
def list(self, dataPath, ext=None, start=None, stop=None, recursive=False): """ List files from remote storage """ scheme, bucket_name, keylist = self.getfiles( dataPath, ext=ext, start=start, stop=stop, recursive=recursive) return ["%s:///%s/%s" % (scheme, bucket_name, key) for key in keylist]
[ "def", "list", "(", "self", ",", "dataPath", ",", "ext", "=", "None", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "recursive", "=", "False", ")", ":", "scheme", ",", "bucket_name", ",", "keylist", "=", "self", ".", "getfiles", "(", "dataPath", ",", "ext", "=", "ext", ",", "start", "=", "start", ",", "stop", "=", "stop", ",", "recursive", "=", "recursive", ")", "return", "[", "\"%s:///%s/%s\"", "%", "(", "scheme", ",", "bucket_name", ",", "key", ")", "for", "key", "in", "keylist", "]" ]
List files from remote storage
[ "List", "files", "from", "remote", "storage" ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/readers.py#L363-L370
12,355
thunder-project/thunder
thunder/readers.py
BotoParallelReader.read
def read(self, path, ext=None, start=None, stop=None, recursive=False, npartitions=None): """ Sets up Spark RDD across S3 or GS objects specified by dataPath. Returns an RDD of (integer index, string buffer, bucket keyname) tuples, or a plain list when no Spark engine is active. """ from .utils import connection_with_anon, connection_with_gs path = addextension(path, ext) scheme, bucket_name, keylist = self.getfiles( path, start=start, stop=stop, recursive=recursive) if not keylist: raise FileNotFoundError("No objects found for '%s'" % path) credentials = self.credentials self.nfiles = len(keylist) if spark and isinstance(self.engine, spark): def getsplit(kvIter): if scheme == 's3' or scheme == 's3n': conn = connection_with_anon(credentials) bucket = conn.get_bucket(bucket_name) elif scheme == 'gs': conn = boto.storage_uri(bucket_name, 'gs') bucket = conn.get_bucket() else: raise NotImplementedError("No file reader implementation for URL scheme " + scheme) for kv in kvIter: idx, keyname = kv key = bucket.get_key(keyname) buf = key.get_contents_as_string() yield idx, buf, keyname npartitions = min(npartitions, self.nfiles) if npartitions else self.nfiles rdd = self.engine.parallelize(enumerate(keylist), npartitions) return rdd.mapPartitions(getsplit) else: if scheme == 's3' or scheme == 's3n': conn = connection_with_anon(credentials) bucket = conn.get_bucket(bucket_name) elif scheme == 'gs': conn = connection_with_gs(bucket_name) bucket = conn.get_bucket() else: raise NotImplementedError("No file reader implementation for URL scheme " + scheme) def getsplit(kv): idx, keyName = kv key = bucket.get_key(keyName) buf = key.get_contents_as_string() return idx, buf, keyName return [getsplit(kv) for kv in enumerate(keylist)]
python
def read(self, path, ext=None, start=None, stop=None, recursive=False, npartitions=None): """ Sets up Spark RDD across S3 or GS objects specified by dataPath. Returns an RDD of (integer index, string buffer, bucket keyname) tuples, or a plain list when no Spark engine is active. """ from .utils import connection_with_anon, connection_with_gs path = addextension(path, ext) scheme, bucket_name, keylist = self.getfiles( path, start=start, stop=stop, recursive=recursive) if not keylist: raise FileNotFoundError("No objects found for '%s'" % path) credentials = self.credentials self.nfiles = len(keylist) if spark and isinstance(self.engine, spark): def getsplit(kvIter): if scheme == 's3' or scheme == 's3n': conn = connection_with_anon(credentials) bucket = conn.get_bucket(bucket_name) elif scheme == 'gs': conn = boto.storage_uri(bucket_name, 'gs') bucket = conn.get_bucket() else: raise NotImplementedError("No file reader implementation for URL scheme " + scheme) for kv in kvIter: idx, keyname = kv key = bucket.get_key(keyname) buf = key.get_contents_as_string() yield idx, buf, keyname npartitions = min(npartitions, self.nfiles) if npartitions else self.nfiles rdd = self.engine.parallelize(enumerate(keylist), npartitions) return rdd.mapPartitions(getsplit) else: if scheme == 's3' or scheme == 's3n': conn = connection_with_anon(credentials) bucket = conn.get_bucket(bucket_name) elif scheme == 'gs': conn = connection_with_gs(bucket_name) bucket = conn.get_bucket() else: raise NotImplementedError("No file reader implementation for URL scheme " + scheme) def getsplit(kv): idx, keyName = kv key = bucket.get_key(keyName) buf = key.get_contents_as_string() return idx, buf, keyName return [getsplit(kv) for kv in enumerate(keylist)]
[ "def", "read", "(", "self", ",", "path", ",", "ext", "=", "None", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "recursive", "=", "False", ",", "npartitions", "=", "None", ")", ":", "from", ".", "utils", "import", "connection_with_anon", ",", "connection_with_gs", "path", "=", "addextension", "(", "path", ",", "ext", ")", "scheme", ",", "bucket_name", ",", "keylist", "=", "self", ".", "getfiles", "(", "path", ",", "start", "=", "start", ",", "stop", "=", "stop", ",", "recursive", "=", "recursive", ")", "if", "not", "keylist", ":", "raise", "FileNotFoundError", "(", "\"No objects found for '%s'\"", "%", "path", ")", "credentials", "=", "self", ".", "credentials", "self", ".", "nfiles", "=", "len", "(", "keylist", ")", "if", "spark", "and", "isinstance", "(", "self", ".", "engine", ",", "spark", ")", ":", "def", "getsplit", "(", "kvIter", ")", ":", "if", "scheme", "==", "'s3'", "or", "scheme", "==", "'s3n'", ":", "conn", "=", "connection_with_anon", "(", "credentials", ")", "bucket", "=", "conn", ".", "get_bucket", "(", "bucket_name", ")", "elif", "scheme", "==", "'gs'", ":", "conn", "=", "boto", ".", "storage_uri", "(", "bucket_name", ",", "'gs'", ")", "bucket", "=", "conn", ".", "get_bucket", "(", ")", "else", ":", "raise", "NotImplementedError", "(", "\"No file reader implementation for URL scheme \"", "+", "scheme", ")", "for", "kv", "in", "kvIter", ":", "idx", ",", "keyname", "=", "kv", "key", "=", "bucket", ".", "get_key", "(", "keyname", ")", "buf", "=", "key", ".", "get_contents_as_string", "(", ")", "yield", "idx", ",", "buf", ",", "keyname", "npartitions", "=", "min", "(", "npartitions", ",", "self", ".", "nfiles", ")", "if", "npartitions", "else", "self", ".", "nfiles", "rdd", "=", "self", ".", "engine", ".", "parallelize", "(", "enumerate", "(", "keylist", ")", ",", "npartitions", ")", "return", "rdd", ".", "mapPartitions", "(", "getsplit", ")", "else", ":", "if", "scheme", "==", "'s3'", "or", "scheme", "==", "'s3n'", ":", "conn", "=", "connection_with_anon", "(", "credentials", ")", "bucket", "=", "conn", ".", "get_bucket", "(", "bucket_name", ")", "elif", "scheme", "==", "'gs'", ":", "conn", "=", "connection_with_gs", "(", "bucket_name", ")", "bucket", "=", "conn", ".", "get_bucket", "(", ")", "else", ":", "raise", "NotImplementedError", "(", "\"No file reader implementation for URL scheme \"", "+", "scheme", ")", "def", "getsplit", "(", "kv", ")", ":", "idx", ",", "keyName", "=", "kv", "key", "=", "bucket", ".", "get_key", "(", "keyName", ")", "buf", "=", "key", ".", "get_contents_as_string", "(", ")", "return", "idx", ",", "buf", ",", "keyName", "return", "[", "getsplit", "(", "kv", ")", "for", "kv", "in", "enumerate", "(", "keylist", ")", "]" ]
Sets up Spark RDD across S3 or GS objects specified by dataPath. Returns an RDD of (integer index, string buffer, bucket keyname) tuples, or a plain list when no Spark engine is active.
[ "Sets", "up", "Spark", "RDD", "across", "S3", "or", "GS", "objects", "specified", "by", "dataPath", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/readers.py#L372-L430
12,356
thunder-project/thunder
thunder/readers.py
BotoFileReader.getkeys
def getkeys(self, path, filename=None, directories=False, recursive=False): """ Get matching keys for a path """ from .utils import connection_with_anon, connection_with_gs parse = BotoClient.parse_query(path) scheme = parse[0] bucket_name = parse[1] key = parse[2] if scheme == 's3' or scheme == 's3n': conn = connection_with_anon(self.credentials) bucket = conn.get_bucket(bucket_name) elif scheme == 'gs': conn = connection_with_gs(bucket_name) bucket = conn.get_bucket() else: raise NotImplementedError("No file reader implementation for URL scheme " + scheme) if filename: if not key.endswith("/"): if self.check_prefix(bucket, key + "/"): key += "/" else: index = key.rfind("/") if index >= 0: key = key[:(index+1)] else: key = "" key += filename keylist = BotoClient.retrieve_keys(bucket, key, prefix=parse[3], postfix=parse[4], directories=directories, recursive=recursive) return scheme, keylist
python
def getkeys(self, path, filename=None, directories=False, recursive=False): """ Get matching keys for a path """ from .utils import connection_with_anon, connection_with_gs parse = BotoClient.parse_query(path) scheme = parse[0] bucket_name = parse[1] key = parse[2] if scheme == 's3' or scheme == 's3n': conn = connection_with_anon(self.credentials) bucket = conn.get_bucket(bucket_name) elif scheme == 'gs': conn = connection_with_gs(bucket_name) bucket = conn.get_bucket() else: raise NotImplementedError("No file reader implementation for URL scheme " + scheme) if filename: if not key.endswith("/"): if self.check_prefix(bucket, key + "/"): key += "/" else: index = key.rfind("/") if index >= 0: key = key[:(index+1)] else: key = "" key += filename keylist = BotoClient.retrieve_keys(bucket, key, prefix=parse[3], postfix=parse[4], directories=directories, recursive=recursive) return scheme, keylist
[ "def", "getkeys", "(", "self", ",", "path", ",", "filename", "=", "None", ",", "directories", "=", "False", ",", "recursive", "=", "False", ")", ":", "from", ".", "utils", "import", "connection_with_anon", ",", "connection_with_gs", "parse", "=", "BotoClient", ".", "parse_query", "(", "path", ")", "scheme", "=", "parse", "[", "0", "]", "bucket_name", "=", "parse", "[", "1", "]", "key", "=", "parse", "[", "2", "]", "if", "scheme", "==", "'s3'", "or", "scheme", "==", "'s3n'", ":", "conn", "=", "connection_with_anon", "(", "self", ".", "credentials", ")", "bucket", "=", "conn", ".", "get_bucket", "(", "bucket_name", ")", "elif", "scheme", "==", "'gs'", ":", "conn", "=", "connection_with_gs", "(", "bucket_name", ")", "bucket", "=", "conn", ".", "get_bucket", "(", ")", "else", ":", "raise", "NotImplementedError", "(", "\"No file reader implementation for URL scheme \"", "+", "scheme", ")", "if", "filename", ":", "if", "not", "key", ".", "endswith", "(", "\"/\"", ")", ":", "if", "self", ".", "check_prefix", "(", "bucket", ",", "key", "+", "\"/\"", ")", ":", "key", "+=", "\"/\"", "else", ":", "index", "=", "key", ".", "rfind", "(", "\"/\"", ")", "if", "index", ">=", "0", ":", "key", "=", "key", "[", ":", "(", "index", "+", "1", ")", "]", "else", ":", "key", "=", "\"\"", "key", "+=", "filename", "keylist", "=", "BotoClient", ".", "retrieve_keys", "(", "bucket", ",", "key", ",", "prefix", "=", "parse", "[", "3", "]", ",", "postfix", "=", "parse", "[", "4", "]", ",", "directories", "=", "directories", ",", "recursive", "=", "recursive", ")", "return", "scheme", ",", "keylist" ]
Get matching keys for a path
[ "Get", "matching", "keys", "for", "a", "path" ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/readers.py#L437-L472
12,357
thunder-project/thunder
thunder/readers.py
BotoFileReader.getkey
def getkey(self, path, filename=None): """ Get single matching key for a path """ scheme, keys = self.getkeys(path, filename=filename) try: key = next(keys) except StopIteration: raise FileNotFoundError("Could not find object for: '%s'" % path) # we expect to only have a single key returned nextKey = None try: nextKey = next(keys) except StopIteration: pass if nextKey: raise ValueError("Found multiple keys for: '%s'" % path) return scheme, key
python
def getkey(self, path, filename=None): """ Get single matching key for a path """ scheme, keys = self.getkeys(path, filename=filename) try: key = next(keys) except StopIteration: raise FileNotFoundError("Could not find object for: '%s'" % path) # we expect to only have a single key returned nextKey = None try: nextKey = next(keys) except StopIteration: pass if nextKey: raise ValueError("Found multiple keys for: '%s'" % path) return scheme, key
[ "def", "getkey", "(", "self", ",", "path", ",", "filename", "=", "None", ")", ":", "scheme", ",", "keys", "=", "self", ".", "getkeys", "(", "path", ",", "filename", "=", "filename", ")", "try", ":", "key", "=", "next", "(", "keys", ")", "except", "StopIteration", ":", "raise", "FileNotFoundError", "(", "\"Could not find object for: '%s'\"", "%", "path", ")", "# we expect to only have a single key returned", "nextKey", "=", "None", "try", ":", "nextKey", "=", "next", "(", "keys", ")", "except", "StopIteration", ":", "pass", "if", "nextKey", ":", "raise", "ValueError", "(", "\"Found multiple keys for: '%s'\"", "%", "path", ")", "return", "scheme", ",", "key" ]
Get single matching key for a path
[ "Get", "single", "matching", "key", "for", "a", "path" ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/readers.py#L474-L492
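One subtlety worth noting: getkey calls next() on the result of getkeys, so it relies on retrieve_keys returning a lazy iterator (a Python 3 filter object) rather than a list; the uniqueness check it performs, sketched standalone:

keys = iter(['data/a.bin'])     # pretend getkeys matched exactly one key
key = next(keys)                # first match
try:
    next(keys)                  # any second match would mean the query was ambiguous
except StopIteration:
    pass                        # exactly one key, as expected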
12,358
thunder-project/thunder
thunder/readers.py
BotoFileReader.list
def list(self, path, filename=None, start=None, stop=None, recursive=False, directories=False): """ List objects specified by path. Returns sorted list of 'gs://' or 's3n://' URIs. """ storageScheme, keys = self.getkeys( path, filename=filename, directories=directories, recursive=recursive) keys = [storageScheme + ":///" + key.bucket.name + "/" + key.name for key in keys] keys.sort() keys = select(keys, start, stop) return keys
python
def list(self, path, filename=None, start=None, stop=None, recursive=False, directories=False): """ List objects specified by path. Returns sorted list of 'gs://' or 's3n://' URIs. """ storageScheme, keys = self.getkeys( path, filename=filename, directories=directories, recursive=recursive) keys = [storageScheme + ":///" + key.bucket.name + "/" + key.name for key in keys] keys.sort() keys = select(keys, start, stop) return keys
[ "def", "list", "(", "self", ",", "path", ",", "filename", "=", "None", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "recursive", "=", "False", ",", "directories", "=", "False", ")", ":", "storageScheme", ",", "keys", "=", "self", ".", "getkeys", "(", "path", ",", "filename", "=", "filename", ",", "directories", "=", "directories", ",", "recursive", "=", "recursive", ")", "keys", "=", "[", "storageScheme", "+", "\":///\"", "+", "key", ".", "bucket", ".", "name", "+", "\"/\"", "+", "key", ".", "name", "for", "key", "in", "keys", "]", "keys", ".", "sort", "(", ")", "keys", "=", "select", "(", "keys", ",", "start", ",", "stop", ")", "return", "keys" ]
List objects specified by path. Returns sorted list of 'gs://' or 's3n://' URIs.
[ "List", "objects", "specified", "by", "path", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/readers.py#L494-L505
12,359
thunder-project/thunder
thunder/readers.py
BotoFileReader.read
def read(self, path, filename=None, offset=None, size=-1): """ Read a file specified by path. """ storageScheme, key = self.getkey(path, filename=filename) if offset or (size > -1): if not offset: offset = 0 if size > -1: sizeStr = offset + size - 1 # range header is inclusive else: sizeStr = "" headers = {"Range": "bytes=%d-%s" % (offset, sizeStr)} return key.get_contents_as_string(headers=headers) else: return key.get_contents_as_string()
python
def read(self, path, filename=None, offset=None, size=-1): """ Read a file specified by path. """ storageScheme, key = self.getkey(path, filename=filename) if offset or (size > -1): if not offset: offset = 0 if size > -1: sizeStr = offset + size - 1 # range header is inclusive else: sizeStr = "" headers = {"Range": "bytes=%d-%s" % (offset, sizeStr)} return key.get_contents_as_string(headers=headers) else: return key.get_contents_as_string()
[ "def", "read", "(", "self", ",", "path", ",", "filename", "=", "None", ",", "offset", "=", "None", ",", "size", "=", "-", "1", ")", ":", "storageScheme", ",", "key", "=", "self", ".", "getkey", "(", "path", ",", "filename", "=", "filename", ")", "if", "offset", "or", "(", "size", ">", "-", "1", ")", ":", "if", "not", "offset", ":", "offset", "=", "0", "if", "size", ">", "-", "1", ":", "sizeStr", "=", "offset", "+", "size", "-", "1", "# range header is inclusive", "else", ":", "sizeStr", "=", "\"\"", "headers", "=", "{", "\"Range\"", ":", "\"bytes=%d-%s\"", "%", "(", "offset", ",", "sizeStr", ")", "}", "return", "key", ".", "get_contents_as_string", "(", "headers", "=", "headers", ")", "else", ":", "return", "key", ".", "get_contents_as_string", "(", ")" ]
Read a file specified by path.
[ "Read", "a", "file", "specified", "by", "path", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/readers.py#L507-L523
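The range arithmetic above maps directly onto an inclusive HTTP Range header; for example:

offset, size = 100, 50
headers = {"Range": "bytes=%d-%s" % (offset, offset + size - 1)}
# -> {'Range': 'bytes=100-149'}, i.e. exactly 50 bytes starting at byte 100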
12,360
thunder-project/thunder
thunder/readers.py
BotoFileReader.open
def open(self, path, filename=None): """ Open a file specified by path. """ scheme, key = self.getkey(path, filename=filename) return BotoReadFileHandle(scheme, key)
python
def open(self, path, filename=None): """ Open a file specified by path. """ scheme, key = self.getkey(path, filename=filename) return BotoReadFileHandle(scheme, key)
[ "def", "open", "(", "self", ",", "path", ",", "filename", "=", "None", ")", ":", "scheme", ",", "key", "=", "self", ".", "getkey", "(", "path", ",", "filename", "=", "filename", ")", "return", "BotoReadFileHandle", "(", "scheme", ",", "key", ")" ]
Open a file specified by path.
[ "Open", "a", "file", "specified", "by", "path", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/readers.py#L525-L530
12,361
thunder-project/thunder
thunder/utils.py
check_path
def check_path(path, credentials=None): """ Check that specified output path does not already exist The ValueError message will suggest calling with overwrite=True; this function is expected to be called from the various output methods that accept an 'overwrite' keyword argument. """ from thunder.readers import get_file_reader reader = get_file_reader(path)(credentials=credentials) existing = reader.list(path, directories=True) if existing: raise ValueError('Path %s appears to already exist. Specify a new directory, ' 'or call with overwrite=True to overwrite.' % path)
python
def check_path(path, credentials=None): """ Check that specified output path does not already exist The ValueError message will suggest calling with overwrite=True; this function is expected to be called from the various output methods that accept an 'overwrite' keyword argument. """ from thunder.readers import get_file_reader reader = get_file_reader(path)(credentials=credentials) existing = reader.list(path, directories=True) if existing: raise ValueError('Path %s appears to already exist. Specify a new directory, ' 'or call with overwrite=True to overwrite.' % path)
[ "def", "check_path", "(", "path", ",", "credentials", "=", "None", ")", ":", "from", "thunder", ".", "readers", "import", "get_file_reader", "reader", "=", "get_file_reader", "(", "path", ")", "(", "credentials", "=", "credentials", ")", "existing", "=", "reader", ".", "list", "(", "path", ",", "directories", "=", "True", ")", "if", "existing", ":", "raise", "ValueError", "(", "'Path %s appears to already exist. Specify a new directory, '", "'or call with overwrite=True to overwrite.'", "%", "path", ")" ]
Check that specified output path does not already exist The ValueError message will suggest calling with overwrite=True; this function is expected to be called from the various output methods that accept an 'overwrite' keyword argument.
[ "Check", "that", "specified", "output", "path", "does", "not", "already", "exist" ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/utils.py#L18-L31
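A sketch of the calling convention the docstring describes; tofiles and write_output are hypothetical names:

def tofiles(data, path, overwrite=False, credentials=None):
    if not overwrite:
        check_path(path, credentials=credentials)   # raises ValueError if path exists
    write_output(data, path)                        # placeholder for the actual write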
12,362
thunder-project/thunder
thunder/utils.py
connection_with_anon
def connection_with_anon(credentials, anon=True): """ Connect to S3 with automatic handling for anonymous access. Parameters ---------- credentials : dict AWS access key ('access') and secret access key ('secret') anon : boolean, optional, default = True Whether to make an anonymous connection if credentials fail to authenticate """ from boto.s3.connection import S3Connection from boto.exception import NoAuthHandlerFound try: conn = S3Connection(aws_access_key_id=credentials['access'], aws_secret_access_key=credentials['secret']) return conn except NoAuthHandlerFound: if anon: conn = S3Connection(anon=True) return conn else: raise
python
def connection_with_anon(credentials, anon=True): """ Connect to S3 with automatic handling for anonymous access. Parameters ---------- credentials : dict AWS access key ('access') and secret access key ('secret') anon : boolean, optional, default = True Whether to make an anonymous connection if credentials fail to authenticate """ from boto.s3.connection import S3Connection from boto.exception import NoAuthHandlerFound try: conn = S3Connection(aws_access_key_id=credentials['access'], aws_secret_access_key=credentials['secret']) return conn except NoAuthHandlerFound: if anon: conn = S3Connection(anon=True) return conn else: raise
[ "def", "connection_with_anon", "(", "credentials", ",", "anon", "=", "True", ")", ":", "from", "boto", ".", "s3", ".", "connection", "import", "S3Connection", "from", "boto", ".", "exception", "import", "NoAuthHandlerFound", "try", ":", "conn", "=", "S3Connection", "(", "aws_access_key_id", "=", "credentials", "[", "'access'", "]", ",", "aws_secret_access_key", "=", "credentials", "[", "'secret'", "]", ")", "return", "conn", "except", "NoAuthHandlerFound", ":", "if", "anon", ":", "conn", "=", "S3Connection", "(", "anon", "=", "True", ")", "return", "conn", "else", ":", "raise" ]
Connect to S3 with automatic handling for anonymous access. Parameters ---------- credentials : dict AWS access key ('access') and secret access key ('secret') anon : boolean, optional, default = True Whether to make an anonymous connection if credentials fail to authenticate
[ "Connect", "to", "S3", "with", "automatic", "handling", "for", "anonymous", "access", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/utils.py#L33-L58
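Usage sketch; the key values and bucket name are placeholders:

credentials = {'access': 'AKIA...', 'secret': '...'}   # placeholder keys
conn = connection_with_anon(credentials)               # anonymous fallback on auth failure
bucket = conn.get_bucket('my-bucket')                  # hypothetical bucket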
12,363
thunder-project/thunder
thunder/writers.py
BotoWriter.activate
def activate(self, path, isdirectory): """ Set up a boto connection. """ from .utils import connection_with_anon, connection_with_gs parsed = BotoClient.parse_query(path) scheme = parsed[0] bucket_name = parsed[1] key = parsed[2] if scheme == 's3' or scheme == 's3n': conn = connection_with_anon(self.credentials) bucket = conn.get_bucket(bucket_name) elif scheme == 'gs': conn = connection_with_gs(bucket_name) bucket = conn.get_bucket() else: raise NotImplementedError("No file reader implementation for URL scheme " + scheme) if isdirectory and (not key.endswith("/")): key += "/" self._scheme = scheme self._conn = conn self._key = key self._bucket = bucket self._active = True
python
def activate(self, path, isdirectory): """ Set up a boto connection. """ from .utils import connection_with_anon, connection_with_gs parsed = BotoClient.parse_query(path) scheme = parsed[0] bucket_name = parsed[1] key = parsed[2] if scheme == 's3' or scheme == 's3n': conn = connection_with_anon(self.credentials) bucket = conn.get_bucket(bucket_name) elif scheme == 'gs': conn = connection_with_gs(bucket_name) bucket = conn.get_bucket() else: raise NotImplementedError("No file reader implementation for URL scheme " + scheme) if isdirectory and (not key.endswith("/")): key += "/" self._scheme = scheme self._conn = conn self._key = key self._bucket = bucket self._active = True
[ "def", "activate", "(", "self", ",", "path", ",", "isdirectory", ")", ":", "from", ".", "utils", "import", "connection_with_anon", ",", "connection_with_gs", "parsed", "=", "BotoClient", ".", "parse_query", "(", "path", ")", "scheme", "=", "parsed", "[", "0", "]", "bucket_name", "=", "parsed", "[", "1", "]", "key", "=", "parsed", "[", "2", "]", "if", "scheme", "==", "'s3'", "or", "scheme", "==", "'s3n'", ":", "conn", "=", "connection_with_anon", "(", "self", ".", "credentials", ")", "bucket", "=", "conn", ".", "get_bucket", "(", "bucket_name", ")", "elif", "scheme", "==", "'gs'", ":", "conn", "=", "connection_with_gs", "(", "bucket_name", ")", "bucket", "=", "conn", ".", "get_bucket", "(", ")", "else", ":", "raise", "NotImplementedError", "(", "\"No file reader implementation for URL scheme \"", "+", "scheme", ")", "if", "isdirectory", "and", "(", "not", "key", ".", "endswith", "(", "\"/\"", ")", ")", ":", "key", "+=", "\"/\"", "self", ".", "_scheme", "=", "scheme", "self", ".", "_conn", "=", "conn", "self", ".", "_key", "=", "key", "self", ".", "_bucket", "=", "bucket", "self", ".", "_active", "=", "True" ]
Set up a boto connection.
[ "Set", "up", "a", "boto", "connection", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/writers.py#L50-L78
12,364
thunder-project/thunder
thunder/images/writers.py
topng
def topng(images, path, prefix="image", overwrite=False, credentials=None): """ Write out PNG files for 2d image data. See also -------- thunder.data.images.topng """ value_shape = images.value_shape if not len(value_shape) in [2, 3]: raise ValueError("Only 2D or 3D images can be exported to png, " "images are %d-dimensional." % len(value_shape)) from scipy.misc import imsave from io import BytesIO from thunder.writers import get_parallel_writer def tobuffer(kv): key, img = kv fname = prefix+"-"+"%05d.png" % int(key) bytebuf = BytesIO() imsave(bytebuf, img, format='PNG') return fname, bytebuf.getvalue() writer = get_parallel_writer(path)(path, overwrite=overwrite, credentials=credentials) images.foreach(lambda x: writer.write(tobuffer(x)))
python
def topng(images, path, prefix="image", overwrite=False, credentials=None): """ Write out PNG files for 2d image data. See also -------- thunder.data.images.topng """ value_shape = images.value_shape if not len(value_shape) in [2, 3]: raise ValueError("Only 2D or 3D images can be exported to png, " "images are %d-dimensional." % len(value_shape)) from scipy.misc import imsave from io import BytesIO from thunder.writers import get_parallel_writer def tobuffer(kv): key, img = kv fname = prefix+"-"+"%05d.png" % int(key) bytebuf = BytesIO() imsave(bytebuf, img, format='PNG') return fname, bytebuf.getvalue() writer = get_parallel_writer(path)(path, overwrite=overwrite, credentials=credentials) images.foreach(lambda x: writer.write(tobuffer(x)))
[ "def", "topng", "(", "images", ",", "path", ",", "prefix", "=", "\"image\"", ",", "overwrite", "=", "False", ",", "credentials", "=", "None", ")", ":", "value_shape", "=", "images", ".", "value_shape", "if", "not", "len", "(", "value_shape", ")", "in", "[", "2", ",", "3", "]", ":", "raise", "ValueError", "(", "\"Only 2D or 3D images can be exported to png, \"", "\"images are %d-dimensional.\"", "%", "len", "(", "value_shape", ")", ")", "from", "scipy", ".", "misc", "import", "imsave", "from", "io", "import", "BytesIO", "from", "thunder", ".", "writers", "import", "get_parallel_writer", "def", "tobuffer", "(", "kv", ")", ":", "key", ",", "img", "=", "kv", "fname", "=", "prefix", "+", "\"-\"", "+", "\"%05d.png\"", "%", "int", "(", "key", ")", "bytebuf", "=", "BytesIO", "(", ")", "imsave", "(", "bytebuf", ",", "img", ",", "format", "=", "'PNG'", ")", "return", "fname", ",", "bytebuf", ".", "getvalue", "(", ")", "writer", "=", "get_parallel_writer", "(", "path", ")", "(", "path", ",", "overwrite", "=", "overwrite", ",", "credentials", "=", "credentials", ")", "images", ".", "foreach", "(", "lambda", "x", ":", "writer", ".", "write", "(", "tobuffer", "(", "x", ")", ")", ")" ]
Write out PNG files for 2d image data. See also -------- thunder.data.images.topng
[ "Write", "out", "PNG", "files", "for", "2d", "image", "data", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/writers.py#L4-L29
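Note that scipy.misc.imsave was removed in SciPy 1.2, so the tobuffer above fails on modern environments; a drop-in sketch using imageio instead (a substitute, not this project's code):

import imageio
from io import BytesIO

def tobuffer(kv, prefix='image'):
    key, img = kv
    fname = prefix + "-" + "%05d.png" % int(key)
    buf = BytesIO()
    imageio.imwrite(buf, img, format='png')   # stands in for scipy.misc.imsave
    return fname, buf.getvalue()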
12,365
thunder-project/thunder
thunder/images/writers.py
tobinary
def tobinary(images, path, prefix="image", overwrite=False, credentials=None): """ Write out images as binary files. See also -------- thunder.data.images.tobinary """ from thunder.writers import get_parallel_writer def tobuffer(kv): key, img = kv fname = prefix + "-" + "%05d.bin" % int(key) return fname, img.copy() writer = get_parallel_writer(path)(path, overwrite=overwrite, credentials=credentials) images.foreach(lambda x: writer.write(tobuffer(x))) config(path, list(images.value_shape), images.dtype, overwrite=overwrite)
python
def tobinary(images, path, prefix="image", overwrite=False, credentials=None): """ Write out images as binary files. See also -------- thunder.data.images.tobinary """ from thunder.writers import get_parallel_writer def tobuffer(kv): key, img = kv fname = prefix + "-" + "%05d.bin" % int(key) return fname, img.copy() writer = get_parallel_writer(path)(path, overwrite=overwrite, credentials=credentials) images.foreach(lambda x: writer.write(tobuffer(x))) config(path, list(images.value_shape), images.dtype, overwrite=overwrite)
[ "def", "tobinary", "(", "images", ",", "path", ",", "prefix", "=", "\"image\"", ",", "overwrite", "=", "False", ",", "credentials", "=", "None", ")", ":", "from", "thunder", ".", "writers", "import", "get_parallel_writer", "def", "tobuffer", "(", "kv", ")", ":", "key", ",", "img", "=", "kv", "fname", "=", "prefix", "+", "\"-\"", "+", "\"%05d.bin\"", "%", "int", "(", "key", ")", "return", "fname", ",", "img", ".", "copy", "(", ")", "writer", "=", "get_parallel_writer", "(", "path", ")", "(", "path", ",", "overwrite", "=", "overwrite", ",", "credentials", "=", "credentials", ")", "images", ".", "foreach", "(", "lambda", "x", ":", "writer", ".", "write", "(", "tobuffer", "(", "x", ")", ")", ")", "config", "(", "path", ",", "list", "(", "images", ".", "value_shape", ")", ",", "images", ".", "dtype", ",", "overwrite", "=", "overwrite", ")" ]
Write out images as binary files. See also -------- thunder.data.images.tobinary
[ "Write", "out", "images", "as", "binary", "files", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/writers.py#L58-L75
12,366
lidaobing/python-lunardate
lunardate.py
yearInfo2yearDay
def yearInfo2yearDay(yearInfo): '''calculate the days in a lunar year from the lunar year's info >>> yearInfo2yearDay(0) # no leap month, and every month has 29 days. 348 >>> yearInfo2yearDay(1) # 1 leap month, and every month has 29 days. 377 >>> yearInfo2yearDay((2**12-1)*16) # no leap month, and every month has 30 days. 360 >>> yearInfo2yearDay((2**13-1)*16+1) # 1 leap month, and every month has 30 days. 390 >>> # 1 leap month, and every normal month has 30 days, and leap month has 29 days. >>> yearInfo2yearDay((2**12-1)*16+1) 389 ''' yearInfo = int(yearInfo) res = 29 * 12 leap = False if yearInfo % 16 != 0: leap = True res += 29 yearInfo //= 16 for i in range(12 + leap): if yearInfo % 2 == 1: res += 1 yearInfo //= 2 return res
python
def yearInfo2yearDay(yearInfo): '''calculate the days in a lunar year from the lunar year's info >>> yearInfo2yearDay(0) # no leap month, and every month has 29 days. 348 >>> yearInfo2yearDay(1) # 1 leap month, and every month has 29 days. 377 >>> yearInfo2yearDay((2**12-1)*16) # no leap month, and every month has 30 days. 360 >>> yearInfo2yearDay((2**13-1)*16+1) # 1 leap month, and every month has 30 days. 390 >>> # 1 leap month, and every normal month has 30 days, and leap month has 29 days. >>> yearInfo2yearDay((2**12-1)*16+1) 389 ''' yearInfo = int(yearInfo) res = 29 * 12 leap = False if yearInfo % 16 != 0: leap = True res += 29 yearInfo //= 16 for i in range(12 + leap): if yearInfo % 2 == 1: res += 1 yearInfo //= 2 return res
[ "def", "yearInfo2yearDay", "(", "yearInfo", ")", ":", "yearInfo", "=", "int", "(", "yearInfo", ")", "res", "=", "29", "*", "12", "leap", "=", "False", "if", "yearInfo", "%", "16", "!=", "0", ":", "leap", "=", "True", "res", "+=", "29", "yearInfo", "//=", "16", "for", "i", "in", "range", "(", "12", "+", "leap", ")", ":", "if", "yearInfo", "%", "2", "==", "1", ":", "res", "+=", "1", "yearInfo", "//=", "2", "return", "res" ]
calculate the days in a lunar year from the lunar year's info >>> yearInfo2yearDay(0) # no leap month, and every month has 29 days. 348 >>> yearInfo2yearDay(1) # 1 leap month, and every month has 29 days. 377 >>> yearInfo2yearDay((2**12-1)*16) # no leap month, and every month has 30 days. 360 >>> yearInfo2yearDay((2**13-1)*16+1) # 1 leap month, and every month has 30 days. 390 >>> # 1 leap month, and every normal month has 30 days, and leap month has 29 days. >>> yearInfo2yearDay((2**12-1)*16+1) 389
[ "calculate", "the", "days", "in", "a", "lunar", "year", "from", "the", "lunar", "year", "s", "info" ]
261334a27d772489c9fc70b8ecef129ba3c13118
https://github.com/lidaobing/python-lunardate/blob/261334a27d772489c9fc70b8ecef129ba3c13118/lunardate.py#L367-L397
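The arithmetic above implies a bit layout: the low four bits flag a leap month, and the remaining bits mark which months have 30 days. A decoding sketch; the bit order shown (least-significant bit first) is an assumption, since the total day count does not depend on it:

def month_days(yearInfo):
    leap = yearInfo % 16 != 0    # nonzero low nibble -> a leap month exists
    bits = yearInfo // 16
    days = []
    for _ in range(12 + leap):
        days.append(30 if bits % 2 else 29)
        bits //= 2
    return days

sum(month_days(1))    # -> 377, matching the doctest above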
12,367
plone/plone.app.mosaic
src/plone/app/mosaic/browser/upload.py
MosaicUploadView.cleanupFilename
def cleanupFilename(self, name): """Generate a unique id which doesn't match the system generated ids""" context = self.context id = '' name = name.replace('\\', '/') # Fixup Windows filenames name = name.split('/')[-1] # Throw away any path part. for c in name: if c.isalnum() or c in '._': id += c # Race condition here, but not a lot we can do about that if context.check_id(id) is None and getattr(context, id, None) is None: return id # Now make the id unique count = 1 while 1: if count == 1: sc = '' else: sc = str(count) newid = "copy{0:s}_of_{1:s}".format(sc, id) if context.check_id(newid) is None \ and getattr(context, newid, None) is None: return newid count += 1
python
def cleanupFilename(self, name): """Generate a unique id which doesn't match the system generated ids""" context = self.context id = '' name = name.replace('\\', '/') # Fixup Windows filenames name = name.split('/')[-1] # Throw away any path part. for c in name: if c.isalnum() or c in '._': id += c # Race condition here, but not a lot we can do about that if context.check_id(id) is None and getattr(context, id, None) is None: return id # Now make the id unique count = 1 while 1: if count == 1: sc = '' else: sc = str(count) newid = "copy{0:s}_of_{1:s}".format(sc, id) if context.check_id(newid) is None \ and getattr(context, newid, None) is None: return newid count += 1
[ "def", "cleanupFilename", "(", "self", ",", "name", ")", ":", "context", "=", "self", ".", "context", "id", "=", "''", "name", "=", "name", ".", "replace", "(", "'\\\\'", ",", "'/'", ")", "# Fixup Windows filenames", "name", "=", "name", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", "# Throw away any path part.", "for", "c", "in", "name", ":", "if", "c", ".", "isalnum", "(", ")", "or", "c", "in", "'._'", ":", "id", "+=", "c", "# Raise condition here, but not a lot we can do about that", "if", "context", ".", "check_id", "(", "id", ")", "is", "None", "and", "getattr", "(", "context", ",", "id", ",", "None", ")", "is", "None", ":", "return", "id", "# Now make the id unique", "count", "=", "1", "while", "1", ":", "if", "count", "==", "1", ":", "sc", "=", "''", "else", ":", "sc", "=", "str", "(", "count", ")", "newid", "=", "\"copy{0:s}_of_{1:s}\"", ".", "format", "(", "sc", ",", "id", ")", "if", "context", ".", "check_id", "(", "newid", ")", "is", "None", "and", "getattr", "(", "context", ",", "newid", ",", "None", ")", "is", "None", ":", "return", "newid", "count", "+=", "1" ]
Generate a unique id which doesn't match the system generated ids
[ "Generate", "a", "unique", "id", "which", "doesn", "t", "match", "the", "system", "generated", "ids" ]
73b6acb18905025a76b239c86de9543ed9350991
https://github.com/plone/plone.app.mosaic/blob/73b6acb18905025a76b239c86de9543ed9350991/src/plone/app/mosaic/browser/upload.py#L80-L106
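A worked trace of the id generation above (the surrounding context object is hypothetical):

# 'My Report (v2).pdf' keeps only alphanumerics, '.' and '_' -> 'MyReportv2.pdf'
# If that id is already taken, successive candidates are tried in order:
#   'copy_of_MyReportv2.pdf' (count == 1 gives an empty suffix),
#   'copy2_of_MyReportv2.pdf', 'copy3_of_MyReportv2.pdf', ...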
12,368
plone/plone.app.mosaic
src/plone/app/mosaic/browser/main_template.py
parse_data_slots
def parse_data_slots(value): """Parse data-slots value into slots used to wrap node, prepend to node or append to node. >>> parse_data_slots('') ([], [], []) >>> parse_data_slots('foo bar') (['foo', 'bar'], [], []) >>> parse_data_slots('foo bar > foobar') (['foo', 'bar'], ['foobar'], []) >>> parse_data_slots('> foobar') ([], ['foobar'], []) >>> parse_data_slots('> foo * bar') ([], ['foo'], ['bar']) >>> parse_data_slots('foobar > foo * bar') (['foobar'], ['foo'], ['bar']) >>> parse_data_slots('foo > * bar') (['foo'], [], ['bar']) """ value = unquote(value) if '>' in value: wrappers, children = value.split('>', 1) else: wrappers = value children = '' if '*' in children: prepends, appends = children.split('*', 1) else: prepends = children appends = '' wrappers = list(filter(bool, list(map(str.strip, wrappers.split())))) prepends = list(filter(bool, list(map(str.strip, prepends.split())))) appends = list(filter(bool, list(map(str.strip, appends.split())))) return wrappers, prepends, appends
python
def parse_data_slots(value): """Parse data-slots value into slots used to wrap node, prepend to node or append to node. >>> parse_data_slots('') ([], [], []) >>> parse_data_slots('foo bar') (['foo', 'bar'], [], []) >>> parse_data_slots('foo bar > foobar') (['foo', 'bar'], ['foobar'], []) >>> parse_data_slots('> foobar') ([], ['foobar'], []) >>> parse_data_slots('> foo * bar') ([], ['foo'], ['bar']) >>> parse_data_slots('foobar > foo * bar') (['foobar'], ['foo'], ['bar']) >>> parse_data_slots('foo > * bar') (['foo'], [], ['bar']) """ value = unquote(value) if '>' in value: wrappers, children = value.split('>', 1) else: wrappers = value children = '' if '*' in children: prepends, appends = children.split('*', 1) else: prepends = children appends = '' wrappers = list(filter(bool, list(map(str.strip, wrappers.split())))) prepends = list(filter(bool, list(map(str.strip, prepends.split())))) appends = list(filter(bool, list(map(str.strip, appends.split())))) return wrappers, prepends, appends
[ "def", "parse_data_slots", "(", "value", ")", ":", "value", "=", "unquote", "(", "value", ")", "if", "'>'", "in", "value", ":", "wrappers", ",", "children", "=", "value", ".", "split", "(", "'>'", ",", "1", ")", "else", ":", "wrappers", "=", "value", "children", "=", "''", "if", "'*'", "in", "children", ":", "prepends", ",", "appends", "=", "children", ".", "split", "(", "'*'", ",", "1", ")", "else", ":", "prepends", "=", "children", "appends", "=", "''", "wrappers", "=", "list", "(", "filter", "(", "bool", ",", "list", "(", "map", "(", "str", ".", "strip", ",", "wrappers", ".", "split", "(", ")", ")", ")", ")", ")", "prepends", "=", "list", "(", "filter", "(", "bool", ",", "list", "(", "map", "(", "str", ".", "strip", ",", "prepends", ".", "split", "(", ")", ")", ")", ")", ")", "appends", "=", "list", "(", "filter", "(", "bool", ",", "list", "(", "map", "(", "str", ".", "strip", ",", "appends", ".", "split", "(", ")", ")", ")", ")", ")", "return", "wrappers", ",", "prepends", ",", "appends" ]
Parse data-slots value into slots used to wrap node, prepend to node or append to node. >>> parse_data_slots('') ([], [], []) >>> parse_data_slots('foo bar') (['foo', 'bar'], [], []) >>> parse_data_slots('foo bar > foobar') (['foo', 'bar'], ['foobar'], []) >>> parse_data_slots('> foobar') ([], ['foobar'], []) >>> parse_data_slots('> foo * bar') ([], ['foo'], ['bar']) >>> parse_data_slots('foobar > foo * bar') (['foobar'], ['foo'], ['bar']) >>> parse_data_slots('foo > * bar') (['foo'], [], ['bar'])
[ "Parse", "data", "-", "slots", "value", "into", "slots", "used", "to", "wrap", "node", "prepend", "to", "node", "or", "append", "to", "node", "." ]
73b6acb18905025a76b239c86de9543ed9350991
https://github.com/plone/plone.app.mosaic/blob/73b6acb18905025a76b239c86de9543ed9350991/src/plone/app/mosaic/browser/main_template.py#L65-L107
12,369
plone/plone.app.mosaic
src/plone/app/mosaic/browser/main_template.py
cook_layout
def cook_layout(layout, ajax): """Return main_template compatible layout""" # Fix XHTML layouts with CR[+LF] line endings layout = re.sub('\r', '\n', re.sub('\r\n', '\n', layout)) # Parse layout if isinstance(layout, six.text_type): result = getHTMLSerializer([layout.encode('utf-8')], encoding='utf-8') else: result = getHTMLSerializer([layout], encoding='utf-8') # Fix XHTML layouts with inline js (etree.tostring breaks all <![CDATA[) if '<![CDATA[' in layout: result.serializer = html.tostring # Wrap all panels with a metal:fill-slot -tag: all_slots = [] for layoutPanelNode in slotsXPath(result.tree): data_slots = layoutPanelNode.attrib['data-slots'] all_slots += wrap_append_prepend_slots(layoutPanelNode, data_slots) del layoutPanelNode.attrib['data-slots'] # When no slots are explicitly defined, try to inject the very default # slots if len(all_slots) == 0: for node in result.tree.xpath('//*[@data-panel="content"]'): wrap_append_prepend_slots( node, 'content > body header main * content-core') # Append implicit slots head = result.tree.getroot().find('head') if not ajax and head is not None: for name in ['top_slot', 'head_slot', 'style_slot', 'javascript_head_slot']: slot = etree.Element('{{{0:s}}}{1:s}'.format(NSMAP['metal'], name), nsmap=NSMAP) slot.attrib['define-slot'] = name head.append(slot) template = TEMPLATE metal = 'xmlns:metal="http://namespaces.zope.org/metal"' return (template % ''.join(result)).replace(metal, '')
python
def cook_layout(layout, ajax): """Return main_template compatible layout""" # Fix XHTML layouts with CR[+LF] line endings layout = re.sub('\r', '\n', re.sub('\r\n', '\n', layout)) # Parse layout if isinstance(layout, six.text_type): result = getHTMLSerializer([layout.encode('utf-8')], encoding='utf-8') else: result = getHTMLSerializer([layout], encoding='utf-8') # Fix XHTML layouts with inline js (etree.tostring breaks all <![CDATA[) if '<![CDATA[' in layout: result.serializer = html.tostring # Wrap all panels with a metal:fill-slot -tag: all_slots = [] for layoutPanelNode in slotsXPath(result.tree): data_slots = layoutPanelNode.attrib['data-slots'] all_slots += wrap_append_prepend_slots(layoutPanelNode, data_slots) del layoutPanelNode.attrib['data-slots'] # When no slots are explicitly defined, try to inject the very default # slots if len(all_slots) == 0: for node in result.tree.xpath('//*[@data-panel="content"]'): wrap_append_prepend_slots( node, 'content > body header main * content-core') # Append implicit slots head = result.tree.getroot().find('head') if not ajax and head is not None: for name in ['top_slot', 'head_slot', 'style_slot', 'javascript_head_slot']: slot = etree.Element('{{{0:s}}}{1:s}'.format(NSMAP['metal'], name), nsmap=NSMAP) slot.attrib['define-slot'] = name head.append(slot) template = TEMPLATE metal = 'xmlns:metal="http://namespaces.zope.org/metal"' return (template % ''.join(result)).replace(metal, '')
[ "def", "cook_layout", "(", "layout", ",", "ajax", ")", ":", "# Fix XHTML layouts with CR[+LF] line endings", "layout", "=", "re", ".", "sub", "(", "'\\r'", ",", "'\\n'", ",", "re", ".", "sub", "(", "'\\r\\n'", ",", "'\\n'", ",", "layout", ")", ")", "# Parse layout", "if", "isinstance", "(", "layout", ",", "six", ".", "text_type", ")", ":", "result", "=", "getHTMLSerializer", "(", "[", "layout", ".", "encode", "(", "'utf-8'", ")", "]", ",", "encoding", "=", "'utf-8'", ")", "else", ":", "result", "=", "getHTMLSerializer", "(", "[", "layout", "]", ",", "encoding", "=", "'utf-8'", ")", "# Fix XHTML layouts with inline js (etree.tostring breaks all <![CDATA[)", "if", "'<![CDATA['", "in", "layout", ":", "result", ".", "serializer", "=", "html", ".", "tostring", "# Wrap all panels with a metal:fill-slot -tag:", "all_slots", "=", "[", "]", "for", "layoutPanelNode", "in", "slotsXPath", "(", "result", ".", "tree", ")", ":", "data_slots", "=", "layoutPanelNode", ".", "attrib", "[", "'data-slots'", "]", "all_slots", "+=", "wrap_append_prepend_slots", "(", "layoutPanelNode", ",", "data_slots", ")", "del", "layoutPanelNode", ".", "attrib", "[", "'data-slots'", "]", "# When no slots are explicitly defined, try to inject the very default", "# slots", "if", "len", "(", "all_slots", ")", "==", "0", ":", "for", "node", "in", "result", ".", "tree", ".", "xpath", "(", "'//*[@data-panel=\"content\"]'", ")", ":", "wrap_append_prepend_slots", "(", "node", ",", "'content > body header main * content-core'", ")", "# Append implicit slots", "head", "=", "result", ".", "tree", ".", "getroot", "(", ")", ".", "find", "(", "'head'", ")", "if", "not", "ajax", "and", "head", "is", "not", "None", ":", "for", "name", "in", "[", "'top_slot'", ",", "'head_slot'", ",", "'style_slot'", ",", "'javascript_head_slot'", "]", ":", "slot", "=", "etree", ".", "Element", "(", "'{{{0:s}}}{1:s}'", ".", "format", "(", "NSMAP", "[", "'metal'", "]", ",", "name", ")", ",", "nsmap", "=", "NSMAP", ")", "slot", ".", "attrib", "[", "'define-slot'", "]", "=", "name", "head", ".", "append", "(", "slot", ")", "template", "=", "TEMPLATE", "metal", "=", "'xmlns:metal=\"http://namespaces.zope.org/metal\"'", "return", "(", "template", "%", "''", ".", "join", "(", "result", ")", ")", ".", "replace", "(", "metal", ",", "''", ")" ]
Return main_template compatible layout
[ "Return", "main_template", "compatible", "layout" ]
73b6acb18905025a76b239c86de9543ed9350991
https://github.com/plone/plone.app.mosaic/blob/73b6acb18905025a76b239c86de9543ed9350991/src/plone/app/mosaic/browser/main_template.py#L138-L179
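For the implicit head-slot step above, a small lxml sketch of appending namespaced metal slots to a parsed head; runnable if lxml is installed, with NSMAP assumed to mirror the metal namespace used by the layout code.

from lxml import etree, html

NSMAP = {'metal': 'http://namespaces.zope.org/metal'}
tree = html.fromstring('<html><head><title>t</title></head><body></body></html>')
head = tree.find('head')
for name in ['top_slot', 'head_slot']:
    # Clark notation {namespace}tag creates a namespaced element.
    slot = etree.Element('{{{0:s}}}{1:s}'.format(NSMAP['metal'], name), nsmap=NSMAP)
    slot.attrib['define-slot'] = name
    head.append(slot)
print(etree.tostring(tree).decode())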
12,370
plone/plone.app.mosaic
src/plone/app/mosaic/browser/editor.py
ManageLayoutView.existing
def existing(self): """ find existing content assigned to this layout""" catalog = api.portal.get_tool('portal_catalog') results = [] layout_path = self._get_layout_path( self.request.form.get('layout', '') ) for brain in catalog(layout=layout_path): results.append({ 'title': brain.Title, 'url': brain.getURL() }) return json.dumps({ 'total': len(results), 'data': results })
python
def existing(self): """ find existing content assigned to this layout""" catalog = api.portal.get_tool('portal_catalog') results = [] layout_path = self._get_layout_path( self.request.form.get('layout', '') ) for brain in catalog(layout=layout_path): results.append({ 'title': brain.Title, 'url': brain.getURL() }) return json.dumps({ 'total': len(results), 'data': results })
[ "def", "existing", "(", "self", ")", ":", "catalog", "=", "api", ".", "portal", ".", "get_tool", "(", "'portal_catalog'", ")", "results", "=", "[", "]", "layout_path", "=", "self", ".", "_get_layout_path", "(", "self", ".", "request", ".", "form", ".", "get", "(", "'layout'", ",", "''", ")", ")", "for", "brain", "in", "catalog", "(", "layout", "=", "layout_path", ")", ":", "results", ".", "append", "(", "{", "'title'", ":", "brain", ".", "Title", ",", "'url'", ":", "brain", ".", "getURL", "(", ")", "}", ")", "return", "json", ".", "dumps", "(", "{", "'total'", ":", "len", "(", "results", ")", ",", "'data'", ":", "results", "}", ")" ]
find existing content assigned to this layout
[ "find", "existing", "content", "assigned", "to", "this", "layout" ]
73b6acb18905025a76b239c86de9543ed9350991
https://github.com/plone/plone.app.mosaic/blob/73b6acb18905025a76b239c86de9543ed9350991/src/plone/app/mosaic/browser/editor.py#L127-L142
12,371
sergiocorreia/panflute
panflute/io.py
load_reader_options
def load_reader_options(): """ Retrieve Pandoc Reader options from the environment """ options = os.environ['PANDOC_READER_OPTIONS'] options = json.loads(options, object_pairs_hook=OrderedDict) return options
python
def load_reader_options(): """ Retrieve Pandoc Reader options from the environment """ options = os.environ['PANDOC_READER_OPTIONS'] options = json.loads(options, object_pairs_hook=OrderedDict) return options
[ "def", "load_reader_options", "(", ")", ":", "options", "=", "os", ".", "environ", "[", "'PANDOC_READER_OPTIONS'", "]", "options", "=", "json", ".", "loads", "(", "options", ",", "object_pairs_hook", "=", "OrderedDict", ")", "return", "options" ]
Retrieve Pandoc Reader options from the environment
[ "Retrieve", "Pandoc", "Reader", "options", "from", "the", "environment" ]
65c2d570c26a190deb600cab5e2ad8a828a3302e
https://github.com/sergiocorreia/panflute/blob/65c2d570c26a190deb600cab5e2ad8a828a3302e/panflute/io.py#L263-L269
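A quick sketch of the round trip, setting the environment variable by hand; the options payload here is made up for illustration.

import json
import os
from collections import OrderedDict

os.environ['PANDOC_READER_OPTIONS'] = '{"columns": 72, "standalone": true}'
options = json.loads(os.environ['PANDOC_READER_OPTIONS'], object_pairs_hook=OrderedDict)
print(options['columns'])  # 72, with key order preserved by OrderedDict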
12,372
sergiocorreia/panflute
panflute/tools.py
yaml_filter
def yaml_filter(element, doc, tag=None, function=None, tags=None, strict_yaml=False): ''' Convenience function for parsing code blocks with YAML options This function is useful to create a filter that applies to code blocks that have specific classes. It is used as an argument of ``run_filter``, with two additional options: ``tag`` and ``function``. Using this is equivalent to having filter functions that: 1. Check if the element is a code block 2. Check if the element belongs to a specific class 3. Split the YAML options (at the beginning of the block, by looking for ``...`` or ``---`` strings in a separate line 4. Parse the YAML 5. Use the YAML options and (optionally) the data that follows the YAML to return a new or modified element Instead, you just need to: 1. Call ``run_filter`` with ``yaml_filter`` as the action function, and with the additional arguments ``tag`` and ``function`` 2. Construct a ``fenced_action`` function that takes four arguments: (options, data, element, doc). Note that options is a dict and data is a raw string. Notice that this is similar to the ``action`` functions of standard filters, but with *options* and *data* as the new ones. Note: if you want to apply multiple functions to separate classes, you can use the ``tags`` argument, which receives a dict of ``tag: function`` pairs. Note: use the ``strict_yaml=True`` option in order to allow for more verbose but flexible YAML metadata: more than one YAML blocks are allowed, but they all must start with ``---`` (even at the beginning) and end with ``---`` or ``...``. Also, YAML is not the default content when no delimiters are set. Example:: """ Replace code blocks of class 'foo' with # horizontal rules """ import panflute as pf def fenced_action(options, data, element, doc): count = options.get('count', 1) div = pf.Div(attributes={'count': str(count)}) div.content.extend([pf.HorizontalRule] * count) return div if __name__ == '__main__': pf.run_filter(pf.yaml_filter, tag='foo', function=fenced_action) ''' # Allow for either tag+function or a dict {tag: function} assert (tag is None) + (tags is None) == 1 # XOR if tags is None: tags = {tag: function} if type(element) == CodeBlock: for tag in tags: if tag in element.classes: function = tags[tag] if not strict_yaml: # Split YAML and data parts (separated by ... or ---) raw = re.split("^([.]{3,}|[-]{3,})$", element.text, 1, re.MULTILINE) data = raw[2] if len(raw) > 2 else '' data = data.lstrip('\n') raw = raw[0] try: options = yaml.safe_load(raw) except yaml.scanner.ScannerError: debug("panflute: malformed YAML block") return if options is None: options = {} else: options = {} data = [] raw = re.split("^([.]{3,}|[-]{3,})$", element.text, 0, re.MULTILINE) rawmode = True for chunk in raw: chunk = chunk.strip('\n') if not chunk: continue if rawmode: if chunk.startswith('---'): rawmode = False else: data.append(chunk) else: if chunk.startswith('---') or chunk.startswith('...'): rawmode = True else: try: options.update(yaml.safe_load(chunk)) except yaml.scanner.ScannerError: debug("panflute: malformed YAML block") return data = '\n'.join(data) return function(options=options, data=data, element=element, doc=doc)
python
def yaml_filter(element, doc, tag=None, function=None, tags=None, strict_yaml=False): ''' Convenience function for parsing code blocks with YAML options This function is useful to create a filter that applies to code blocks that have specific classes. It is used as an argument of ``run_filter``, with two additional options: ``tag`` and ``function``. Using this is equivalent to having filter functions that: 1. Check if the element is a code block 2. Check if the element belongs to a specific class 3. Split the YAML options (at the beginning of the block, by looking for ``...`` or ``---`` strings in a separate line 4. Parse the YAML 5. Use the YAML options and (optionally) the data that follows the YAML to return a new or modified element Instead, you just need to: 1. Call ``run_filter`` with ``yaml_filter`` as the action function, and with the additional arguments ``tag`` and ``function`` 2. Construct a ``fenced_action`` function that takes four arguments: (options, data, element, doc). Note that options is a dict and data is a raw string. Notice that this is similar to the ``action`` functions of standard filters, but with *options* and *data* as the new ones. Note: if you want to apply multiple functions to separate classes, you can use the ``tags`` argument, which receives a dict of ``tag: function`` pairs. Note: use the ``strict_yaml=True`` option in order to allow for more verbose but flexible YAML metadata: more than one YAML blocks are allowed, but they all must start with ``---`` (even at the beginning) and end with ``---`` or ``...``. Also, YAML is not the default content when no delimiters are set. Example:: """ Replace code blocks of class 'foo' with # horizontal rules """ import panflute as pf def fenced_action(options, data, element, doc): count = options.get('count', 1) div = pf.Div(attributes={'count': str(count)}) div.content.extend([pf.HorizontalRule] * count) return div if __name__ == '__main__': pf.run_filter(pf.yaml_filter, tag='foo', function=fenced_action) ''' # Allow for either tag+function or a dict {tag: function} assert (tag is None) + (tags is None) == 1 # XOR if tags is None: tags = {tag: function} if type(element) == CodeBlock: for tag in tags: if tag in element.classes: function = tags[tag] if not strict_yaml: # Split YAML and data parts (separated by ... or ---) raw = re.split("^([.]{3,}|[-]{3,})$", element.text, 1, re.MULTILINE) data = raw[2] if len(raw) > 2 else '' data = data.lstrip('\n') raw = raw[0] try: options = yaml.safe_load(raw) except yaml.scanner.ScannerError: debug("panflute: malformed YAML block") return if options is None: options = {} else: options = {} data = [] raw = re.split("^([.]{3,}|[-]{3,})$", element.text, 0, re.MULTILINE) rawmode = True for chunk in raw: chunk = chunk.strip('\n') if not chunk: continue if rawmode: if chunk.startswith('---'): rawmode = False else: data.append(chunk) else: if chunk.startswith('---') or chunk.startswith('...'): rawmode = True else: try: options.update(yaml.safe_load(chunk)) except yaml.scanner.ScannerError: debug("panflute: malformed YAML block") return data = '\n'.join(data) return function(options=options, data=data, element=element, doc=doc)
[ "def", "yaml_filter", "(", "element", ",", "doc", ",", "tag", "=", "None", ",", "function", "=", "None", ",", "tags", "=", "None", ",", "strict_yaml", "=", "False", ")", ":", "# Allow for either tag+function or a dict {tag: function}", "assert", "(", "tag", "is", "None", ")", "+", "(", "tags", "is", "None", ")", "==", "1", "# XOR", "if", "tags", "is", "None", ":", "tags", "=", "{", "tag", ":", "function", "}", "if", "type", "(", "element", ")", "==", "CodeBlock", ":", "for", "tag", "in", "tags", ":", "if", "tag", "in", "element", ".", "classes", ":", "function", "=", "tags", "[", "tag", "]", "if", "not", "strict_yaml", ":", "# Split YAML and data parts (separated by ... or ---)", "raw", "=", "re", ".", "split", "(", "\"^([.]{3,}|[-]{3,})$\"", ",", "element", ".", "text", ",", "1", ",", "re", ".", "MULTILINE", ")", "data", "=", "raw", "[", "2", "]", "if", "len", "(", "raw", ")", ">", "2", "else", "''", "data", "=", "data", ".", "lstrip", "(", "'\\n'", ")", "raw", "=", "raw", "[", "0", "]", "try", ":", "options", "=", "yaml", ".", "safe_load", "(", "raw", ")", "except", "yaml", ".", "scanner", ".", "ScannerError", ":", "debug", "(", "\"panflute: malformed YAML block\"", ")", "return", "if", "options", "is", "None", ":", "options", "=", "{", "}", "else", ":", "options", "=", "{", "}", "data", "=", "[", "]", "raw", "=", "re", ".", "split", "(", "\"^([.]{3,}|[-]{3,})$\"", ",", "element", ".", "text", ",", "0", ",", "re", ".", "MULTILINE", ")", "rawmode", "=", "True", "for", "chunk", "in", "raw", ":", "chunk", "=", "chunk", ".", "strip", "(", "'\\n'", ")", "if", "not", "chunk", ":", "continue", "if", "rawmode", ":", "if", "chunk", ".", "startswith", "(", "'---'", ")", ":", "rawmode", "=", "False", "else", ":", "data", ".", "append", "(", "chunk", ")", "else", ":", "if", "chunk", ".", "startswith", "(", "'---'", ")", "or", "chunk", ".", "startswith", "(", "'...'", ")", ":", "rawmode", "=", "True", "else", ":", "try", ":", "options", ".", "update", "(", "yaml", ".", "safe_load", "(", "chunk", ")", ")", "except", "yaml", ".", "scanner", ".", "ScannerError", ":", "debug", "(", "\"panflute: malformed YAML block\"", ")", "return", "data", "=", "'\\n'", ".", "join", "(", "data", ")", "return", "function", "(", "options", "=", "options", ",", "data", "=", "data", ",", "element", "=", "element", ",", "doc", "=", "doc", ")" ]
Convenience function for parsing code blocks with YAML options This function is useful to create a filter that applies to code blocks that have specific classes. It is used as an argument of ``run_filter``, with two additional options: ``tag`` and ``function``. Using this is equivalent to having filter functions that: 1. Check if the element is a code block 2. Check if the element belongs to a specific class 3. Split the YAML options (at the beginning of the block, by looking for ``...`` or ``---`` strings in a separate line 4. Parse the YAML 5. Use the YAML options and (optionally) the data that follows the YAML to return a new or modified element Instead, you just need to: 1. Call ``run_filter`` with ``yaml_filter`` as the action function, and with the additional arguments ``tag`` and ``function`` 2. Construct a ``fenced_action`` function that takes four arguments: (options, data, element, doc). Note that options is a dict and data is a raw string. Notice that this is similar to the ``action`` functions of standard filters, but with *options* and *data* as the new ones. Note: if you want to apply multiple functions to separate classes, you can use the ``tags`` argument, which receives a dict of ``tag: function`` pairs. Note: use the ``strict_yaml=True`` option in order to allow for more verbose but flexible YAML metadata: more than one YAML blocks are allowed, but they all must start with ``---`` (even at the beginning) and end with ``---`` or ``...``. Also, YAML is not the default content when no delimiters are set. Example:: """ Replace code blocks of class 'foo' with # horizontal rules """ import panflute as pf def fenced_action(options, data, element, doc): count = options.get('count', 1) div = pf.Div(attributes={'count': str(count)}) div.content.extend([pf.HorizontalRule] * count) return div if __name__ == '__main__': pf.run_filter(pf.yaml_filter, tag='foo', function=fenced_action)
[ "Convenience", "function", "for", "parsing", "code", "blocks", "with", "YAML", "options" ]
65c2d570c26a190deb600cab5e2ad8a828a3302e
https://github.com/sergiocorreia/panflute/blob/65c2d570c26a190deb600cab5e2ad8a828a3302e/panflute/tools.py#L44-L158
12,373
sergiocorreia/panflute
panflute/base.py
Element._set_content
def _set_content(self, value, oktypes): """ Similar to content.setter but when there are no existing oktypes """ if value is None: value = [] self._content = ListContainer(*value, oktypes=oktypes, parent=self)
python
def _set_content(self, value, oktypes): """ Similar to content.setter but when there are no existing oktypes """ if value is None: value = [] self._content = ListContainer(*value, oktypes=oktypes, parent=self)
[ "def", "_set_content", "(", "self", ",", "value", ",", "oktypes", ")", ":", "if", "value", "is", "None", ":", "value", "=", "[", "]", "self", ".", "_content", "=", "ListContainer", "(", "*", "value", ",", "oktypes", "=", "oktypes", ",", "parent", "=", "self", ")" ]
Similar to content.setter but when there are no existing oktypes
[ "Similar", "to", "content", ".", "setter", "but", "when", "there", "are", "no", "existing", "oktypes" ]
65c2d570c26a190deb600cab5e2ad8a828a3302e
https://github.com/sergiocorreia/panflute/blob/65c2d570c26a190deb600cab5e2ad8a828a3302e/panflute/base.py#L123-L129
12,374
sergiocorreia/panflute
panflute/base.py
Element.offset
def offset(self, n): """ Return a sibling element offset by n :rtype: :class:`Element` | ``None`` """ idx = self.index if idx is not None: sibling = idx + n container = self.container if 0 <= sibling < len(container): return container[sibling]
python
def offset(self, n): """ Return a sibling element offset by n :rtype: :class:`Element` | ``None`` """ idx = self.index if idx is not None: sibling = idx + n container = self.container if 0 <= sibling < len(container): return container[sibling]
[ "def", "offset", "(", "self", ",", "n", ")", ":", "idx", "=", "self", ".", "index", "if", "idx", "is", "not", "None", ":", "sibling", "=", "idx", "+", "n", "container", "=", "self", ".", "container", "if", "0", "<=", "sibling", "<", "len", "(", "container", ")", ":", "return", "container", "[", "sibling", "]" ]
Return a sibling element offset by n :rtype: :class:`Element` | ``None``
[ "Return", "a", "sibling", "element", "offset", "by", "n" ]
65c2d570c26a190deb600cab5e2ad8a828a3302e
https://github.com/sergiocorreia/panflute/blob/65c2d570c26a190deb600cab5e2ad8a828a3302e/panflute/base.py#L166-L178
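The guard pattern in isolation: compute the sibling index, bounds-check before indexing, and fall through to None when the offset runs off either end. A plain-list sketch, no panflute required.

def offset_in(seq, item, n):
    # Like Element.offset: locate the item, then step n positions.
    idx = seq.index(item)
    sibling = idx + n
    if 0 <= sibling < len(seq):
        return seq[sibling]
    return None  # implicit in the original: out of range yields None

print(offset_in(['a', 'b', 'c'], 'b', 1))   # c
print(offset_in(['a', 'b', 'c'], 'a', -1))  # None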
12,375
laike9m/pdir2
pdir/api.py
PrettyDir.search
def search(self, term: str, case_sensitive: bool = False) -> 'PrettyDir': """Searches for names that match some pattern. Args: term: String used to match names. A name is returned if it matches the whole search term. case_sensitive: Boolean to match case or not, default is False (case insensitive). Return: A PrettyDir object with matched names. """ if case_sensitive: return PrettyDir( self.obj, [pattr for pattr in self.pattrs if term in pattr.name] ) else: term = term.lower() return PrettyDir( self.obj, [pattr for pattr in self.pattrs if term in pattr.name.lower()] )
python
def search(self, term: str, case_sensitive: bool = False) -> 'PrettyDir': """Searches for names that match some pattern. Args: term: String used to match names. A name is returned if it matches the whole search term. case_sensitive: Boolean to match case or not, default is False (case insensitive). Return: A PrettyDir object with matched names. """ if case_sensitive: return PrettyDir( self.obj, [pattr for pattr in self.pattrs if term in pattr.name] ) else: term = term.lower() return PrettyDir( self.obj, [pattr for pattr in self.pattrs if term in pattr.name.lower()] )
[ "def", "search", "(", "self", ",", "term", ":", "str", ",", "case_sensitive", ":", "bool", "=", "False", ")", "->", "'PrettyDir'", ":", "if", "case_sensitive", ":", "return", "PrettyDir", "(", "self", ".", "obj", ",", "[", "pattr", "for", "pattr", "in", "self", ".", "pattrs", "if", "term", "in", "pattr", ".", "name", "]", ")", "else", ":", "term", "=", "term", ".", "lower", "(", ")", "return", "PrettyDir", "(", "self", ".", "obj", ",", "[", "pattr", "for", "pattr", "in", "self", ".", "pattrs", "if", "term", "in", "pattr", ".", "name", ".", "lower", "(", ")", "]", ")" ]
Searches for names that match some pattern. Args: term: String used to match names. A name is returned if it matches the whole search term. case_sensitive: Boolean to match case or not, default is False (case insensitive). Return: A PrettyDir object with matched names.
[ "Searches", "for", "names", "that", "match", "some", "pattern", "." ]
c4550523fe9b54bf9b755ffa28900a5e9f493d02
https://github.com/laike9m/pdir2/blob/c4550523fe9b54bf9b755ffa28900a5e9f493d02/pdir/api.py#L74-L94
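Typical usage, assuming the pdir2 package is installed and exposes pdir(obj) returning a PrettyDir as above.

import os

import pdir

print(pdir(os).search('env'))                       # case-insensitive substring match
print(pdir(os).search('ENV', case_sensitive=True))  # probably no hits: os names are lowercase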
12,376
laike9m/pdir2
pdir/api.py
PrettyDir.properties
def properties(self) -> 'PrettyDir': """Returns all properties of the inspected object. Note that "properties" can mean "variables". """ return PrettyDir( self.obj, [ pattr for pattr in self.pattrs if category_match(pattr.category, AttrCategory.PROPERTY) ], )
python
def properties(self) -> 'PrettyDir': """Returns all properties of the inspected object. Note that "properties" can mean "variables". """ return PrettyDir( self.obj, [ pattr for pattr in self.pattrs if category_match(pattr.category, AttrCategory.PROPERTY) ], )
[ "def", "properties", "(", "self", ")", "->", "'PrettyDir'", ":", "return", "PrettyDir", "(", "self", ".", "obj", ",", "[", "pattr", "for", "pattr", "in", "self", ".", "pattrs", "if", "category_match", "(", "pattr", ".", "category", ",", "AttrCategory", ".", "PROPERTY", ")", "]", ",", ")" ]
Returns all properties of the inspected object. Note that "properties" can mean "variables".
[ "Returns", "all", "properties", "of", "the", "inspected", "object", "." ]
c4550523fe9b54bf9b755ffa28900a5e9f493d02
https://github.com/laike9m/pdir2/blob/c4550523fe9b54bf9b755ffa28900a5e9f493d02/pdir/api.py#L104-L116
12,377
laike9m/pdir2
pdir/api.py
PrettyDir.methods
def methods(self) -> 'PrettyDir': """Returns all methods of the inspected object. Note that "methods" can mean "functions" when inspecting a module. """ return PrettyDir( self.obj, [ pattr for pattr in self.pattrs if category_match(pattr.category, AttrCategory.FUNCTION) ], )
python
def methods(self) -> 'PrettyDir': """Returns all methods of the inspected object. Note that "methods" can mean "functions" when inspecting a module. """ return PrettyDir( self.obj, [ pattr for pattr in self.pattrs if category_match(pattr.category, AttrCategory.FUNCTION) ], )
[ "def", "methods", "(", "self", ")", "->", "'PrettyDir'", ":", "return", "PrettyDir", "(", "self", ".", "obj", ",", "[", "pattr", "for", "pattr", "in", "self", ".", "pattrs", "if", "category_match", "(", "pattr", ".", "category", ",", "AttrCategory", ".", "FUNCTION", ")", "]", ",", ")" ]
Returns all methods of the inspected object. Note that "methods" can mean "functions" when inspecting a module.
[ "Returns", "all", "methods", "of", "the", "inspected", "object", "." ]
c4550523fe9b54bf9b755ffa28900a5e9f493d02
https://github.com/laike9m/pdir2/blob/c4550523fe9b54bf9b755ffa28900a5e9f493d02/pdir/api.py#L119-L131
12,378
laike9m/pdir2
pdir/api.py
PrettyDir.public
def public(self) -> 'PrettyDir': """Returns public attributes of the inspected object.""" return PrettyDir( self.obj, [pattr for pattr in self.pattrs if not pattr.name.startswith('_')] )
python
def public(self) -> 'PrettyDir': """Returns public attributes of the inspected object.""" return PrettyDir( self.obj, [pattr for pattr in self.pattrs if not pattr.name.startswith('_')] )
[ "def", "public", "(", "self", ")", "->", "'PrettyDir'", ":", "return", "PrettyDir", "(", "self", ".", "obj", ",", "[", "pattr", "for", "pattr", "in", "self", ".", "pattrs", "if", "not", "pattr", ".", "name", ".", "startswith", "(", "'_'", ")", "]", ")" ]
Returns public attributes of the inspected object.
[ "Returns", "public", "attributes", "of", "the", "inspected", "object", "." ]
c4550523fe9b54bf9b755ffa28900a5e9f493d02
https://github.com/laike9m/pdir2/blob/c4550523fe9b54bf9b755ffa28900a5e9f493d02/pdir/api.py#L134-L138
12,379
laike9m/pdir2
pdir/api.py
PrettyDir.own
def own(self) -> 'PrettyDir': """Returns attributes that are not inhterited from parent classes. Now we only use a simple judgement, it is expected that many attributes not get returned, especially invoked on a module. For instance, there's no way to distinguish between properties that are initialized in instance class's __init__ and parent class's __init__(assuming super() is called). So we'll just leave it. """ return PrettyDir( self.obj, [ pattr for pattr in self.pattrs if pattr.name in type(self.obj).__dict__ or pattr.name in self.obj.__dict__ ], )
python
def own(self) -> 'PrettyDir': """Returns attributes that are not inhterited from parent classes. Now we only use a simple judgement, it is expected that many attributes not get returned, especially invoked on a module. For instance, there's no way to distinguish between properties that are initialized in instance class's __init__ and parent class's __init__(assuming super() is called). So we'll just leave it. """ return PrettyDir( self.obj, [ pattr for pattr in self.pattrs if pattr.name in type(self.obj).__dict__ or pattr.name in self.obj.__dict__ ], )
[ "def", "own", "(", "self", ")", "->", "'PrettyDir'", ":", "return", "PrettyDir", "(", "self", ".", "obj", ",", "[", "pattr", "for", "pattr", "in", "self", ".", "pattrs", "if", "pattr", ".", "name", "in", "type", "(", "self", ".", "obj", ")", ".", "__dict__", "or", "pattr", ".", "name", "in", "self", ".", "obj", ".", "__dict__", "]", ",", ")" ]
Returns attributes that are not inherited from parent classes. Now we only use a simple judgement; it is expected that many attributes will not get returned, especially when invoked on a module. For instance, there's no way to distinguish between properties that are initialized in instance class's __init__ and parent class's __init__ (assuming super() is called). So we'll just leave it.
[ "Returns", "attributes", "that", "are", "not", "inhterited", "from", "parent", "classes", "." ]
c4550523fe9b54bf9b755ffa28900a5e9f493d02
https://github.com/laike9m/pdir2/blob/c4550523fe9b54bf9b755ffa28900a5e9f493d02/pdir/api.py#L141-L159
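Because each filter returns a new PrettyDir, the filters compose; a hedged chaining sketch, assuming pdir2 exposes these filters as attribute-style properties as its README suggests.

import pdir

class Greeter:
    default = 'hi'

    def greet(self):
        return self.default

print(pdir(Greeter).public.methods)  # public, method-category attributes only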
12,380
laike9m/pdir2
pdir/api.py
PrettyAttribute.get_oneline_doc
def get_oneline_doc(self) -> str: """ Doc doesn't necessarily mean docstring. It could be anything that should be put after the attr's name as an explanation. """ attr = self.attr_obj if self.display_group == AttrCategory.DESCRIPTOR: if isinstance(attr, property): doc_list = ['@property with getter'] if attr.fset: doc_list.append(SETTER) if attr.fdel: doc_list.append(DELETER) else: doc_list = ['class %s' % attr.__class__.__name__] if hasattr(attr, '__get__'): doc_list.append(GETTER) if hasattr(attr, '__set__'): doc_list.append(SETTER) if hasattr(attr, '__delete__'): doc_list.append(DELETER) doc_list[0] = ' '.join([doc_list[0], 'with', doc_list.pop(1)]) if attr.__doc__ is not None: doc_list.append(inspect.getdoc(attr).split('\n', 1)[0]) return ', '.join(doc_list) if hasattr(attr, '__doc__'): doc = inspect.getdoc(attr) return doc.split('\n', 1)[0] if doc else '' # default doc is None return ''
python
def get_oneline_doc(self) -> str: """ Doc doesn't necessarily mean docstring. It could be anything that should be put after the attr's name as an explanation. """ attr = self.attr_obj if self.display_group == AttrCategory.DESCRIPTOR: if isinstance(attr, property): doc_list = ['@property with getter'] if attr.fset: doc_list.append(SETTER) if attr.fdel: doc_list.append(DELETER) else: doc_list = ['class %s' % attr.__class__.__name__] if hasattr(attr, '__get__'): doc_list.append(GETTER) if hasattr(attr, '__set__'): doc_list.append(SETTER) if hasattr(attr, '__delete__'): doc_list.append(DELETER) doc_list[0] = ' '.join([doc_list[0], 'with', doc_list.pop(1)]) if attr.__doc__ is not None: doc_list.append(inspect.getdoc(attr).split('\n', 1)[0]) return ', '.join(doc_list) if hasattr(attr, '__doc__'): doc = inspect.getdoc(attr) return doc.split('\n', 1)[0] if doc else '' # default doc is None return ''
[ "def", "get_oneline_doc", "(", "self", ")", "->", "str", ":", "attr", "=", "self", ".", "attr_obj", "if", "self", ".", "display_group", "==", "AttrCategory", ".", "DESCRIPTOR", ":", "if", "isinstance", "(", "attr", ",", "property", ")", ":", "doc_list", "=", "[", "'@property with getter'", "]", "if", "attr", ".", "fset", ":", "doc_list", ".", "append", "(", "SETTER", ")", "if", "attr", ".", "fdel", ":", "doc_list", ".", "append", "(", "DELETER", ")", "else", ":", "doc_list", "=", "[", "'class %s'", "%", "attr", ".", "__class__", ".", "__name__", "]", "if", "hasattr", "(", "attr", ",", "'__get__'", ")", ":", "doc_list", ".", "append", "(", "GETTER", ")", "if", "hasattr", "(", "attr", ",", "'__set__'", ")", ":", "doc_list", ".", "append", "(", "SETTER", ")", "if", "hasattr", "(", "attr", ",", "'__delete__'", ")", ":", "doc_list", ".", "append", "(", "DELETER", ")", "doc_list", "[", "0", "]", "=", "' '", ".", "join", "(", "[", "doc_list", "[", "0", "]", ",", "'with'", ",", "doc_list", ".", "pop", "(", "1", ")", "]", ")", "if", "attr", ".", "__doc__", "is", "not", "None", ":", "doc_list", ".", "append", "(", "inspect", ".", "getdoc", "(", "attr", ")", ".", "split", "(", "'\\n'", ",", "1", ")", "[", "0", "]", ")", "return", "', '", ".", "join", "(", "doc_list", ")", "if", "hasattr", "(", "attr", ",", "'__doc__'", ")", ":", "doc", "=", "inspect", ".", "getdoc", "(", "attr", ")", "return", "doc", ".", "split", "(", "'\\n'", ",", "1", ")", "[", "0", "]", "if", "doc", "else", "''", "# default doc is None", "return", "''" ]
Doc doesn't necessarily mean docstring. It could be anything that should be put after the attr's name as an explanation.
[ "Doc", "doesn", "t", "necessarily", "mean", "doctring", ".", "It", "could", "be", "anything", "that", "should", "be", "put", "after", "the", "attr", "s", "name", "as", "an", "explanation", "." ]
c4550523fe9b54bf9b755ffa28900a5e9f493d02
https://github.com/laike9m/pdir2/blob/c4550523fe9b54bf9b755ffa28900a5e9f493d02/pdir/api.py#L183-L212
12,381
laike9m/pdir2
pdir/format.py
format_pattrs
def format_pattrs(pattrs: List['api.PrettyAttribute']) -> str: """Generates repr string given a list of pattrs.""" output = [] pattrs.sort( key=lambda x: ( _FORMATTER[x.display_group].display_index, x.display_group, x.name, ) ) for display_group, grouped_pattrs in groupby(pattrs, lambda x: x.display_group): output.append( _FORMATTER[display_group].formatter(display_group, grouped_pattrs) ) return '\n'.join(output)
python
def format_pattrs(pattrs: List['api.PrettyAttribute']) -> str: """Generates repr string given a list of pattrs.""" output = [] pattrs.sort( key=lambda x: ( _FORMATTER[x.display_group].display_index, x.display_group, x.name, ) ) for display_group, grouped_pattrs in groupby(pattrs, lambda x: x.display_group): output.append( _FORMATTER[display_group].formatter(display_group, grouped_pattrs) ) return '\n'.join(output)
[ "def", "format_pattrs", "(", "pattrs", ":", "List", "[", "'api.PrettyAttribute'", "]", ")", "->", "str", ":", "output", "=", "[", "]", "pattrs", ".", "sort", "(", "key", "=", "lambda", "x", ":", "(", "_FORMATTER", "[", "x", ".", "display_group", "]", ".", "display_index", ",", "x", ".", "display_group", ",", "x", ".", "name", ",", ")", ")", "for", "display_group", ",", "grouped_pattrs", "in", "groupby", "(", "pattrs", ",", "lambda", "x", ":", "x", ".", "display_group", ")", ":", "output", ".", "append", "(", "_FORMATTER", "[", "display_group", "]", ".", "formatter", "(", "display_group", ",", "grouped_pattrs", ")", ")", "return", "'\\n'", ".", "join", "(", "output", ")" ]
Generates repr string given a list of pattrs.
[ "Generates", "repr", "string", "given", "a", "list", "of", "pattrs", "." ]
c4550523fe9b54bf9b755ffa28900a5e9f493d02
https://github.com/laike9m/pdir2/blob/c4550523fe9b54bf9b755ffa28900a5e9f493d02/pdir/format.py#L14-L29
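The sort before groupby is the load-bearing step: itertools.groupby only merges adjacent equal keys. The same pattern with plain tuples (the data is made up).

from itertools import groupby

rows = [('function', 'b'), ('property', 'x'), ('function', 'a')]
rows.sort(key=lambda r: (r[0], r[1]))  # group key first, then name
for group, items in groupby(rows, key=lambda r: r[0]):
    print(group, [name for _, name in items])
# function ['a', 'b']
# property ['x']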
12,382
laike9m/pdir2
pdir/_internal_utils.py
get_attr_from_dict
def get_attr_from_dict(inspected_obj: Any, attr_name: str) -> Any: """Ensures we get descriptor object instead of its return value. """ if inspect.isclass(inspected_obj): obj_list = [inspected_obj] + list(inspected_obj.__mro__) else: obj_list = [inspected_obj] + list(inspected_obj.__class__.__mro__) for obj in obj_list: if hasattr(obj, '__dict__') and attr_name in obj.__dict__: return obj.__dict__[attr_name] # This happens when user-defined __dir__ returns something that's not # in any __dict__. See test_override_dir. # Returns attr_name so that it's treated as a normal property. return attr_name
python
def get_attr_from_dict(inspected_obj: Any, attr_name: str) -> Any: """Ensures we get descriptor object instead of its return value. """ if inspect.isclass(inspected_obj): obj_list = [inspected_obj] + list(inspected_obj.__mro__) else: obj_list = [inspected_obj] + list(inspected_obj.__class__.__mro__) for obj in obj_list: if hasattr(obj, '__dict__') and attr_name in obj.__dict__: return obj.__dict__[attr_name] # This happens when user-defined __dir__ returns something that's not # in any __dict__. See test_override_dir. # Returns attr_name so that it's treated as a normal property. return attr_name
[ "def", "get_attr_from_dict", "(", "inspected_obj", ":", "Any", ",", "attr_name", ":", "str", ")", "->", "Any", ":", "if", "inspect", ".", "isclass", "(", "inspected_obj", ")", ":", "obj_list", "=", "[", "inspected_obj", "]", "+", "list", "(", "inspected_obj", ".", "__mro__", ")", "else", ":", "obj_list", "=", "[", "inspected_obj", "]", "+", "list", "(", "inspected_obj", ".", "__class__", ".", "__mro__", ")", "for", "obj", "in", "obj_list", ":", "if", "hasattr", "(", "obj", ",", "'__dict__'", ")", "and", "attr_name", "in", "obj", ".", "__dict__", ":", "return", "obj", ".", "__dict__", "[", "attr_name", "]", "# This happens when user-defined __dir__ returns something that's not", "# in any __dict__. See test_override_dir.", "# Returns attr_name so that it's treated as a normal property.", "return", "attr_name" ]
Ensures we get descriptor object instead of its return value.
[ "Ensures", "we", "get", "descriptor", "object", "instead", "of", "its", "return", "value", "." ]
c4550523fe9b54bf9b755ffa28900a5e9f493d02
https://github.com/laike9m/pdir2/blob/c4550523fe9b54bf9b755ffa28900a5e9f493d02/pdir/_internal_utils.py#L9-L22
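Why walk __dict__ along the MRO instead of calling getattr: getattr fires the descriptor protocol and hands back the computed value, while __dict__ holds the descriptor object itself. A short demonstration:

class A:
    @property
    def value(self):
        return 42

a = A()
print(getattr(a, 'value'))   # 42: the descriptor was invoked
print(A.__dict__['value'])   # <property object at ...>: the raw descriptor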
12,383
laike9m/pdir2
pdir/attr_category.py
attr_category_postprocess
def attr_category_postprocess(get_attr_category_func): """Unifies attr_category to a tuple, add AttrCategory.SLOT if needed.""" @functools.wraps(get_attr_category_func) def wrapped( name: str, attr: Any, obj: Any ) -> Tuple[AttrCategory, ...]: category = get_attr_category_func(name, attr, obj) category = list(category) if isinstance(category, tuple) else [category] if is_slotted_attr(obj, name): # Refactoring all tuples to lists is not easy # and pleasant. Maybe do this in future if necessary category.append(AttrCategory.SLOT) return tuple(category) return wrapped
python
def attr_category_postprocess(get_attr_category_func): """Unifies attr_category to a tuple, add AttrCategory.SLOT if needed.""" @functools.wraps(get_attr_category_func) def wrapped( name: str, attr: Any, obj: Any ) -> Tuple[AttrCategory, ...]: category = get_attr_category_func(name, attr, obj) category = list(category) if isinstance(category, tuple) else [category] if is_slotted_attr(obj, name): # Refactoring all tuples to lists is not easy # and pleasant. Maybe do this in future if necessary category.append(AttrCategory.SLOT) return tuple(category) return wrapped
[ "def", "attr_category_postprocess", "(", "get_attr_category_func", ")", ":", "@", "functools", ".", "wraps", "(", "get_attr_category_func", ")", "def", "wrapped", "(", "name", ":", "str", ",", "attr", ":", "Any", ",", "obj", ":", "Any", ")", "->", "Tuple", "[", "AttrCategory", ",", "...", "]", ":", "category", "=", "get_attr_category_func", "(", "name", ",", "attr", ",", "obj", ")", "category", "=", "list", "(", "category", ")", "if", "isinstance", "(", "category", ",", "tuple", ")", "else", "[", "category", "]", "if", "is_slotted_attr", "(", "obj", ",", "name", ")", ":", "# Refactoring all tuples to lists is not easy", "# and pleasant. Maybe do this in future if necessary", "category", ".", "append", "(", "AttrCategory", ".", "SLOT", ")", "return", "tuple", "(", "category", ")", "return", "wrapped" ]
Unifies attr_category to a tuple, add AttrCategory.SLOT if needed.
[ "Unifies", "attr_category", "to", "a", "tuple", "add", "AttrCategory", ".", "SLOT", "if", "needed", "." ]
c4550523fe9b54bf9b755ffa28900a5e9f493d02
https://github.com/laike9m/pdir2/blob/c4550523fe9b54bf9b755ffa28900a5e9f493d02/pdir/attr_category.py#L216-L230
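The normalize-to-tuple decorator pattern in isolation, with the SLOT bookkeeping dropped; the function and category names below are illustrative only.

import functools

def normalize_to_tuple(func):
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        result = func(*args, **kwargs)
        # Wrap single values so callers can always iterate uniformly.
        return result if isinstance(result, tuple) else (result,)
    return wrapped

@normalize_to_tuple
def categorize(name):
    return 'function' if name.startswith('do_') else ('property', 'slot')

print(categorize('do_work'))  # ('function',)
print(categorize('size'))     # ('property', 'slot')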
12,384
mattloper/chumpy
chumpy/monitor.py
get_peak_mem
def get_peak_mem(): ''' this returns peak memory use since process starts till the moment it's called ''' import resource rusage_denom = 1024. if sys.platform == 'darwin': # ... it seems that in OSX the output is in different units ... rusage_denom = rusage_denom * rusage_denom mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / rusage_denom return mem
python
def get_peak_mem(): ''' this returns peak memory use since process starts till the moment it's called ''' import resource rusage_denom = 1024. if sys.platform == 'darwin': # ... it seems that in OSX the output is in different units ... rusage_denom = rusage_denom * rusage_denom mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / rusage_denom return mem
[ "def", "get_peak_mem", "(", ")", ":", "import", "resource", "rusage_denom", "=", "1024.", "if", "sys", ".", "platform", "==", "'darwin'", ":", "# ... it seems that in OSX the output is different units ...", "rusage_denom", "=", "rusage_denom", "*", "rusage_denom", "mem", "=", "resource", ".", "getrusage", "(", "resource", ".", "RUSAGE_SELF", ")", ".", "ru_maxrss", "/", "rusage_denom", "return", "mem" ]
this returns peak memory use since process starts till the moment it's called
[ "this", "returns", "peak", "memory", "use", "since", "process", "starts", "till", "the", "moment", "its", "called" ]
a3cfdb1be3c8265c369c507b22f6f3f89414c772
https://github.com/mattloper/chumpy/blob/a3cfdb1be3c8265c369c507b22f6f3f89414c772/chumpy/monitor.py#L26-L36
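A self-contained equivalent of the same read; note the resource module is Unix-only, and ru_maxrss is reported in kilobytes on Linux but bytes on macOS, which is what the darwin branch corrects for.

import resource
import sys

# Kilobytes -> MB on Linux; bytes -> MB on macOS.
denom = 1024.0 ** 2 if sys.platform == 'darwin' else 1024.0
peak_mb = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / denom
print('peak memory (MB):', peak_mb)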
12,385
mattloper/chumpy
chumpy/utils.py
dfs_do_func_on_graph
def dfs_do_func_on_graph(node, func, *args, **kwargs): ''' invoke func on each node of the dr graph ''' for _node in node.tree_iterator(): func(_node, *args, **kwargs)
python
def dfs_do_func_on_graph(node, func, *args, **kwargs): ''' invoke func on each node of the dr graph ''' for _node in node.tree_iterator(): func(_node, *args, **kwargs)
[ "def", "dfs_do_func_on_graph", "(", "node", ",", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "_node", "in", "node", ".", "tree_iterator", "(", ")", ":", "func", "(", "_node", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
invoke func on each node of the dr graph
[ "invoke", "func", "on", "each", "node", "of", "the", "dr", "graph" ]
a3cfdb1be3c8265c369c507b22f6f3f89414c772
https://github.com/mattloper/chumpy/blob/a3cfdb1be3c8265c369c507b22f6f3f89414c772/chumpy/utils.py#L36-L41
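A minimal stand-in for the node contract this relies on; the Node class below is hypothetical, sketching the tree_iterator a chumpy node would provide.

class Node:
    def __init__(self, name, children=()):
        self.name = name
        self.children = list(children)

    def tree_iterator(self):
        # Depth-first: yield self, then recurse into children.
        yield self
        for child in self.children:
            yield from child.tree_iterator()

root = Node('root', [Node('a'), Node('b', [Node('c')])])
for node in root.tree_iterator():
    print(node.name)  # root, a, b, c
# dfs_do_func_on_graph(root, lambda n: print(n.name)) would visit the same nodes.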
12,386
mattloper/chumpy
chumpy/utils.py
sparse_is_desireable
def sparse_is_desireable(lhs, rhs): ''' Examines a pair of matrices and determines if the result of their multiplication should be sparse or not. ''' return False # NOTE: this short-circuit disables the heuristic below, leaving it as dead code if len(lhs.shape) == 1: return False else: lhs_rows, lhs_cols = lhs.shape if len(rhs.shape) == 1: rhs_rows = 1 rhs_cols = rhs.size else: rhs_rows, rhs_cols = rhs.shape result_size = lhs_rows * rhs_cols if sp.issparse(lhs) and sp.issparse(rhs): return True elif sp.issparse(lhs): lhs_zero_rows = lhs_rows - np.unique(lhs.nonzero()[0]).size rhs_zero_cols = np.all(rhs==0, axis=0).sum() elif sp.issparse(rhs): lhs_zero_rows = np.all(lhs==0, axis=1).sum() rhs_zero_cols = rhs_cols - np.unique(rhs.nonzero()[1]).size else: lhs_zero_rows = np.all(lhs==0, axis=1).sum() rhs_zero_cols = np.all(rhs==0, axis=0).sum() num_zeros = lhs_zero_rows * rhs_cols + rhs_zero_cols * lhs_rows - lhs_zero_rows * rhs_zero_cols # A sparse matrix uses roughly 16 bytes per nonzero element (8 + 2 4-byte inds), while a dense matrix uses 8 bytes per element. So the break even point for sparsity is 50% nonzero. But in practice, it seems to be that the compression in a csc or csr matrix gets us break even at ~65% nonzero, which lets us say 50% is a conservative, worst-case cutoff. return (float(num_zeros) / float(result_size)) >= 0.5
python
def sparse_is_desireable(lhs, rhs): ''' Examines a pair of matrices and determines if the result of their multiplication should be sparse or not. ''' return False # NOTE: this short-circuit disables the heuristic below, leaving it as dead code if len(lhs.shape) == 1: return False else: lhs_rows, lhs_cols = lhs.shape if len(rhs.shape) == 1: rhs_rows = 1 rhs_cols = rhs.size else: rhs_rows, rhs_cols = rhs.shape result_size = lhs_rows * rhs_cols if sp.issparse(lhs) and sp.issparse(rhs): return True elif sp.issparse(lhs): lhs_zero_rows = lhs_rows - np.unique(lhs.nonzero()[0]).size rhs_zero_cols = np.all(rhs==0, axis=0).sum() elif sp.issparse(rhs): lhs_zero_rows = np.all(lhs==0, axis=1).sum() rhs_zero_cols = rhs_cols - np.unique(rhs.nonzero()[1]).size else: lhs_zero_rows = np.all(lhs==0, axis=1).sum() rhs_zero_cols = np.all(rhs==0, axis=0).sum() num_zeros = lhs_zero_rows * rhs_cols + rhs_zero_cols * lhs_rows - lhs_zero_rows * rhs_zero_cols # A sparse matrix uses roughly 16 bytes per nonzero element (8 + 2 4-byte inds), while a dense matrix uses 8 bytes per element. So the break even point for sparsity is 50% nonzero. But in practice, it seems to be that the compression in a csc or csr matrix gets us break even at ~65% nonzero, which lets us say 50% is a conservative, worst-case cutoff. return (float(num_zeros) / float(result_size)) >= 0.5
[ "def", "sparse_is_desireable", "(", "lhs", ",", "rhs", ")", ":", "return", "False", "if", "len", "(", "lhs", ".", "shape", ")", "==", "1", ":", "return", "False", "else", ":", "lhs_rows", ",", "lhs_cols", "=", "lhs", ".", "shape", "if", "len", "(", "rhs", ".", "shape", ")", "==", "1", ":", "rhs_rows", "=", "1", "rhs_cols", "=", "rhs", ".", "size", "else", ":", "rhs_rows", ",", "rhs_cols", "=", "rhs", ".", "shape", "result_size", "=", "lhs_rows", "*", "rhs_cols", "if", "sp", ".", "issparse", "(", "lhs", ")", "and", "sp", ".", "issparse", "(", "rhs", ")", ":", "return", "True", "elif", "sp", ".", "issparse", "(", "lhs", ")", ":", "lhs_zero_rows", "=", "lhs_rows", "-", "np", ".", "unique", "(", "lhs", ".", "nonzero", "(", ")", "[", "0", "]", ")", ".", "size", "rhs_zero_cols", "=", "np", ".", "all", "(", "rhs", "==", "0", ",", "axis", "=", "0", ")", ".", "sum", "(", ")", "elif", "sp", ".", "issparse", "(", "rhs", ")", ":", "lhs_zero_rows", "=", "np", ".", "all", "(", "lhs", "==", "0", ",", "axis", "=", "1", ")", ".", "sum", "(", ")", "rhs_zero_cols", "=", "rhs_cols", "-", "np", ".", "unique", "(", "rhs", ".", "nonzero", "(", ")", "[", "1", "]", ")", ".", "size", "else", ":", "lhs_zero_rows", "=", "np", ".", "all", "(", "lhs", "==", "0", ",", "axis", "=", "1", ")", ".", "sum", "(", ")", "rhs_zero_cols", "=", "np", ".", "all", "(", "rhs", "==", "0", ",", "axis", "=", "0", ")", ".", "sum", "(", ")", "num_zeros", "=", "lhs_zero_rows", "*", "rhs_cols", "+", "rhs_zero_cols", "*", "lhs_rows", "-", "lhs_zero_rows", "*", "rhs_zero_cols", "# A sparse matrix uses roughly 16 bytes per nonzero element (8 + 2 4-byte inds), while a dense matrix uses 8 bytes per element. So the break even point for sparsity is 50% nonzero. But in practice, it seems to be that the compression in a csc or csr matrix gets us break even at ~65% nonzero, which lets us say 50% is a conservative, worst cases cutoff.", "return", "(", "float", "(", "num_zeros", ")", "/", "float", "(", "size", ")", ")", ">=", "0.5" ]
Examines a pair of matrices and determines if the result of their multiplication should be sparse or not.
[ "Examines", "a", "pair", "of", "matrices", "and", "determines", "if", "the", "result", "of", "their", "multiplication", "should", "be", "sparse", "or", "not", "." ]
a3cfdb1be3c8265c369c507b22f6f3f89414c772
https://github.com/mattloper/chumpy/blob/a3cfdb1be3c8265c369c507b22f6f3f89414c772/chumpy/utils.py#L44-L78
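The zero-row/zero-column estimate at the heart of the heuristic, computed directly on toy dense matrices (data made up); here the count is exact, because a zero row in lhs or a zero column in rhs forces zeros in the product.

import numpy as np

lhs = np.array([[1.0, 0.0], [0.0, 0.0]])  # row 1 is all zeros
rhs = np.array([[0.0, 2.0], [0.0, 0.0]])  # column 0 is all zeros
lhs_rows, _ = lhs.shape
_, rhs_cols = rhs.shape
lhs_zero_rows = np.all(lhs == 0, axis=1).sum()
rhs_zero_cols = np.all(rhs == 0, axis=0).sum()
# Guaranteed zeros in the product, minus the double-counted overlap.
num_zeros = lhs_zero_rows * rhs_cols + rhs_zero_cols * lhs_rows - lhs_zero_rows * rhs_zero_cols
print(num_zeros / float(lhs_rows * rhs_cols) >= 0.5)  # True: sparsity would pay off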
12,387
mattloper/chumpy
chumpy/utils.py
convert_inputs_to_sparse_if_necessary
def convert_inputs_to_sparse_if_necessary(lhs, rhs): ''' This function checks to see if a sparse output is desirable given the inputs and if so, casts the inputs to sparse in order to make it so. ''' if not sp.issparse(lhs) or not sp.issparse(rhs): if sparse_is_desireable(lhs, rhs): if not sp.issparse(lhs): lhs = sp.csc_matrix(lhs) #print "converting lhs into sparse matrix" if not sp.issparse(rhs): rhs = sp.csc_matrix(rhs) #print "converting rhs into sparse matrix" return lhs, rhs
python
def convert_inputs_to_sparse_if_necessary(lhs, rhs): ''' This function checks to see if a sparse output is desirable given the inputs and if so, casts the inputs to sparse in order to make it so. ''' if not sp.issparse(lhs) or not sp.issparse(rhs): if sparse_is_desireable(lhs, rhs): if not sp.issparse(lhs): lhs = sp.csc_matrix(lhs) #print "converting lhs into sparse matrix" if not sp.issparse(rhs): rhs = sp.csc_matrix(rhs) #print "converting rhs into sparse matrix" return lhs, rhs
[ "def", "convert_inputs_to_sparse_if_necessary", "(", "lhs", ",", "rhs", ")", ":", "if", "not", "sp", ".", "issparse", "(", "lhs", ")", "or", "not", "sp", ".", "issparse", "(", "rhs", ")", ":", "if", "sparse_is_desireable", "(", "lhs", ",", "rhs", ")", ":", "if", "not", "sp", ".", "issparse", "(", "lhs", ")", ":", "lhs", "=", "sp", ".", "csc_matrix", "(", "lhs", ")", "#print \"converting lhs into sparse matrix\"", "if", "not", "sp", ".", "issparse", "(", "rhs", ")", ":", "rhs", "=", "sp", ".", "csc_matrix", "(", "rhs", ")", "#print \"converting rhs into sparse matrix\"", "return", "lhs", ",", "rhs" ]
This function checks to see if a sparse output is desirable given the inputs and if so, casts the inputs to sparse in order to make it so.
[ "This", "function", "checks", "to", "see", "if", "a", "sparse", "output", "is", "desireable", "given", "the", "inputs", "and", "if", "so", "casts", "the", "inputs", "to", "sparse", "in", "order", "to", "make", "it", "so", "." ]
a3cfdb1be3c8265c369c507b22f6f3f89414c772
https://github.com/mattloper/chumpy/blob/a3cfdb1be3c8265c369c507b22f6f3f89414c772/chumpy/utils.py#L81-L93
12,388
mattloper/chumpy
chumpy/optimization_internal.py
ChInputsStacked.dr_wrt
def dr_wrt(self, wrt, profiler=None): ''' Loop over free variables and delete cache for the whole tree after finishing each one ''' if wrt is self.x: jacs = [] for fvi, freevar in enumerate(self.free_variables): tm = timer() if isinstance(freevar, ch.Select): new_jac = self.obj.dr_wrt(freevar.a, profiler=profiler) try: new_jac = new_jac[:, freevar.idxs] except: # non-csc sparse matrices may not support column-wise indexing new_jac = new_jac.tocsc()[:, freevar.idxs] else: new_jac = self.obj.dr_wrt(freevar, profiler=profiler) pif('dx wrt {} in {}sec, sparse: {}'.format(freevar.short_name, tm(), sp.issparse(new_jac))) if self._make_dense and sp.issparse(new_jac): new_jac = new_jac.todense() if self._make_sparse and not sp.issparse(new_jac): new_jac = sp.csc_matrix(new_jac) if new_jac is None: raise Exception( 'Objective has no derivative wrt free variable {}. ' 'You should likely remove it.'.format(fvi)) jacs.append(new_jac) tm = timer() utils.dfs_do_func_on_graph(self.obj, clear_cache_single) pif('dfs_do_func_on_graph in {}sec'.format(tm())) tm = timer() J = hstack(jacs) pif('hstack in {}sec'.format(tm())) return J
python
def dr_wrt(self, wrt, profiler=None): ''' Loop over free variables and delete cache for the whole tree after finishing each one ''' if wrt is self.x: jacs = [] for fvi, freevar in enumerate(self.free_variables): tm = timer() if isinstance(freevar, ch.Select): new_jac = self.obj.dr_wrt(freevar.a, profiler=profiler) try: new_jac = new_jac[:, freevar.idxs] except: # non-csc sparse matrices may not support column-wise indexing new_jac = new_jac.tocsc()[:, freevar.idxs] else: new_jac = self.obj.dr_wrt(freevar, profiler=profiler) pif('dx wrt {} in {}sec, sparse: {}'.format(freevar.short_name, tm(), sp.issparse(new_jac))) if self._make_dense and sp.issparse(new_jac): new_jac = new_jac.todense() if self._make_sparse and not sp.issparse(new_jac): new_jac = sp.csc_matrix(new_jac) if new_jac is None: raise Exception( 'Objective has no derivative wrt free variable {}. ' 'You should likely remove it.'.format(fvi)) jacs.append(new_jac) tm = timer() utils.dfs_do_func_on_graph(self.obj, clear_cache_single) pif('dfs_do_func_on_graph in {}sec'.format(tm())) tm = timer() J = hstack(jacs) pif('hstack in {}sec'.format(tm())) return J
[ "def", "dr_wrt", "(", "self", ",", "wrt", ",", "profiler", "=", "None", ")", ":", "if", "wrt", "is", "self", ".", "x", ":", "jacs", "=", "[", "]", "for", "fvi", ",", "freevar", "in", "enumerate", "(", "self", ".", "free_variables", ")", ":", "tm", "=", "timer", "(", ")", "if", "isinstance", "(", "freevar", ",", "ch", ".", "Select", ")", ":", "new_jac", "=", "self", ".", "obj", ".", "dr_wrt", "(", "freevar", ".", "a", ",", "profiler", "=", "profiler", ")", "try", ":", "new_jac", "=", "new_jac", "[", ":", ",", "freevar", ".", "idxs", "]", "except", ":", "# non-csc sparse matrices may not support column-wise indexing", "new_jac", "=", "new_jac", ".", "tocsc", "(", ")", "[", ":", ",", "freevar", ".", "idxs", "]", "else", ":", "new_jac", "=", "self", ".", "obj", ".", "dr_wrt", "(", "freevar", ",", "profiler", "=", "profiler", ")", "pif", "(", "'dx wrt {} in {}sec, sparse: {}'", ".", "format", "(", "freevar", ".", "short_name", ",", "tm", "(", ")", ",", "sp", ".", "issparse", "(", "new_jac", ")", ")", ")", "if", "self", ".", "_make_dense", "and", "sp", ".", "issparse", "(", "new_jac", ")", ":", "new_jac", "=", "new_jac", ".", "todense", "(", ")", "if", "self", ".", "_make_sparse", "and", "not", "sp", ".", "issparse", "(", "new_jac", ")", ":", "new_jac", "=", "sp", ".", "csc_matrix", "(", "new_jac", ")", "if", "new_jac", "is", "None", ":", "raise", "Exception", "(", "'Objective has no derivative wrt free variable {}. '", "'You should likely remove it.'", ".", "format", "(", "fvi", ")", ")", "jacs", ".", "append", "(", "new_jac", ")", "tm", "=", "timer", "(", ")", "utils", ".", "dfs_do_func_on_graph", "(", "self", ".", "obj", ",", "clear_cache_single", ")", "pif", "(", "'dfs_do_func_on_graph in {}sec'", ".", "format", "(", "tm", "(", ")", ")", ")", "tm", "=", "timer", "(", ")", "J", "=", "hstack", "(", "jacs", ")", "pif", "(", "'hstack in {}sec'", ".", "format", "(", "tm", "(", ")", ")", ")", "return", "J" ]
Loop over free variables and delete cache for the whole tree after finishing each one
[ "Loop", "over", "free", "variables", "and", "delete", "cache", "for", "the", "whole", "tree", "after", "finished", "each", "one" ]
a3cfdb1be3c8265c369c507b22f6f3f89414c772
https://github.com/mattloper/chumpy/blob/a3cfdb1be3c8265c369c507b22f6f3f89414c772/chumpy/optimization_internal.py#L34-L71
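A minimal standalone sketch (assuming only scipy and numpy) of why the tocsc() fallback above is needed: some sparse formats, notably COO, do not support indexing at all, so converting to CSC first enables the column selection used for ch.Select free variables.

import numpy as np
import scipy.sparse as sp

m = sp.coo_matrix(np.arange(12).reshape(3, 4))
idxs = [0, 2]
try:
    cols = m[:, idxs]          # COO matrices are not subscriptable
except TypeError:
    cols = m.tocsc()[:, idxs]  # convert to CSC, then slice columns
print(cols.toarray())          # columns 0 and 2 of the original matrix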
12,389
mattloper/chumpy
chumpy/optimization_internal.py
ChInputsStacked.J
def J(self):
    '''
    Compute Jacobian. Analyze dr graph first to disable unnecessary caching
    '''
    result = self.dr_wrt(self.x, profiler=self.profiler).copy()
    if self.profiler:
        self.profiler.harvest()
    return np.atleast_2d(result) if not sp.issparse(result) else result
python
def J(self):
    '''
    Compute Jacobian. Analyze dr graph first to disable unnecessary caching
    '''
    result = self.dr_wrt(self.x, profiler=self.profiler).copy()
    if self.profiler:
        self.profiler.harvest()
    return np.atleast_2d(result) if not sp.issparse(result) else result
[ "def", "J", "(", "self", ")", ":", "result", "=", "self", ".", "dr_wrt", "(", "self", ".", "x", ",", "profiler", "=", "self", ".", "profiler", ")", ".", "copy", "(", ")", "if", "self", ".", "profiler", ":", "self", ".", "profiler", ".", "harvest", "(", ")", "return", "np", ".", "atleast_2d", "(", "result", ")", "if", "not", "sp", ".", "issparse", "(", "result", ")", "else", "result" ]
Compute Jacobian. Analyze dr graph first to disable unnecessary caching
[ "Compute", "Jacobian", ".", "Analyze", "dr", "graph", "first", "to", "disable", "unnecessary", "caching" ]
a3cfdb1be3c8265c369c507b22f6f3f89414c772
https://github.com/mattloper/chumpy/blob/a3cfdb1be3c8265c369c507b22f6f3f89414c772/chumpy/optimization_internal.py#L101-L108
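A small illustration (standalone, not from the repo) of the np.atleast_2d guard in J above: a dense 1-d result is promoted to shape (1, n) so downstream solvers always see a 2-d Jacobian, while sparse results, which are already 2-d, pass through untouched.

import numpy as np
import scipy.sparse as sp

dense = np.array([1.0, 2.0, 3.0])        # 1-d gradient
print(np.atleast_2d(dense).shape)        # (1, 3)

sparse = sp.csc_matrix(np.eye(3))        # sparse matrices are always 2-d
result = np.atleast_2d(sparse) if not sp.issparse(sparse) else sparse
print(result.shape)                      # (3, 3), unchanged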
12,390
mattloper/chumpy
chumpy/ch.py
Ch.sid
def sid(self):
    """Semantic id."""
    pnames = list(self.terms)+list(self.dterms)
    pnames.sort()
    return (self.__class__, tuple([(k, id(self.__dict__[k])) for k in pnames if k in self.__dict__]))
python
def sid(self):
    """Semantic id."""
    pnames = list(self.terms)+list(self.dterms)
    pnames.sort()
    return (self.__class__, tuple([(k, id(self.__dict__[k])) for k in pnames if k in self.__dict__]))
[ "def", "sid", "(", "self", ")", ":", "pnames", "=", "list", "(", "self", ".", "terms", ")", "+", "list", "(", "self", ".", "dterms", ")", "pnames", ".", "sort", "(", ")", "return", "(", "self", ".", "__class__", ",", "tuple", "(", "[", "(", "k", ",", "id", "(", "self", ".", "__dict__", "[", "k", "]", ")", ")", "for", "k", "in", "pnames", "if", "k", "in", "self", ".", "__dict__", "]", ")", ")" ]
Semantic id.
[ "Semantic", "id", "." ]
a3cfdb1be3c8265c369c507b22f6f3f89414c772
https://github.com/mattloper/chumpy/blob/a3cfdb1be3c8265c369c507b22f6f3f89414c772/chumpy/ch.py#L185-L189
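A toy demonstration (hypothetical Node class, not chumpy's) of the identity-based key that sid builds: it pairs each term name with the id() of the bound object, so two nodes whose terms reference the same underlying object compare equal, while rebinding a term to an equal-valued copy changes the id.

class Node(object):
    terms, dterms = ('a',), ('b',)

    def sid(self):
        pnames = sorted(list(self.terms) + list(self.dterms))
        return (self.__class__,
                tuple((k, id(self.__dict__[k])) for k in pnames
                      if k in self.__dict__))

shared = [1, 2, 3]
n1, n2 = Node(), Node()
n1.a = n2.a = shared          # same object => same id() => equal sids
print(n1.sid() == n2.sid())   # True
n2.a = list(shared)           # equal value, different object
print(n1.sid() == n2.sid())   # False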
12,391
mattloper/chumpy
chumpy/ch.py
Ch.compute_dr_wrt
def compute_dr_wrt(self,wrt):
    """Default method for objects that just contain a number or ndarray"""
    if wrt is self: # special base case
        return sp.eye(self.x.size, self.x.size)
        #return np.array([[1]])
    return None
python
def compute_dr_wrt(self,wrt):
    """Default method for objects that just contain a number or ndarray"""
    if wrt is self: # special base case
        return sp.eye(self.x.size, self.x.size)
        #return np.array([[1]])
    return None
[ "def", "compute_dr_wrt", "(", "self", ",", "wrt", ")", ":", "if", "wrt", "is", "self", ":", "# special base case ", "return", "sp", ".", "eye", "(", "self", ".", "x", ".", "size", ",", "self", ".", "x", ".", "size", ")", "#return np.array([[1]])", "return", "None" ]
Default method for objects that just contain a number or ndarray
[ "Default", "method", "for", "objects", "that", "just", "contain", "a", "number", "or", "ndarray" ]
a3cfdb1be3c8265c369c507b22f6f3f89414c772
https://github.com/mattloper/chumpy/blob/a3cfdb1be3c8265c369c507b22f6f3f89414c772/chumpy/ch.py#L275-L280
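A quick standalone check (assuming scipy) of the base case above: the derivative of a variable with respect to itself is a sparse identity matrix sized by the flattened element count of x.

import numpy as np
import scipy.sparse as sp

x = np.zeros((2, 3))              # 6 scalar entries once flattened
jac = sp.eye(x.size, x.size)      # sparse 6x6 identity, i.e. d(x)/d(x)
print(jac.shape, jac.nnz)         # (6, 6) 6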
12,392
juju/charm-helpers
charmhelpers/contrib/amulet/utils.py
AmuletUtils.get_ubuntu_release_from_sentry
def get_ubuntu_release_from_sentry(self, sentry_unit):
    """Get Ubuntu release codename from sentry unit.

    :param sentry_unit: amulet sentry/service unit pointer
    :returns: list of strings - release codename, failure message
    """
    msg = None
    cmd = 'lsb_release -cs'
    release, code = sentry_unit.run(cmd)
    if code == 0:
        self.log.debug('{} lsb_release: {}'.format(
            sentry_unit.info['unit_name'], release))
    else:
        msg = ('{} `{}` returned {} '
               '{}'.format(sentry_unit.info['unit_name'], cmd, release, code))
    if release not in self.ubuntu_releases:
        msg = ("Release ({}) not found in Ubuntu releases "
               "({})".format(release, self.ubuntu_releases))
    return release, msg
python
def get_ubuntu_release_from_sentry(self, sentry_unit):
    """Get Ubuntu release codename from sentry unit.

    :param sentry_unit: amulet sentry/service unit pointer
    :returns: list of strings - release codename, failure message
    """
    msg = None
    cmd = 'lsb_release -cs'
    release, code = sentry_unit.run(cmd)
    if code == 0:
        self.log.debug('{} lsb_release: {}'.format(
            sentry_unit.info['unit_name'], release))
    else:
        msg = ('{} `{}` returned {} '
               '{}'.format(sentry_unit.info['unit_name'], cmd, release, code))
    if release not in self.ubuntu_releases:
        msg = ("Release ({}) not found in Ubuntu releases "
               "({})".format(release, self.ubuntu_releases))
    return release, msg
[ "def", "get_ubuntu_release_from_sentry", "(", "self", ",", "sentry_unit", ")", ":", "msg", "=", "None", "cmd", "=", "'lsb_release -cs'", "release", ",", "code", "=", "sentry_unit", ".", "run", "(", "cmd", ")", "if", "code", "==", "0", ":", "self", ".", "log", ".", "debug", "(", "'{} lsb_release: {}'", ".", "format", "(", "sentry_unit", ".", "info", "[", "'unit_name'", "]", ",", "release", ")", ")", "else", ":", "msg", "=", "(", "'{} `{}` returned {} '", "'{}'", ".", "format", "(", "sentry_unit", ".", "info", "[", "'unit_name'", "]", ",", "cmd", ",", "release", ",", "code", ")", ")", "if", "release", "not", "in", "self", ".", "ubuntu_releases", ":", "msg", "=", "(", "\"Release ({}) not found in Ubuntu releases \"", "\"({})\"", ".", "format", "(", "release", ",", "self", ".", "ubuntu_releases", ")", ")", "return", "release", ",", "msg" ]
Get Ubuntu release codename from sentry unit.

:param sentry_unit: amulet sentry/service unit pointer
:returns: list of strings - release codename, failure message
[ "Get", "Ubuntu", "release", "codename", "from", "sentry", "unit", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/amulet/utils.py#L83-L102
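For reference, a local equivalent of the command the helper runs on the remote unit (this sketch uses subprocess on the current machine rather than an amulet sentry, which is an assumption for illustration only):

import subprocess

# `lsb_release -cs` prints only the release codename, e.g. 'xenial'
release = subprocess.check_output(['lsb_release', '-cs']).decode().strip()
print(release)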
12,393
juju/charm-helpers
charmhelpers/contrib/amulet/utils.py
AmuletUtils.validate_services
def validate_services(self, commands):
    """Validate that lists of commands succeed on service units.  Can be
    used to verify system services are running on the corresponding
    service units.

    :param commands: dict with sentry keys and arbitrary command list vals
    :returns: None if successful, Failure string message otherwise
    """
    self.log.debug('Checking status of system services...')

    # /!\ DEPRECATION WARNING (beisner):
    # New and existing tests should be rewritten to use
    # validate_services_by_name() as it is aware of init systems.
    self.log.warn('DEPRECATION WARNING: use '
                  'validate_services_by_name instead of validate_services '
                  'due to init system differences.')

    for k, v in six.iteritems(commands):
        for cmd in v:
            output, code = k.run(cmd)
            self.log.debug('{} `{}` returned '
                           '{}'.format(k.info['unit_name'], cmd, code))
            if code != 0:
                return "command `{}` returned {}".format(cmd, str(code))
    return None
python
def validate_services(self, commands):
    """Validate that lists of commands succeed on service units.  Can be
    used to verify system services are running on the corresponding
    service units.

    :param commands: dict with sentry keys and arbitrary command list vals
    :returns: None if successful, Failure string message otherwise
    """
    self.log.debug('Checking status of system services...')

    # /!\ DEPRECATION WARNING (beisner):
    # New and existing tests should be rewritten to use
    # validate_services_by_name() as it is aware of init systems.
    self.log.warn('DEPRECATION WARNING: use '
                  'validate_services_by_name instead of validate_services '
                  'due to init system differences.')

    for k, v in six.iteritems(commands):
        for cmd in v:
            output, code = k.run(cmd)
            self.log.debug('{} `{}` returned '
                           '{}'.format(k.info['unit_name'], cmd, code))
            if code != 0:
                return "command `{}` returned {}".format(cmd, str(code))
    return None
[ "def", "validate_services", "(", "self", ",", "commands", ")", ":", "self", ".", "log", ".", "debug", "(", "'Checking status of system services...'", ")", "# /!\\ DEPRECATION WARNING (beisner):", "# New and existing tests should be rewritten to use", "# validate_services_by_name() as it is aware of init systems.", "self", ".", "log", ".", "warn", "(", "'DEPRECATION WARNING: use '", "'validate_services_by_name instead of validate_services '", "'due to init system differences.'", ")", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "commands", ")", ":", "for", "cmd", "in", "v", ":", "output", ",", "code", "=", "k", ".", "run", "(", "cmd", ")", "self", ".", "log", ".", "debug", "(", "'{} `{}` returned '", "'{}'", ".", "format", "(", "k", ".", "info", "[", "'unit_name'", "]", ",", "cmd", ",", "code", ")", ")", "if", "code", "!=", "0", ":", "return", "\"command `{}` returned {}\"", ".", "format", "(", "cmd", ",", "str", "(", "code", ")", ")", "return", "None" ]
Validate that lists of commands succeed on service units.  Can be used
to verify system services are running on the corresponding service units.

:param commands: dict with sentry keys and arbitrary command list vals
:returns: None if successful, Failure string message otherwise
[ "Validate", "that", "lists", "of", "commands", "succeed", "on", "service", "units", ".", "Can", "be", "used", "to", "verify", "system", "services", "are", "running", "on", "the", "corresponding", "service", "units", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/amulet/utils.py#L104-L129
12,394
juju/charm-helpers
charmhelpers/contrib/amulet/utils.py
AmuletUtils.validate_services_by_name
def validate_services_by_name(self, sentry_services):
    """Validate system service status by service name, automatically
    detecting init system based on Ubuntu release codename.

    :param sentry_services: dict with sentry keys and svc list values
    :returns: None if successful, Failure string message otherwise
    """
    self.log.debug('Checking status of system services...')

    # Point at which systemd became a thing
    systemd_switch = self.ubuntu_releases.index('vivid')

    for sentry_unit, services_list in six.iteritems(sentry_services):
        # Get lsb_release codename from unit
        release, ret = self.get_ubuntu_release_from_sentry(sentry_unit)
        if ret:
            return ret

        for service_name in services_list:
            if (self.ubuntu_releases.index(release) >= systemd_switch or
                    service_name in ['rabbitmq-server', 'apache2',
                                     'memcached']):
                # init is systemd (or regular sysv)
                cmd = 'sudo service {} status'.format(service_name)
                output, code = sentry_unit.run(cmd)
                service_running = code == 0
            elif self.ubuntu_releases.index(release) < systemd_switch:
                # init is upstart
                cmd = 'sudo status {}'.format(service_name)
                output, code = sentry_unit.run(cmd)
                service_running = code == 0 and "start/running" in output

            self.log.debug('{} `{}` returned '
                           '{}'.format(sentry_unit.info['unit_name'],
                                       cmd, code))
            if not service_running:
                return u"command `{}` returned {} {}".format(
                    cmd, output, str(code))
    return None
python
def validate_services_by_name(self, sentry_services):
    """Validate system service status by service name, automatically
    detecting init system based on Ubuntu release codename.

    :param sentry_services: dict with sentry keys and svc list values
    :returns: None if successful, Failure string message otherwise
    """
    self.log.debug('Checking status of system services...')

    # Point at which systemd became a thing
    systemd_switch = self.ubuntu_releases.index('vivid')

    for sentry_unit, services_list in six.iteritems(sentry_services):
        # Get lsb_release codename from unit
        release, ret = self.get_ubuntu_release_from_sentry(sentry_unit)
        if ret:
            return ret

        for service_name in services_list:
            if (self.ubuntu_releases.index(release) >= systemd_switch or
                    service_name in ['rabbitmq-server', 'apache2',
                                     'memcached']):
                # init is systemd (or regular sysv)
                cmd = 'sudo service {} status'.format(service_name)
                output, code = sentry_unit.run(cmd)
                service_running = code == 0
            elif self.ubuntu_releases.index(release) < systemd_switch:
                # init is upstart
                cmd = 'sudo status {}'.format(service_name)
                output, code = sentry_unit.run(cmd)
                service_running = code == 0 and "start/running" in output

            self.log.debug('{} `{}` returned '
                           '{}'.format(sentry_unit.info['unit_name'],
                                       cmd, code))
            if not service_running:
                return u"command `{}` returned {} {}".format(
                    cmd, output, str(code))
    return None
[ "def", "validate_services_by_name", "(", "self", ",", "sentry_services", ")", ":", "self", ".", "log", ".", "debug", "(", "'Checking status of system services...'", ")", "# Point at which systemd became a thing", "systemd_switch", "=", "self", ".", "ubuntu_releases", ".", "index", "(", "'vivid'", ")", "for", "sentry_unit", ",", "services_list", "in", "six", ".", "iteritems", "(", "sentry_services", ")", ":", "# Get lsb_release codename from unit", "release", ",", "ret", "=", "self", ".", "get_ubuntu_release_from_sentry", "(", "sentry_unit", ")", "if", "ret", ":", "return", "ret", "for", "service_name", "in", "services_list", ":", "if", "(", "self", ".", "ubuntu_releases", ".", "index", "(", "release", ")", ">=", "systemd_switch", "or", "service_name", "in", "[", "'rabbitmq-server'", ",", "'apache2'", ",", "'memcached'", "]", ")", ":", "# init is systemd (or regular sysv)", "cmd", "=", "'sudo service {} status'", ".", "format", "(", "service_name", ")", "output", ",", "code", "=", "sentry_unit", ".", "run", "(", "cmd", ")", "service_running", "=", "code", "==", "0", "elif", "self", ".", "ubuntu_releases", ".", "index", "(", "release", ")", "<", "systemd_switch", ":", "# init is upstart", "cmd", "=", "'sudo status {}'", ".", "format", "(", "service_name", ")", "output", ",", "code", "=", "sentry_unit", ".", "run", "(", "cmd", ")", "service_running", "=", "code", "==", "0", "and", "\"start/running\"", "in", "output", "self", ".", "log", ".", "debug", "(", "'{} `{}` returned '", "'{}'", ".", "format", "(", "sentry_unit", ".", "info", "[", "'unit_name'", "]", ",", "cmd", ",", "code", ")", ")", "if", "not", "service_running", ":", "return", "u\"command `{}` returned {} {}\"", ".", "format", "(", "cmd", ",", "output", ",", "str", "(", "code", ")", ")", "return", "None" ]
Validate system service status by service name, automatically
detecting init system based on Ubuntu release codename.

:param sentry_services: dict with sentry keys and svc list values
:returns: None if successful, Failure string message otherwise
[ "Validate", "system", "service", "status", "by", "service", "name", "automatically", "detecting", "init", "system", "based", "on", "Ubuntu", "release", "codename", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/amulet/utils.py#L131-L169
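A standalone sketch (with an assumed, oldest-to-newest ordered release list, as self.ubuntu_releases is used above) of the index comparison that picks the init system: releases at or after 'vivid' get `service <name> status`, earlier ones get upstart's `status <name>`.

# Ordered oldest-to-newest, mirroring how self.ubuntu_releases is consumed
ubuntu_releases = ['precise', 'trusty', 'utopic', 'vivid', 'wily', 'xenial']
systemd_switch = ubuntu_releases.index('vivid')

for release in ('trusty', 'xenial'):
    if ubuntu_releases.index(release) >= systemd_switch:
        print(release, '-> sudo service <name> status')   # systemd/sysv
    else:
        print(release, '-> sudo status <name>')           # upstart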
12,395
juju/charm-helpers
charmhelpers/contrib/amulet/utils.py
AmuletUtils._get_config
def _get_config(self, unit, filename):
    """Get a ConfigParser object for parsing a unit's config file."""
    file_contents = unit.file_contents(filename)

    # NOTE(beisner): by default, ConfigParser does not handle options
    # with no value, such as the flags used in the mysql my.cnf file.
    # https://bugs.python.org/issue7005
    config = configparser.ConfigParser(allow_no_value=True)
    config.readfp(io.StringIO(file_contents))
    return config
python
def _get_config(self, unit, filename):
    """Get a ConfigParser object for parsing a unit's config file."""
    file_contents = unit.file_contents(filename)

    # NOTE(beisner): by default, ConfigParser does not handle options
    # with no value, such as the flags used in the mysql my.cnf file.
    # https://bugs.python.org/issue7005
    config = configparser.ConfigParser(allow_no_value=True)
    config.readfp(io.StringIO(file_contents))
    return config
[ "def", "_get_config", "(", "self", ",", "unit", ",", "filename", ")", ":", "file_contents", "=", "unit", ".", "file_contents", "(", "filename", ")", "# NOTE(beisner): by default, ConfigParser does not handle options", "# with no value, such as the flags used in the mysql my.cnf file.", "# https://bugs.python.org/issue7005", "config", "=", "configparser", ".", "ConfigParser", "(", "allow_no_value", "=", "True", ")", "config", ".", "readfp", "(", "io", ".", "StringIO", "(", "file_contents", ")", ")", "return", "config" ]
Get a ConfigParser object for parsing a unit's config file.
[ "Get", "a", "ConfigParser", "object", "for", "parsing", "a", "unit", "s", "config", "file", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/amulet/utils.py#L171-L180
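A self-contained illustration (Python 3, using read_string for brevity) of the allow_no_value workaround noted in the comment above: valueless flags such as those in a mysql my.cnf would otherwise raise a parsing error, but with the flag enabled they parse with a value of None.

import configparser

contents = "[mysqld]\nskip-external-locking\nmax_connections = 100\n"
config = configparser.ConfigParser(allow_no_value=True)  # without this, ParsingError
config.read_string(contents)
print(config.get('mysqld', 'skip-external-locking'))  # None (flag, no value)
print(config.get('mysqld', 'max_connections'))        # 100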
12,396
juju/charm-helpers
charmhelpers/contrib/amulet/utils.py
AmuletUtils.validate_config_data
def validate_config_data(self, sentry_unit, config_file, section, expected):
    """Validate config file data.

    Verify that the specified section of the config file contains
    the expected option key:value pairs.

    Compare expected dictionary data vs actual dictionary data.
    The values in the 'expected' dictionary can be strings, bools, ints,
    longs, or can be a function that evaluates a variable and returns a
    bool.
    """
    self.log.debug('Validating config file data ({} in {} on {})'
                   '...'.format(section, config_file,
                                sentry_unit.info['unit_name']))
    config = self._get_config(sentry_unit, config_file)

    if section != 'DEFAULT' and not config.has_section(section):
        return "section [{}] does not exist".format(section)

    for k in expected.keys():
        if not config.has_option(section, k):
            return "section [{}] is missing option {}".format(section, k)

        actual = config.get(section, k)
        v = expected[k]
        if (isinstance(v, six.string_types) or
                isinstance(v, bool) or
                isinstance(v, six.integer_types)):
            # handle explicit values
            if actual != v:
                return "section [{}] {}:{} != expected {}:{}".format(
                    section, k, actual, k, expected[k])
        # handle function pointers, such as not_null or valid_ip
        elif not v(actual):
            return "section [{}] {}:{} != expected {}:{}".format(
                section, k, actual, k, expected[k])
    return None
python
def validate_config_data(self, sentry_unit, config_file, section, expected):
    """Validate config file data.

    Verify that the specified section of the config file contains
    the expected option key:value pairs.

    Compare expected dictionary data vs actual dictionary data.
    The values in the 'expected' dictionary can be strings, bools, ints,
    longs, or can be a function that evaluates a variable and returns a
    bool.
    """
    self.log.debug('Validating config file data ({} in {} on {})'
                   '...'.format(section, config_file,
                                sentry_unit.info['unit_name']))
    config = self._get_config(sentry_unit, config_file)

    if section != 'DEFAULT' and not config.has_section(section):
        return "section [{}] does not exist".format(section)

    for k in expected.keys():
        if not config.has_option(section, k):
            return "section [{}] is missing option {}".format(section, k)

        actual = config.get(section, k)
        v = expected[k]
        if (isinstance(v, six.string_types) or
                isinstance(v, bool) or
                isinstance(v, six.integer_types)):
            # handle explicit values
            if actual != v:
                return "section [{}] {}:{} != expected {}:{}".format(
                    section, k, actual, k, expected[k])
        # handle function pointers, such as not_null or valid_ip
        elif not v(actual):
            return "section [{}] {}:{} != expected {}:{}".format(
                section, k, actual, k, expected[k])
    return None
[ "def", "validate_config_data", "(", "self", ",", "sentry_unit", ",", "config_file", ",", "section", ",", "expected", ")", ":", "self", ".", "log", ".", "debug", "(", "'Validating config file data ({} in {} on {})'", "'...'", ".", "format", "(", "section", ",", "config_file", ",", "sentry_unit", ".", "info", "[", "'unit_name'", "]", ")", ")", "config", "=", "self", ".", "_get_config", "(", "sentry_unit", ",", "config_file", ")", "if", "section", "!=", "'DEFAULT'", "and", "not", "config", ".", "has_section", "(", "section", ")", ":", "return", "\"section [{}] does not exist\"", ".", "format", "(", "section", ")", "for", "k", "in", "expected", ".", "keys", "(", ")", ":", "if", "not", "config", ".", "has_option", "(", "section", ",", "k", ")", ":", "return", "\"section [{}] is missing option {}\"", ".", "format", "(", "section", ",", "k", ")", "actual", "=", "config", ".", "get", "(", "section", ",", "k", ")", "v", "=", "expected", "[", "k", "]", "if", "(", "isinstance", "(", "v", ",", "six", ".", "string_types", ")", "or", "isinstance", "(", "v", ",", "bool", ")", "or", "isinstance", "(", "v", ",", "six", ".", "integer_types", ")", ")", ":", "# handle explicit values", "if", "actual", "!=", "v", ":", "return", "\"section [{}] {}:{} != expected {}:{}\"", ".", "format", "(", "section", ",", "k", ",", "actual", ",", "k", ",", "expected", "[", "k", "]", ")", "# handle function pointers, such as not_null or valid_ip", "elif", "not", "v", "(", "actual", ")", ":", "return", "\"section [{}] {}:{} != expected {}:{}\"", ".", "format", "(", "section", ",", "k", ",", "actual", ",", "k", ",", "expected", "[", "k", "]", ")", "return", "None" ]
Validate config file data.

Verify that the specified section of the config file contains
the expected option key:value pairs.

Compare expected dictionary data vs actual dictionary data.
The values in the 'expected' dictionary can be strings, bools, ints,
longs, or can be a function that evaluates a variable and returns a bool.
[ "Validate", "config", "file", "data", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/amulet/utils.py#L182-L219
12,397
juju/charm-helpers
charmhelpers/contrib/amulet/utils.py
AmuletUtils._validate_dict_data
def _validate_dict_data(self, expected, actual):
    """Validate dictionary data.

    Compare expected dictionary data vs actual dictionary data.
    The values in the 'expected' dictionary can be strings, bools, ints,
    longs, or can be a function that evaluates a variable and returns a
    bool.
    """
    self.log.debug('actual: {}'.format(repr(actual)))
    self.log.debug('expected: {}'.format(repr(expected)))

    for k, v in six.iteritems(expected):
        if k in actual:
            if (isinstance(v, six.string_types) or
                    isinstance(v, bool) or
                    isinstance(v, six.integer_types)):
                # handle explicit values
                if v != actual[k]:
                    return "{}:{}".format(k, actual[k])
            # handle function pointers, such as not_null or valid_ip
            elif not v(actual[k]):
                return "{}:{}".format(k, actual[k])
        else:
            return "key '{}' does not exist".format(k)
    return None
python
def _validate_dict_data(self, expected, actual):
    """Validate dictionary data.

    Compare expected dictionary data vs actual dictionary data.
    The values in the 'expected' dictionary can be strings, bools, ints,
    longs, or can be a function that evaluates a variable and returns a
    bool.
    """
    self.log.debug('actual: {}'.format(repr(actual)))
    self.log.debug('expected: {}'.format(repr(expected)))

    for k, v in six.iteritems(expected):
        if k in actual:
            if (isinstance(v, six.string_types) or
                    isinstance(v, bool) or
                    isinstance(v, six.integer_types)):
                # handle explicit values
                if v != actual[k]:
                    return "{}:{}".format(k, actual[k])
            # handle function pointers, such as not_null or valid_ip
            elif not v(actual[k]):
                return "{}:{}".format(k, actual[k])
        else:
            return "key '{}' does not exist".format(k)
    return None
[ "def", "_validate_dict_data", "(", "self", ",", "expected", ",", "actual", ")", ":", "self", ".", "log", ".", "debug", "(", "'actual: {}'", ".", "format", "(", "repr", "(", "actual", ")", ")", ")", "self", ".", "log", ".", "debug", "(", "'expected: {}'", ".", "format", "(", "repr", "(", "expected", ")", ")", ")", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "expected", ")", ":", "if", "k", "in", "actual", ":", "if", "(", "isinstance", "(", "v", ",", "six", ".", "string_types", ")", "or", "isinstance", "(", "v", ",", "bool", ")", "or", "isinstance", "(", "v", ",", "six", ".", "integer_types", ")", ")", ":", "# handle explicit values", "if", "v", "!=", "actual", "[", "k", "]", ":", "return", "\"{}:{}\"", ".", "format", "(", "k", ",", "actual", "[", "k", "]", ")", "# handle function pointers, such as not_null or valid_ip", "elif", "not", "v", "(", "actual", "[", "k", "]", ")", ":", "return", "\"{}:{}\"", ".", "format", "(", "k", ",", "actual", "[", "k", "]", ")", "else", ":", "return", "\"key '{}' does not exist\"", ".", "format", "(", "k", ")", "return", "None" ]
Validate dictionary data.

Compare expected dictionary data vs actual dictionary data.
The values in the 'expected' dictionary can be strings, bools, ints,
longs, or can be a function that evaluates a variable and returns a bool.
[ "Validate", "dictionary", "data", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/amulet/utils.py#L221-L245
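A runnable sketch of the expected-vs-actual comparison used above (a standalone copy of the logic, with six replaced by plain Python 3 types for brevity): literal values are compared with !=, while callables are evaluated as predicates on the actual value.

def validate_dict(expected, actual):
    for k, v in expected.items():
        if k not in actual:
            return "key '{}' does not exist".format(k)
        if isinstance(v, (str, bool, int)):
            if v != actual[k]:                    # literal mismatch
                return "{}:{}".format(k, actual[k])
        elif not v(actual[k]):                    # predicate failed
            return "{}:{}".format(k, actual[k])
    return None

actual = {'private-address': '10.0.0.4', 'port': '5672'}
expected = {'private-address': lambda ip: ip.count('.') == 3,  # valid_ip-style
            'port': '5672'}
print(validate_dict(expected, actual))   # None => everything matched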
12,398
juju/charm-helpers
charmhelpers/contrib/amulet/utils.py
AmuletUtils.validate_relation_data
def validate_relation_data(self, sentry_unit, relation, expected):
    """Validate actual relation data based on expected relation data."""
    actual = sentry_unit.relation(relation[0], relation[1])
    return self._validate_dict_data(expected, actual)
python
def validate_relation_data(self, sentry_unit, relation, expected):
    """Validate actual relation data based on expected relation data."""
    actual = sentry_unit.relation(relation[0], relation[1])
    return self._validate_dict_data(expected, actual)
[ "def", "validate_relation_data", "(", "self", ",", "sentry_unit", ",", "relation", ",", "expected", ")", ":", "actual", "=", "sentry_unit", ".", "relation", "(", "relation", "[", "0", "]", ",", "relation", "[", "1", "]", ")", "return", "self", ".", "_validate_dict_data", "(", "expected", ",", "actual", ")" ]
Validate actual relation data based on expected relation data.
[ "Validate", "actual", "relation", "data", "based", "on", "expected", "relation", "data", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/amulet/utils.py#L247-L250
12,399
juju/charm-helpers
charmhelpers/contrib/amulet/utils.py
AmuletUtils._validate_list_data
def _validate_list_data(self, expected, actual):
    """Compare expected list vs actual list data."""
    for e in expected:
        if e not in actual:
            return "expected item {} not found in actual list".format(e)
    return None
python
def _validate_list_data(self, expected, actual):
    """Compare expected list vs actual list data."""
    for e in expected:
        if e not in actual:
            return "expected item {} not found in actual list".format(e)
    return None
[ "def", "_validate_list_data", "(", "self", ",", "expected", ",", "actual", ")", ":", "for", "e", "in", "expected", ":", "if", "e", "not", "in", "actual", ":", "return", "\"expected item {} not found in actual list\"", ".", "format", "(", "e", ")", "return", "None" ]
Compare expected list vs actual list data.
[ "Compare", "expected", "list", "vs", "actual", "list", "data", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/amulet/utils.py#L252-L257
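A tiny usage sketch of the subset check above (standalone copy of the logic): order and extra items in the actual list are ignored; only a missing expected item produces a failure message.

def validate_list(expected, actual):
    for e in expected:
        if e not in actual:
            return "expected item {} not found in actual list".format(e)
    return None

print(validate_list(['a', 'b'], ['b', 'c', 'a']))  # None: all items present
print(validate_list(['a', 'z'], ['b', 'c', 'a']))  # failure message for 'z'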