Join this array with another array.
def concatenate(self, arry, axis=0):
    """
    Join this array with another array.

    Parameters
    ----------
    arry : ndarray or BoltArrayLocal
        Another array to concatenate with

    axis : int, optional, default=0
        The axis along which arrays will be joined.

    Returns
    -------
    BoltArrayLocal
    """
    if isinstance(arry, ndarray):
        from bolt import concatenate
        return concatenate((self, arry), axis)
    else:
        raise ValueError("other must be local array, got %s" % type(arry))
Converts a BoltArrayLocal into a BoltArraySpark
def tospark(self, sc, axis=0):
    """
    Converts a BoltArrayLocal into a BoltArraySpark

    Parameters
    ----------
    sc : SparkContext
        The SparkContext which will be used to create the BoltArraySpark

    axis : tuple or int, optional, default=0
        The axis (or axes) across which this array will be parallelized

    Returns
    -------
    BoltArraySpark
    """
    from bolt import array
    return array(self.toarray(), sc, axis=axis)
Converts a BoltArrayLocal into an RDD
def tordd(self, sc, axis=0):
    """
    Converts a BoltArrayLocal into an RDD

    Parameters
    ----------
    sc : SparkContext
        The SparkContext which will be used to create the BoltArraySpark

    axis : tuple or int, optional, default=0
        The axis (or axes) across which this array will be parallelized

    Returns
    -------
    RDD[(tuple, ndarray)]
    """
    from bolt import array
    return array(self.toarray(), sc, axis=axis).tordd()
Make an intermediate RDD where all records are combined into a list of keys and a larger ndarray along a new 0th dimension.
def stack(self, size):
    """
    Make an intermediate RDD where all records are combined into a
    list of keys and a larger ndarray along a new 0th dimension.
    """
    def tostacks(partition):
        keys = []
        arrs = []
        for key, arr in partition:
            keys.append(key)
            arrs.append(arr)
            if size and 0 <= size <= len(keys):
                yield (keys, asarray(arrs))
                keys, arrs = [], []
        if keys:
            yield (keys, asarray(arrs))

    rdd = self._rdd.mapPartitions(tostacks)
    return self._constructor(rdd).__finalize__(self)
Unstack array and return a new BoltArraySpark via flatMap().
def unstack(self):
    """
    Unstack array and return a new BoltArraySpark via flatMap().
    """
    from bolt.spark.array import BoltArraySpark

    if self._rekeyed:
        rdd = self._rdd
    else:
        rdd = self._rdd.flatMap(lambda kv: zip(kv[0], list(kv[1])))

    return BoltArraySpark(rdd, shape=self.shape, split=self.split)
Apply a function on each subarray.
def map(self, func):
    """
    Apply a function on each subarray.

    Parameters
    ----------
    func : function
        This is applied to each value in the intermediate RDD.

    Returns
    -------
    StackedArray
    """
    vshape = self.shape[self.split:]
    x = self._rdd.values().first()
    if x.shape == vshape:
        a, b = asarray([x]), asarray([x, x])
    else:
        a, b = x, concatenate((x, x))

    try:
        atest = func(a)
        btest = func(b)
    except Exception as e:
        raise RuntimeError("Error evaluating function on test array, got error:\n %s" % e)

    if not (isinstance(atest, ndarray) and isinstance(btest, ndarray)):
        raise ValueError("Function must return ndarray")

    # different shapes map to the same new shape
    elif atest.shape == btest.shape:
        if self._rekeyed is True:
            # we've already rekeyed
            rdd = self._rdd.map(lambda kv: (kv[0], func(kv[1])))
            shape = (self.shape[0],) + atest.shape
        else:
            # do the rekeying
            count, rdd = zip_with_index(self._rdd.values())
            rdd = rdd.map(lambda kv: ((kv[1],), func(kv[0])))
            shape = (count,) + atest.shape
        split = 1
        rekeyed = True

    # different shapes stay different (along the first dimension)
    elif atest.shape[0] == a.shape[0] and btest.shape[0] == b.shape[0]:
        shape = self.shape[0:self.split] + atest.shape[1:]
        split = self.split
        rdd = self._rdd.map(lambda kv: (kv[0], func(kv[1])))
        rekeyed = self._rekeyed

    else:
        raise ValueError("Cannot infer effect of function on shape")

    return self._constructor(rdd, rekeyed=rekeyed, shape=shape, split=split).__finalize__(self)
Split values of distributed array into chunks.
def _chunk(self, size="150", axis=None, padding=None):
    """
    Split values of distributed array into chunks.

    Transforms an underlying pair RDD of (key, value) into records of
    the form: (key, chunk id), (chunked value). Here, chunk id is a
    tuple identifying the chunk and chunked value is a subset of the
    data from each original value, that has been divided along the
    specified dimensions.

    Parameters
    ----------
    size : str or tuple or int
        If str, the average size (in KB) of the chunks in all value dimensions.
        If int or tuple, an explicit specification of the number of chunks in
        each value dimension.

    axis : tuple, optional, default=None
        One or more axes to estimate chunks for, if provided any
        other axes will use one chunk.

    padding : tuple or int, optional, default=None
        Number of elements per dimension that will overlap with the adjacent chunk.
        If a tuple, specifies padding along each chunked dimension; if an int, the
        same padding will be applied to all chunked dimensions.
    """
    if self.split == len(self.shape) and padding is None:
        self._rdd = self._rdd.map(lambda kv: (kv[0]+(0,), array(kv[1], ndmin=1)))
        self._shape = self._shape + (1,)
        self._plan = (1,)
        self._padding = array([0])
        return self

    rdd = self._rdd
    self._plan, self._padding = self.getplan(size, axis, padding)

    if any([x + y > z for x, y, z in zip(self.plan, self.padding, self.vshape)]):
        raise ValueError("Chunk sizes %s plus padding sizes %s cannot exceed value dimensions %s along any axis"
                         % (tuple(self.plan), tuple(self.padding), tuple(self.vshape)))

    if any([x > y for x, y in zip(self.padding, self.plan)]):
        raise ValueError("Padding sizes %s cannot exceed chunk sizes %s along any axis"
                         % (tuple(self.padding), tuple(self.plan)))

    slices = self.getslices(self.plan, self.padding, self.vshape)
    labels = list(product(*[list(enumerate(s)) for s in slices]))
    scheme = [list(zip(*s)) for s in labels]

    def _chunk(record):
        k, v = record[0], record[1]
        for (chk, slc) in scheme:
            if type(k) is int:
                k = (k,)
            yield k + chk, v[slc]

    rdd = rdd.flatMap(_chunk)
    return self._constructor(rdd, shape=self.shape, split=self.split,
                             dtype=self.dtype, plan=self.plan,
                             padding=self.padding, ordered=self._ordered)
Convert a chunked array back into a full array with (key, value) pairs, where key is a tuple of indices and value is an ndarray.
def unchunk(self):
    """
    Convert a chunked array back into a full array with (key, value) pairs
    where key is a tuple of indices, and value is an ndarray.
    """
    plan, padding, vshape, split = self.plan, self.padding, self.vshape, self.split
    nchunks = self.getnumber(plan, vshape)
    full_shape = concatenate((nchunks, plan))
    n = len(vshape)
    perm = concatenate(list(zip(range(n), range(n, 2*n))))

    if self.uniform:
        def _unchunk(it):
            ordered = sorted(it, key=lambda kv: kv[0][split:])
            keys, values = zip(*ordered)
            yield keys[0][:split], asarray(values).reshape(full_shape).transpose(perm).reshape(vshape)
    else:
        def _unchunk(it):
            ordered = sorted(it, key=lambda kv: kv[0][split:])
            keys, values = zip(*ordered)
            k_chks = [k[split:] for k in keys]
            arr = empty(nchunks, dtype='object')
            for (i, d) in zip(k_chks, values):
                arr[i] = d
            yield keys[0][:split], allstack(arr.tolist())

    # remove padding
    if self.padded:
        removepad = self.removepad
        rdd = self._rdd.map(lambda kv: (kv[0], removepad(kv[0][split:], kv[1], nchunks, padding, axes=range(n))))
    else:
        rdd = self._rdd

    # skip partitionBy if there is not actually any chunking
    if array_equal(self.plan, self.vshape):
        rdd = rdd.map(lambda kv: (kv[0][:split], kv[1]))
        ordered = self._ordered
    else:
        ranges = self.kshape
        npartitions = int(prod(ranges))
        if len(self.kshape) == 0:
            partitioner = lambda k: 0
        else:
            partitioner = lambda k: ravel_multi_index(k[:split], ranges)
        rdd = rdd.partitionBy(numPartitions=npartitions, partitionFunc=partitioner).mapPartitions(_unchunk)
        ordered = True

    if array_equal(self.vshape, [1]):
        rdd = rdd.mapValues(lambda v: squeeze(v))
        newshape = self.shape[:-1]
    else:
        newshape = self.shape

    return BoltArraySpark(rdd, shape=newshape, split=self._split,
                          dtype=self.dtype, ordered=ordered)
Move indices in the keys into the values.
def keys_to_values(self, axes, size=None):
    """
    Move indices in the keys into the values.

    Padding on these new value-dimensions is not currently supported and is set to 0.

    Parameters
    ----------
    axes : tuple
        Axes from keys to move to values.

    size : tuple, optional, default=None
        Size of chunks for the values along the new dimensions.
        If None, then no chunking for all axes (number of chunks = 1)

    Returns
    -------
    ChunkedArray
    """
    if len(axes) == 0:
        return self

    kmask = self.kmask(axes)

    if size is None:
        size = self.kshape[kmask]

    # update properties
    newplan = r_[size, self.plan]
    newsplit = self._split - len(axes)
    newshape = tuple(r_[self.kshape[~kmask], self.kshape[kmask], self.vshape].astype(int).tolist())
    newpadding = r_[zeros(len(axes), dtype=int), self.padding]

    result = self._constructor(None, shape=newshape, split=newsplit, dtype=self.dtype,
                               plan=newplan, padding=newpadding, ordered=True)

    # convert keys into chunk + within-chunk label
    split = self.split

    def _relabel(record):
        k, data = record
        keys, chks = asarray(k[:split], 'int'), k[split:]
        movingkeys, stationarykeys = keys[kmask], keys[~kmask]
        newchks = [int(m) for m in movingkeys/size]  # element-wise integer division that works in Python 2 and 3
        labels = mod(movingkeys, size)
        return tuple(stationarykeys) + tuple(newchks) + tuple(chks) + tuple(labels), data

    rdd = self._rdd.map(_relabel)

    # group the new chunks together
    nchunks = result.getnumber(result.plan, result.vshape)
    npartitions = int(prod(result.kshape) * prod(nchunks))
    ranges = tuple(result.kshape) + tuple(nchunks)
    n = len(axes)
    if n == 0:
        s = slice(None)
    else:
        s = slice(-n)
    partitioner = lambda k: ravel_multi_index(k[s], ranges)
    rdd = rdd.partitionBy(numPartitions=npartitions, partitionFunc=partitioner)

    # reassemble the pieces in the chunks by sorting and then stacking
    uniform = result.uniform

    def _rebuild(it):
        ordered = sorted(it, key=lambda kv: kv[0][n:])
        keys, data = zip(*ordered)

        k = keys[0][s]
        labels = asarray([x[-n:] for x in keys])

        if uniform:
            labelshape = tuple(size)
        else:
            labelshape = tuple(amax(labels, axis=0) - amin(labels, axis=0) + 1)

        valshape = data[0].shape
        fullshape = labelshape + valshape
        yield k, asarray(data).reshape(fullshape)

    result._rdd = rdd.mapPartitions(_rebuild)

    if array_equal(self.vshape, [1]):
        result._rdd = result._rdd.mapValues(lambda v: squeeze(v))
        result._shape = result.shape[:-1]
        result._plan = result.plan[:-1]

    return result
Apply an array -> array function on each subarray.
def map(self, func, value_shape=None, dtype=None):
    """
    Apply an array -> array function on each subarray.

    The function can change the shape of the subarray, but only along
    dimensions that are not chunked.

    Parameters
    ----------
    func : function
        Function of a single subarray to apply

    value_shape : tuple, optional, default=None
        Known shape of chunking plan after the map

    dtype : numpy.dtype, optional, default=None
        Known dtype of values resulting from operation

    Returns
    -------
    ChunkedArray
    """
    if value_shape is None or dtype is None:
        # try to compute the size of each mapped element by applying func to a random array
        try:
            mapped = func(random.randn(*self.plan).astype(self.dtype))
        except Exception:
            first = self._rdd.first()
            if first:
                # eval func on the first element
                mapped = func(first[1])
        if value_shape is None:
            value_shape = mapped.shape
        if dtype is None:
            dtype = mapped.dtype

    chunked_dims = where(self.plan != self.vshape)[0]
    unchunked_dims = where(self.plan == self.vshape)[0]

    # check that no dimensions are dropped
    if len(value_shape) != len(self.plan):
        raise NotImplementedError('map on ChunkedArray cannot drop dimensions')

    # check that chunked dimensions did not change shape
    if any([value_shape[i] != self.plan[i] for i in chunked_dims]):
        raise ValueError('map cannot change the sizes of chunked dimensions')

    def check_and_apply(v):
        new = func(v)
        if len(unchunked_dims) > 0:
            if any([new.shape[i] != value_shape[i] for i in unchunked_dims]):
                raise Exception("Map operation did not produce values of uniform shape.")
        if len(chunked_dims) > 0:
            if any([v.shape[i] != new.shape[i] for i in chunked_dims]):
                raise Exception("Map operation changed the size of a chunked dimension")
        return new

    rdd = self._rdd.mapValues(check_and_apply)

    vshape = [value_shape[i] if i in unchunked_dims else self.vshape[i] for i in range(len(self.vshape))]
    newshape = r_[self.kshape, vshape].astype(int).tolist()

    return self._constructor(rdd, shape=tuple(newshape), dtype=dtype,
                             plan=asarray(value_shape)).__finalize__(self)
Apply a generic array -> object function to each subarray.
def map_generic(self, func):
    """
    Apply a generic array -> object function to each subarray.

    The resulting object is a BoltArraySpark of dtype object where the
    blocked dimensions are replaced with indices indicating block ID.
    """
    def process_record(val):
        newval = empty(1, dtype="object")
        newval[0] = func(val)
        return newval

    rdd = self._rdd.mapValues(process_record)

    nchunks = self.getnumber(self.plan, self.vshape)
    newshape = tuple([int(s) for s in r_[self.kshape, nchunks]])
    newsplit = len(self.shape)
    return BoltArraySpark(rdd, shape=newshape, split=newsplit,
                          ordered=self._ordered, dtype="object")
Identify a plan for chunking values along each dimension.
def getplan(self, size="150", axes=None, padding=None):
    """
    Identify a plan for chunking values along each dimension.

    Generates an ndarray with the size (in number of elements) of chunks
    in each dimension. If provided, will estimate chunks for only a
    subset of axes, leaving all others to the full size of the axis.

    Parameters
    ----------
    size : string or tuple
        If str, the average size (in KB) of the chunks in all value dimensions.
        If int/tuple, an explicit specification of the number of chunks in
        each moving value dimension.

    axes : tuple, optional, default=None
        One or more axes to estimate chunks for, if provided any
        other axes will use one chunk.

    padding : tuple or int, optional, default=None
        Size of overlapping padding between chunks in each dimension.
        If tuple, specifies padding along each chunked dimension; if int,
        all dimensions use same padding; if None, no padding
    """
    from numpy import dtype as gettype

    # initialize with all elements in one chunk
    plan = self.vshape

    # check for subset of axes
    if axes is None:
        if isinstance(size, str):
            axes = arange(len(self.vshape))
        else:
            axes = arange(len(size))
    else:
        axes = asarray(axes, 'int')

    # set padding
    pad = array(len(self.vshape)*[0, ])
    if padding is not None:
        pad[axes] = padding

    # set the plan
    if isinstance(size, tuple):
        plan[axes] = size

    elif isinstance(size, str):
        # convert from kilobytes
        size = 1000.0 * float(size)

        # calculate from dtype
        elsize = gettype(self.dtype).itemsize
        nelements = prod(self.vshape)
        dims = self.vshape[self.vmask(axes)]

        if size <= elsize:
            s = ones(len(axes))

        else:
            remsize = 1.0 * nelements * elsize
            s = []
            for (i, d) in enumerate(dims):
                minsize = remsize/d
                if minsize >= size:
                    s.append(1)
                    remsize = minsize
                    continue
                else:
                    s.append(min(d, floor(size/minsize)))
                    s[i+1:] = plan[i+1:]
                    break

        plan[axes] = s

    else:
        raise ValueError("Chunk size not understood, must be tuple or int")

    return plan, pad
Remove the padding from chunks.
def removepad(idx, value, number, padding, axes=None):
    """
    Remove the padding from chunks.

    Given a chunk and its corresponding index, use the plan and padding
    to remove any padding from the chunk along the specified axes.

    Parameters
    ----------
    idx : tuple or array-like
        The chunk index, indicating which chunk this is.

    value : ndarray
        The chunk that goes along with the index.

    number : ndarray or array-like
        The number of chunks along each dimension.

    padding : ndarray or array-like
        The padding scheme.

    axes : tuple, optional, default=None
        The axes (in the values) along which to remove padding.
    """
    if axes is None:
        axes = range(len(number))
    mask = len(number)*[False, ]
    for i in range(len(mask)):
        if i in axes and padding[i] != 0:
            mask[i] = True

    starts = [0 if (i == 0 or not m) else p for (i, m, p) in zip(idx, mask, padding)]
    stops = [None if (i == n-1 or not m) else -p for (i, m, p, n) in zip(idx, mask, padding, number)]
    slices = [slice(i1, i2) for (i1, i2) in zip(starts, stops)]

    return value[slices]
Obtain number of chunks for the given dimensions and chunk sizes.
def getnumber(plan, shape):
    """
    Obtain number of chunks for the given dimensions and chunk sizes.

    Given a plan for the number of chunks along each dimension,
    calculate the number of chunks that this will lead to.

    Parameters
    ----------
    plan : tuple or array-like
        Size of chunks (in number of elements) along each dimension.
        Length must be equal to the number of dimensions.

    shape : tuple
        Shape of array to be chunked.
    """
    nchunks = []
    for size, d in zip(plan, shape):
        nchunks.append(int(ceil(1.0 * d/size)))
    return nchunks
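A quick worked example of the ceiling arithmetic (a sketch; it assumes this helper is importable as a static method on ChunkedArray in bolt.spark.chunk, consistent with it being defined without self above):

from bolt.spark.chunk import ChunkedArray

# a (5, 7) value shape cut into chunks of size (2, 3):
# ceil(5/2) = 3 chunks and ceil(7/3) = 3 chunks
print(ChunkedArray.getnumber((2, 3), (5, 7)))  # [3, 3]

# chunks as large as the dimension itself collapse to one chunk each
print(ChunkedArray.getnumber((5, 7), (5, 7)))  # [1, 1]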
Obtain slices for the given dimensions, padding, and chunks.
def getslices(plan, padding, shape):
    """
    Obtain slices for the given dimensions, padding, and chunks.

    Given a plan for the number of chunks along each dimension and the
    amount of padding, calculate a list of slices required to generate
    those chunks.

    Parameters
    ----------
    plan : tuple or array-like
        Size of chunks (in number of elements) along each dimension.
        Length must be equal to the number of dimensions.

    padding : tuple or array-like
        Size of overlap (in number of elements) between chunks along each dimension.
        Length must be equal to the number of dimensions.

    shape : tuple
        Dimensions of axes to be chunked.
    """
    slices = []
    for size, pad, d in zip(plan, padding, shape):
        nchunks = int(floor(d/size))
        remainder = d % size
        start = 0
        dimslices = []
        for idx in range(nchunks):
            end = start + size
            # left endpoint
            if idx == 0:
                left = start
            else:
                left = start - pad
            # right endpoint
            if idx == nchunks:
                right = end
            else:
                right = end + pad
            dimslices.append(slice(left, right, 1))
            start = end
        if remainder:
            dimslices.append(slice(end - pad, d, 1))
        slices.append(dimslices)
    return slices
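To make the padding behavior concrete, here is a small worked call (again a sketch assuming the static method lives on ChunkedArray): a length-5 axis cut into size-2 chunks with padding 1 yields slices that each reach one element into their neighbors, plus a padded remainder slice.

from bolt.spark.chunk import ChunkedArray

# plan=(2,), padding=(1,), shape=(5,):
# floor(5/2) = 2 full chunks plus a remainder of 1
print(ChunkedArray.getslices((2,), (1,), (5,)))
# [[slice(0, 3, 1), slice(1, 5, 1), slice(3, 5, 1)]]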
Obtain a binary mask by setting a subset of entries to true.
def getmask(inds, n):
    """
    Obtain a binary mask by setting a subset of entries to true.

    Parameters
    ----------
    inds : array-like
        Which indices to set as true.

    n : int
        The length of the target mask.
    """
    inds = asarray(inds, 'int')
    mask = zeros(n, dtype=bool)
    mask[inds] = True
    return mask
Repartitions the underlying RDD
def repartition(self, npartitions):
    """
    Repartitions the underlying RDD

    Parameters
    ----------
    npartitions : int
        Number of partitions to repartition the underlying RDD to
    """
    rdd = self._rdd.repartition(npartitions)
    return self._constructor(rdd, ordered=False).__finalize__(self)
Aggregates records of a distributed array.
def stack(self, size=None):
    """
    Aggregates records of a distributed array.

    Stacking should improve the performance of vectorized operations,
    but the resulting StackedArray object only exposes a restricted set
    of operations (e.g. map, reduce). The unstack method can be used
    to restore the full bolt array.

    Parameters
    ----------
    size : int, optional, default=None
        The maximum size for each stack (number of original records),
        will aggregate groups of records per partition up to this size,
        if None will aggregate all records on each partition.

    Returns
    -------
    StackedArray
    """
    stk = StackedArray(self._rdd, shape=self.shape, split=self.split)
    return stk.stack(size)
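A hedged usage sketch (assuming a live SparkContext named sc and the top-level bolt.array constructor; the shapes and stack size are illustrative): stacking lets NumPy operate on fewer, larger arrays, and unstack restores the original record layout.

import bolt
from numpy import arange

a = bolt.array(arange(100).reshape(20, 5), sc, axis=(0,))

# group up to 10 records per stack, apply a vectorized op, then restore
stacked = a.stack(size=10)
result = stacked.map(lambda x: x * 2).unstack()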
Align spark bolt array so that axes for iteration are in the keys.
def _align(self, axis):
    """
    Align spark bolt array so that axes for iteration are in the keys.

    This operation is applied before most functional operators.
    It ensures that the specified axes are valid, and swaps
    key/value axes so that functional operators can be applied
    over the correct records.

    Parameters
    ----------
    axis : tuple[int]
        One or more axes that will be iterated over by a functional operator

    Returns
    -------
    BoltArraySpark
    """
    # ensure that the specified axes are valid
    inshape(self.shape, axis)

    # find the value axes that should be moved into the keys (axis >= split)
    tokeys = [(a - self.split) for a in axis if a >= self.split]

    # find the key axes that should be moved into the values (axis < split)
    tovalues = [a for a in range(self.split) if a not in axis]

    if tokeys or tovalues:
        return self.swap(tovalues, tokeys)
    else:
        return self
Return the first element of an array
def first(self):
    """
    Return the first element of an array
    """
    from bolt.local.array import BoltArrayLocal
    rdd = self._rdd if self._ordered else self._rdd.sortByKey()
    return BoltArrayLocal(rdd.values().first())
Apply a function across an axis.
def map(self, func, axis=(0,), value_shape=None, dtype=None, with_keys=False):
    """
    Apply a function across an axis.

    Array will be aligned so that the desired set of axes
    are in the keys, which may incur a swap.

    Parameters
    ----------
    func : function
        Function of a single array to apply. If with_keys=True,
        function should be of a (tuple, array) pair.

    axis : tuple or int, optional, default=(0,)
        Axis or multiple axes to apply function along.

    value_shape : tuple, optional, default=None
        Known shape of values resulting from operation

    dtype : numpy.dtype, optional, default=None
        Known dtype of values resulting from operation

    with_keys : bool, optional, default=False
        Include keys as an argument to the function

    Returns
    -------
    BoltArraySpark
    """
    axis = tupleize(axis)
    swapped = self._align(axis)

    if with_keys:
        test_func = lambda x: func(((0,), x))
    else:
        test_func = func

    if value_shape is None or dtype is None:
        # try to compute the size of each mapped element by applying func to a random array
        try:
            mapped = test_func(random.randn(*swapped.values.shape).astype(self.dtype))
        except Exception:
            first = swapped._rdd.first()
            if first:
                # eval func on the first element
                mapped = test_func(first[1])
        if value_shape is None:
            value_shape = mapped.shape
        if dtype is None:
            dtype = mapped.dtype

    shape = tuple([swapped._shape[ax] for ax in range(len(axis))]) + tupleize(value_shape)

    if with_keys:
        rdd = swapped._rdd.map(lambda kv: (kv[0], func(kv)))
    else:
        rdd = swapped._rdd.mapValues(func)

    # reshaping will fail if the elements aren't uniformly shaped
    def check(v):
        if len(v.shape) > 0 and v.shape != tupleize(value_shape):
            raise Exception("Map operation did not produce values of uniform shape.")
        return v

    rdd = rdd.mapValues(lambda v: check(v))

    return self._constructor(rdd, shape=shape, dtype=dtype,
                             split=swapped.split).__finalize__(swapped)
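As a usage sketch (assuming a live SparkContext sc), mapping over axis 0 hands the function one (5,)-shaped record at a time; mapping over both axes would first incur a swap so that both live in the keys:

import bolt
from numpy import arange

a = bolt.array(arange(20).reshape(4, 5), sc, axis=(0,))

doubled = a.map(lambda v: v * 2, axis=(0,))
print(doubled.shape)  # (4, 5)

sums = a.map(lambda v: v.sum(), axis=(0,))
print(sums.shape)     # (4,)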
Filter array along an axis.
def filter(self, func, axis=(0,), sort=False):
    """
    Filter array along an axis.

    Applies a function which should evaluate to boolean,
    along a single axis or multiple axes. Array will be
    aligned so that the desired set of axes are in the
    keys, which may incur a swap.

    Parameters
    ----------
    func : function
        Function to apply, should return boolean

    axis : tuple or int, optional, default=(0,)
        Axis or multiple axes to filter along.

    sort : bool, optional, default=False
        Whether or not to sort by key before reindexing

    Returns
    -------
    BoltArraySpark
    """
    axis = tupleize(axis)
    swapped = self._align(axis)

    def f(record):
        return func(record[1])

    rdd = swapped._rdd.filter(f)
    if sort:
        rdd = rdd.sortByKey().values()
    else:
        rdd = rdd.values()

    # count the resulting array in order to reindex (linearize) the keys
    count, zipped = zip_with_index(rdd)
    if not count:
        count = zipped.count()
    reindexed = zipped.map(lambda kv: (tupleize(kv[1]), kv[0]))

    # since we can only filter over one axis, the remaining shape is always the following
    remaining = list(swapped.shape[len(axis):])
    if count != 0:
        shape = tuple([count] + remaining)
    else:
        shape = (0,)

    return self._constructor(reindexed, shape=shape, split=1).__finalize__(swapped)
Reduce an array along an axis.
def reduce(self, func, axis=(0,), keepdims=False):
    """
    Reduce an array along an axis.

    Applies a commutative/associative function of two
    arguments cumulatively to all arrays along an axis.
    Array will be aligned so that the desired set of axes
    are in the keys, which may incur a swap.

    Parameters
    ----------
    func : function
        Function of two arrays that returns a single array

    axis : tuple or int, optional, default=(0,)
        Axis or multiple axes to reduce along.

    Returns
    -------
    BoltArraySpark
    """
    from bolt.local.array import BoltArrayLocal
    from numpy import ndarray

    axis = tupleize(axis)
    swapped = self._align(axis)
    arr = swapped._rdd.values().treeReduce(func, depth=3)

    if keepdims:
        for i in axis:
            arr = expand_dims(arr, axis=i)

    if not isinstance(arr, ndarray):
        # the result of a reduce can also be a scalar
        return arr
    elif arr.shape == (1,):
        # ndarrays with single values in them should be converted into scalars
        return arr[0]

    return BoltArrayLocal(arr)
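And a matching reduce sketch, reusing the hypothetical array a from the map example above: the function sees pairs of records, so it must be commutative and associative (here, elementwise maximum across the 0th axis):

from numpy import maximum

row_max = a.reduce(maximum, axis=(0,))
print(row_max.shape)  # (5,)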
Compute a statistic over an axis.
def _stat(self, axis=None, func=None, name=None, keepdims=False):
    """
    Compute a statistic over an axis.

    Can provide either a function (for use in a reduce)
    or a name (for use by a stat counter).

    Parameters
    ----------
    axis : tuple or int, optional, default=None
        Axis to compute statistic over, if None
        will compute over all axes

    func : function, optional, default=None
        Function for reduce, see BoltArraySpark.reduce

    name : str
        A named statistic, see StatCounter

    keepdims : boolean, optional, default=False
        Keep axis remaining after operation with size 1.
    """
    if axis is None:
        axis = list(range(len(self.shape)))
    axis = tupleize(axis)

    if func and not name:
        return self.reduce(func, axis, keepdims)

    if name and not func:
        from bolt.local.array import BoltArrayLocal

        swapped = self._align(axis)

        def reducer(left, right):
            return left.combine(right)

        counter = swapped._rdd.values()\
                              .mapPartitions(lambda i: [StatCounter(values=i, stats=name)])\
                              .treeReduce(reducer, depth=3)

        arr = getattr(counter, name)

        if keepdims:
            for i in axis:
                arr = expand_dims(arr, axis=i)

        return BoltArrayLocal(arr).toscalar()

    else:
        raise ValueError('Must specify either a function or a statistic name.')
Return the mean of the array over the given axis.
def mean(self, axis=None, keepdims=False):
    """
    Return the mean of the array over the given axis.

    Parameters
    ----------
    axis : tuple or int, optional, default=None
        Axis to compute statistic over, if None
        will compute over all axes

    keepdims : boolean, optional, default=False
        Keep axis remaining after operation with size 1.
    """
    return self._stat(axis, name='mean', keepdims=keepdims)
Return the variance of the array over the given axis.
def var(self, axis=None, keepdims=False):
    """
    Return the variance of the array over the given axis.

    Parameters
    ----------
    axis : tuple or int, optional, default=None
        Axis to compute statistic over, if None
        will compute over all axes

    keepdims : boolean, optional, default=False
        Keep axis remaining after operation with size 1.
    """
    return self._stat(axis, name='variance', keepdims=keepdims)
Return the standard deviation of the array over the given axis.
def std(self, axis=None, keepdims=False):
    """
    Return the standard deviation of the array over the given axis.

    Parameters
    ----------
    axis : tuple or int, optional, default=None
        Axis to compute statistic over, if None
        will compute over all axes

    keepdims : boolean, optional, default=False
        Keep axis remaining after operation with size 1.
    """
    return self._stat(axis, name='stdev', keepdims=keepdims)
Return the sum of the array over the given axis.
def sum(self, axis=None, keepdims=False):
    """
    Return the sum of the array over the given axis.

    Parameters
    ----------
    axis : tuple or int, optional, default=None
        Axis to compute statistic over, if None
        will compute over all axes

    keepdims : boolean, optional, default=False
        Keep axis remaining after operation with size 1.
    """
    from operator import add
    return self._stat(axis, func=add, keepdims=keepdims)
Return the maximum of the array over the given axis.
def max(self, axis=None, keepdims=False):
    """
    Return the maximum of the array over the given axis.

    Parameters
    ----------
    axis : tuple or int, optional, default=None
        Axis to compute statistic over, if None
        will compute over all axes

    keepdims : boolean, optional, default=False
        Keep axis remaining after operation with size 1.
    """
    from numpy import maximum
    return self._stat(axis, func=maximum, keepdims=keepdims)
Return the minimum of the array over the given axis.
def min(self, axis=None, keepdims=False):
    """
    Return the minimum of the array over the given axis.

    Parameters
    ----------
    axis : tuple or int, optional, default=None
        Axis to compute statistic over, if None
        will compute over all axes

    keepdims : boolean, optional, default=False
        Keep axis remaining after operation with size 1.
    """
    from numpy import minimum
    return self._stat(axis, func=minimum, keepdims=keepdims)
Join this array with another array.
def concatenate(self, arry, axis=0):
    """
    Join this array with another array.

    Parameters
    ----------
    arry : ndarray, BoltArrayLocal, or BoltArraySpark
        Another array to concatenate with

    axis : int, optional, default=0
        The axis along which arrays will be joined.

    Returns
    -------
    BoltArraySpark
    """
    if isinstance(arry, ndarray):
        from bolt.spark.construct import ConstructSpark
        arry = ConstructSpark.array(arry, self._rdd.context, axis=range(0, self.split))
    else:
        if not isinstance(arry, BoltArraySpark):
            raise ValueError("other must be local array or spark array, got %s" % type(arry))

    if not all([x == y if not i == axis else True
                for i, (x, y) in enumerate(zip(self.shape, arry.shape))]):
        raise ValueError("all the input array dimensions except for "
                         "the concatenation axis must match exactly")

    if not self.split == arry.split:
        raise NotImplementedError("two arrays must have the same split")

    if axis < self.split:
        shape = self.keys.shape

        def key_func(key):
            key = list(key)
            key[axis] += shape[axis]
            return tuple(key)

        rdd = self._rdd.union(arry._rdd.map(lambda kv: (key_func(kv[0]), kv[1])))

    else:
        from numpy import concatenate as npconcatenate
        shift = axis - self.split
        rdd = self._rdd.join(arry._rdd).map(lambda kv: (kv[0], npconcatenate(kv[1], axis=shift)))

    shape = tuple([x + y if i == axis else x
                   for i, (x, y) in enumerate(zip(self.shape, arry.shape))])

    return self._constructor(rdd, shape=shape, ordered=False).__finalize__(self)
Basic indexing (for slices or ints).
def _getbasic(self, index):
    """
    Basic indexing (for slices or ints).
    """
    key_slices = index[0:self.split]
    value_slices = index[self.split:]

    def key_check(key):
        def inrange(k, s):
            if s.step > 0:
                return s.start <= k < s.stop
            else:
                return s.stop < k <= s.start
        def check(k, s):
            return inrange(k, s) and mod(k - s.start, s.step) == 0
        out = [check(k, s) for k, s in zip(key, key_slices)]
        return all(out)

    def key_func(key):
        return tuple([(k - s.start)/s.step for k, s in zip(key, key_slices)])

    filtered = self._rdd.filter(lambda kv: key_check(kv[0]))

    if self._split == self.ndim:
        rdd = filtered.map(lambda kv: (key_func(kv[0]), kv[1]))
    else:
        # handle use of slice.stop = -1 for a special case (see utils.slicify)
        value_slices = [s if s.stop != -1 else slice(s.start, None, s.step) for s in value_slices]
        rdd = filtered.map(lambda kv: (key_func(kv[0]), kv[1][value_slices]))

    shape = tuple([int(ceil((s.stop - s.start) / float(s.step))) for s in index])
    split = self.split
    return rdd, shape, split
Advanced indexing (for sets, lists, or ndarrays).
def _getadvanced(self, index):
    """
    Advanced indexing (for sets, lists, or ndarrays).
    """
    index = [asarray(i) for i in index]
    shape = index[0].shape
    if not all([i.shape == shape for i in index]):
        raise ValueError("shape mismatch: indexing arrays could not be broadcast "
                         "together with shapes " + ("%s " * self.ndim)
                         % tuple([i.shape for i in index]))

    index = tuple([listify(i, d) for (i, d) in zip(index, self.shape)])

    # build tuples with target indices
    key_tuples = list(zip(*index[0:self.split]))
    value_tuples = list(zip(*index[self.split:]))

    # build dictionary to look up targets in values
    d = {}
    for k, g in groupby(zip(value_tuples, key_tuples), lambda x: x[1]):
        d[k] = map(lambda x: x[0], list(g))

    def key_check(key):
        return key in key_tuples

    def key_func(key):
        return unravel_index(key, shape)

    # filter records based on key targets
    filtered = self._rdd.filter(lambda kv: key_check(kv[0]))

    # subselect and flatten records based on value targets (if they exist)
    if len(value_tuples) > 0:
        flattened = filtered.flatMap(lambda kv: [(kv[0], kv[1][i]) for i in d[kv[0]]])
    else:
        flattened = filtered

    # reindex
    indexed = flattened.zipWithIndex()
    rdd = indexed.map(lambda kkv: (key_func(kkv[1]), kkv[0][1]))
    split = len(shape)

    return rdd, shape, split
Mixed indexing (combines basic and advanced indexes).
def _getmixed(self, index):
    """
    Mixed indexing (combines basic and advanced indexes).

    Assumes that only a single advanced index is used, due to the
    complicated behavior needed to be compatible with NumPy otherwise.
    """
    # find the single advanced index
    loc = where([isinstance(i, (tuple, list, ndarray)) for i in index])[0][0]
    idx = list(index[loc])

    if isinstance(idx[0], (tuple, list, ndarray)):
        raise ValueError("When mixing basic and advanced indexing, "
                         "advanced index must be one-dimensional")

    # single advanced index is on a key -- filter and update key
    if loc < self.split:
        def newkey(key):
            newkey = list(key)
            newkey[loc] = idx.index(key[loc])
            return tuple(newkey)
        rdd = self._rdd.filter(lambda kv: kv[0][loc] in idx).map(lambda kv: (newkey(kv[0]), kv[1]))

    # single advanced index is on a value -- use NumPy indexing
    else:
        slices = [slice(0, None, None) for _ in self.values.shape]
        slices[loc - self.split] = idx
        rdd = self._rdd.map(lambda kv: (kv[0], kv[1][slices]))

    newshape = list(self.shape)
    newshape[loc] = len(idx)
    barray = self._constructor(rdd, shape=tuple(newshape)).__finalize__(self)

    # apply the rest of the simple indices
    new_index = index[:]
    new_index[loc] = slice(0, None, None)
    barray = barray[tuple(new_index)]

    return barray._rdd, barray.shape, barray.split
Chunks records of a distributed array.
def chunk(self, size="150", axis=None, padding=None):
    """
    Chunks records of a distributed array.

    Chunking breaks arrays into subarrays, using a specified size of
    chunks along each value dimension. Can alternatively specify an
    average chunk byte size (in kilobytes) and the size of chunks
    (as ints) will be computed automatically.

    Parameters
    ----------
    size : tuple, int, or str, optional, default="150"
        A string giving the size in kilobytes, or a tuple with the size
        of chunks along each dimension.

    axis : int or tuple, optional, default=None
        One or more axes to chunk array along, if None
        will use all axes.

    padding : tuple or int, optional, default=None
        Number of elements per dimension that will overlap with the adjacent chunk.
        If a tuple, specifies padding along each chunked dimension; if an int, the
        same padding will be applied to all chunked dimensions.

    Returns
    -------
    ChunkedArray
    """
    if type(size) is not str:
        size = tupleize((size))
    axis = tupleize((axis))
    padding = tupleize((padding))

    from bolt.spark.chunk import ChunkedArray

    chnk = ChunkedArray(rdd=self._rdd, shape=self._shape, split=self._split, dtype=self._dtype)
    return chnk._chunk(size, axis, padding)
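A usage sketch tying chunk and unchunk together (again assuming a live SparkContext sc; the array and sizes are illustrative): padding gives each chunk a one-element halo on the value axis, and unchunk strips the padding and reassembles the original array.

import bolt
from numpy import arange

a = bolt.array(arange(4*12).reshape(4, 12), sc, axis=(0,))

# size-4 chunks over the single value axis, with a 1-element overlap
c = a.chunk(size=(4,), padding=1)

restored = c.unchunk()
print(restored.shape)  # (4, 12)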
Swap axes from keys to values.
def swap(self, kaxes, vaxes, size="150"):
    """
    Swap axes from keys to values.

    This is the core operation underlying shape manipulation
    on the Spark bolt array. It exchanges an arbitrary set of axes
    between the keys and the values. If either is None, will only
    move axes in one direction (from keys to values, or values to keys).
    Keys moved to values will be placed immediately after the split;
    values moved to keys will be placed immediately before the split.

    Parameters
    ----------
    kaxes : tuple
        Axes from keys to move to values

    vaxes : tuple
        Axes from values to move to keys

    size : tuple or int, optional, default="150"
        Can either provide a string giving the size in kilobytes,
        or a tuple with the number of chunks along each
        value dimension being moved

    Returns
    -------
    BoltArraySpark
    """
    kaxes = asarray(tupleize(kaxes), 'int')
    vaxes = asarray(tupleize(vaxes), 'int')
    if type(size) is not str:
        size = tupleize(size)

    if len(kaxes) == self.keys.ndim and len(vaxes) == 0:
        raise ValueError('Cannot perform a swap that would '
                         'end up with all data on a single key')

    if len(kaxes) == 0 and len(vaxes) == 0:
        return self

    from bolt.spark.chunk import ChunkedArray

    chunks = self.chunk(size)
    swapped = chunks.keys_to_values(kaxes).values_to_keys([v+len(kaxes) for v in vaxes])
    barray = swapped.unchunk()

    return barray
Return an array with the axes transposed.
def transpose(self, *axes):
    """
    Return an array with the axes transposed.

    This operation will incur a swap unless the desired permutation
    can be obtained only by transposing the keys or the values.

    Parameters
    ----------
    axes : None, tuple of ints, or n ints
        If None, will reverse axis order.
    """
    if len(axes) == 0:
        p = arange(self.ndim-1, -1, -1)
    else:
        p = asarray(argpack(axes))

    istransposeable(p, range(self.ndim))

    split = self.split

    # compute the keys/value axes that need to be swapped
    new_keys, new_values = p[:split], p[split:]
    swapping_keys = sort(new_values[new_values < split])
    swapping_values = sort(new_keys[new_keys >= split])
    stationary_keys = sort(new_keys[new_keys < split])
    stationary_values = sort(new_values[new_values >= split])

    # compute the permutation that the swap causes
    p_swap = r_[stationary_keys, swapping_values, swapping_keys, stationary_values]

    # compute the extra permutation (p_x) on top of this that
    # needs to happen to get the full permutation desired
    p_swap_inv = argsort(p_swap)
    p_x = p_swap_inv[p]
    p_keys, p_values = p_x[:split], p_x[split:]-split

    # perform the swap and then the within-key/value permutations
    arr = self.swap(swapping_keys, swapping_values-split)
    arr = arr.keys.transpose(tuple(p_keys.tolist()))
    arr = arr.values.transpose(tuple(p_values.tolist()))

    return arr
Return the array with two axes interchanged.
def swapaxes(self, axis1, axis2):
    """
    Return the array with two axes interchanged.

    Parameters
    ----------
    axis1 : int
        The first axis to swap

    axis2 : int
        The second axis to swap
    """
    p = list(range(self.ndim))
    p[axis1] = axis2
    p[axis2] = axis1

    return self.transpose(p)
Return an array with the same data but a new shape.
def reshape(self, *shape):
    """
    Return an array with the same data but a new shape.

    Currently only supports reshaping that independently
    reshapes the keys, or the values, or both.

    Parameters
    ----------
    shape : tuple of ints, or n ints
        New shape
    """
    new = argpack(shape)
    isreshapeable(new, self.shape)

    if new == self.shape:
        return self

    i = self._reshapebasic(new)
    if i == -1:
        raise NotImplementedError("Currently no support for reshaping between "
                                  "keys and values for BoltArraySpark")
    else:
        new_key_shape, new_value_shape = new[:i], new[i:]
        return self.keys.reshape(new_key_shape).values.reshape(new_value_shape)
Check if the requested reshape can be broken into independent reshapes on the keys and values. If it can, returns the index in the new shape separating keys from values; otherwise returns -1.
def _reshapebasic(self, shape):
    """
    Check if the requested reshape can be broken into independent reshapes
    on the keys and values. If it can, returns the index in the new shape
    separating keys from values; otherwise returns -1.
    """
    new = tupleize(shape)
    old_key_size = prod(self.keys.shape)
    old_value_size = prod(self.values.shape)

    for i in range(len(new)):
        new_key_size = prod(new[:i])
        new_value_size = prod(new[i:])
        if new_key_size == old_key_size and new_value_size == old_value_size:
            return i

    return -1
Remove one or more single - dimensional axes from the array.
def squeeze(self, axis=None):
    """
    Remove one or more single-dimensional axes from the array.

    Parameters
    ----------
    axis : tuple or int
        One or more singleton axes to remove.
    """
    if not any([d == 1 for d in self.shape]):
        return self

    if axis is None:
        drop = where(asarray(self.shape) == 1)[0]
    elif isinstance(axis, int):
        drop = asarray((axis,))
    elif isinstance(axis, tuple):
        drop = asarray(axis)
    else:
        raise ValueError("an integer or tuple is required for the axis")

    if any([self.shape[i] > 1 for i in drop]):
        raise ValueError("cannot select an axis to squeeze out which has size greater than one")

    if any(asarray(drop) < self.split):
        kmask = set([d for d in drop if d < self.split])
        kfunc = lambda k: tuple([kk for ii, kk in enumerate(k) if ii not in kmask])
    else:
        kfunc = lambda k: k

    if any(asarray(drop) >= self.split):
        vmask = tuple([d - self.split for d in drop if d >= self.split])
        vfunc = lambda v: v.squeeze(vmask)
    else:
        vfunc = lambda v: v

    rdd = self._rdd.map(lambda kv: (kfunc(kv[0]), vfunc(kv[1])))
    shape = tuple([ss for ii, ss in enumerate(self.shape) if ii not in drop])
    split = len([d for d in range(self.keys.ndim) if d not in drop])

    return self._constructor(rdd, shape=shape, split=split).__finalize__(self)
Cast the array to a specified type.
def astype(self, dtype, casting='unsafe'):
    """
    Cast the array to a specified type.

    Parameters
    ----------
    dtype : str or dtype
        Typecode or data-type to cast the array to (see numpy)
    """
    rdd = self._rdd.mapValues(lambda v: v.astype(dtype, 'K', casting))
    return self._constructor(rdd, dtype=dtype).__finalize__(self)
Clip values above and below.
def clip(self, min=None, max=None):
    """
    Clip values above and below.

    Parameters
    ----------
    min : scalar or array-like
        Minimum value. If array, will be broadcasted.

    max : scalar or array-like
        Maximum value. If array, will be broadcasted.
    """
    rdd = self._rdd.mapValues(lambda v: v.clip(min=min, max=max))
    return self._constructor(rdd).__finalize__(self)
Returns the contents as a local array.
def toarray(self):
    """
    Returns the contents as a local array.

    Will likely cause memory problems for large objects.
    """
    rdd = self._rdd if self._ordered else self._rdd.sortByKey()
    x = rdd.values().collect()
    return asarray(x).reshape(self.shape)
Coerce singletons, lists, and ndarrays to tuples.
def tupleize(arg):
    """
    Coerce singletons, lists, and ndarrays to tuples.

    Parameters
    ----------
    arg : tuple, list, ndarray, or singleton
        Item to coerce
    """
    if arg is None:
        return None
    if not isinstance(arg, (tuple, list, ndarray, Iterable)):
        return tuple((arg,))
    elif isinstance(arg, (list, ndarray)):
        return tuple(arg)
    elif isinstance(arg, Iterable) and not isinstance(arg, str):
        return tuple(arg)
    else:
        return arg
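The coercion rules in one place (a small illustrative sketch; the outputs follow directly from the branches above):

from numpy import array

print(tupleize(3))              # (3,)
print(tupleize([1, 2]))         # (1, 2)
print(tupleize(array([1, 2])))  # (1, 2)
print(tupleize((1, 2)))         # (1, 2)
print(tupleize(None))           # None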
Coerce a list of arguments to a tuple.
def argpack(args):
    """
    Coerce a list of arguments to a tuple.

    Parameters
    ----------
    args : tuple or nested tuple
        Pack arguments into a tuple, converting ((,...),) or (,) -> (,)
    """
    if isinstance(args[0], (tuple, list, ndarray)):
        return tupleize(args[0])
    elif isinstance(args[0], Iterable) and not isinstance(args[0], str):
        # coerce any iterable into a list before calling tupleize (Python 3 compatibility)
        return tupleize(list(args[0]))
    else:
        return tuple(args)
Checks to see if a list of axes are contained within an array shape.
def inshape(shape, axes):
    """
    Checks to see if a list of axes are contained within an array shape.

    Parameters
    ----------
    shape : tuple[int]
        the shape of a BoltArray

    axes : tuple[int]
        the axes to check against shape
    """
    valid = all([(axis < len(shape)) and (axis >= 0) for axis in axes])
    if not valid:
        raise ValueError("axes not valid for an ndarray of shape: %s" % str(shape))
Test that a and b are close and match in shape.
def allclose(a, b):
    """
    Test that a and b are close and match in shape.

    Parameters
    ----------
    a : ndarray
        First array to check

    b : ndarray
        Second array to check
    """
    from numpy import allclose
    return (a.shape == b.shape) and allclose(a, b)
Flatten lists of indices and ensure bounded by a known dim.
def listify(lst, dim):
    """
    Flatten lists of indices and ensure bounded by a known dim.

    Parameters
    ----------
    lst : list
        List of integer indices

    dim : tuple
        Bounds for indices
    """
    if not all([l.dtype == int for l in lst]):
        raise ValueError("indices must be integers")

    if npany(asarray(lst) >= dim):
        raise ValueError("indices out of bounds for axis with size %s" % dim)

    return lst.flatten()
Force a slice to have defined start, stop, and step from a known dim. Start and stop will always be positive. Step may be negative.
def slicify(slc, dim):
    """
    Force a slice to have defined start, stop, and step from a known dim.

    Start and stop will always be positive. Step may be negative.

    There is an exception where a negative step overflows the stop; there
    the stop needs to have the default value set to -1. This is the only
    case of a negative start/stop value.

    Parameters
    ----------
    slc : slice or int
        The slice to modify, or int to convert to a slice

    dim : tuple
        Bound for slice
    """
    if isinstance(slc, slice):

        # default limits
        start = 0 if slc.start is None else slc.start
        stop = dim if slc.stop is None else slc.stop
        step = 1 if slc.step is None else slc.step

        # account for negative indices
        if start < 0:
            start += dim
        if stop < 0:
            stop += dim

        # account for over-flowing the bounds
        if step > 0:
            if start < 0:
                start = 0
            if stop > dim:
                stop = dim
        else:
            if stop < 0:
                stop = -1
            if start > dim:
                start = dim-1

        return slice(start, stop, step)

    elif isinstance(slc, int):
        if slc < 0:
            slc += dim
        return slice(slc, slc+1, 1)

    else:
        raise ValueError("Type for slice %s not recognized" % type(slc))
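Concretely (the values follow from the defaulting and negative-index rules above):

print(slicify(slice(None), 10))      # slice(0, 10, 1)
print(slicify(slice(-3, None), 10))  # slice(7, 10, 1)
print(slicify(4, 10))                # slice(4, 5, 1)
print(slicify(-1, 10))               # slice(9, 10, 1)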
Check to see if a proposed tuple of axes is a valid permutation of an old set of axes. Checks length, axis repetition, and bounds.
def istransposeable(new, old):
    """
    Check to see if a proposed tuple of axes is a valid permutation
    of an old set of axes. Checks length, axis repetition, and bounds.

    Parameters
    ----------
    new : tuple
        tuple of proposed axes

    old : tuple
        tuple of old axes
    """
    new, old = tupleize(new), tupleize(old)

    if not len(new) == len(old):
        raise ValueError("Axes do not match axes of keys")

    if not len(set(new)) == len(set(old)):
        raise ValueError("Repeated axes")

    if any(n < 0 for n in new) or max(new) > len(old) - 1:
        raise ValueError("Invalid axes")
Check to see if a proposed tuple of axes is a valid reshaping of the old axes by ensuring that they can be factored.
def isreshapeable(new, old):
    """
    Check to see if a proposed tuple of axes is a valid reshaping of
    the old axes by ensuring that they can be factored.

    Parameters
    ----------
    new : tuple
        tuple of proposed axes

    old : tuple
        tuple of old axes
    """
    new, old = tupleize(new), tupleize(old)

    if not prod(new) == prod(old):
        raise ValueError("Total size of new keys must remain unchanged")
If an ndarray has been split into multiple chunks by splitting it along each axis at a number of locations, this function rebuilds the original array from chunks.
def allstack(vals, depth=0):
    """
    If an ndarray has been split into multiple chunks by splitting it
    along each axis at a number of locations, this function rebuilds
    the original array from chunks.

    Parameters
    ----------
    vals : nested lists of ndarrays
        each level of nesting of the lists representing a dimension of
        the original array.
    """
    if type(vals[0]) is ndarray:
        return concatenate(vals, axis=depth)
    else:
        return concatenate([allstack(x, depth+1) for x in vals], axis=depth)
Expand dimensions by iteratively appending empty axes.
def iterexpand(arry, extra):
    """
    Expand dimensions by iteratively appending empty axes.

    Parameters
    ----------
    arry : ndarray
        The original array

    extra : int
        The number of empty axes to append
    """
    for d in range(arry.ndim, arry.ndim+extra):
        arry = expand_dims(arry, axis=d)
    return arry
Alternate version of Spark's zipWithIndex that eagerly returns count.
def zip_with_index(rdd):
    """
    Alternate version of Spark's zipWithIndex that eagerly returns count.
    """
    starts = [0]
    if rdd.getNumPartitions() > 1:
        nums = rdd.mapPartitions(lambda it: [sum(1 for _ in it)]).collect()
        count = sum(nums)
        for i in range(len(nums) - 1):
            starts.append(starts[-1] + nums[i])
    else:
        count = rdd.count()

    def func(k, it):
        for i, v in enumerate(it, starts[k]):
            yield v, i

    return count, rdd.mapPartitionsWithIndex(func)
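The per-partition offset logic can be illustrated without Spark; this standalone sketch mirrors how starts is computed from partition sizes:

# partition sizes as mapPartitions would report them
nums = [3, 4, 2]
starts = [0]
for i in range(len(nums) - 1):
    starts.append(starts[-1] + nums[i])
print(starts)     # [0, 3, 7]
print(sum(nums))  # 9 records total, indexed 0..8 without a second pass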
Decorator to append routed docstrings
def wrapped(f):
    """
    Decorator to append routed docstrings
    """
    import inspect

    def extract(func):
        append = ""
        args = inspect.getargspec(func)
        for i, a in enumerate(args.args):
            # count positional (non-default) arguments against args.args,
            # not against the ArgSpec tuple itself
            if i < (len(args.args) - len(args.defaults)):
                append += str(a) + ", "
            else:
                default = args.defaults[i-len(args.defaults)]
                if hasattr(default, "__name__"):
                    default = default.__name__
                else:
                    default = str(default)
                append += str(a) + "=" + default + ", "
        append = append[:-2] + ")"
        return append

    doc = f.__doc__ + "\n"
    doc += "    local -> array(" + extract(getattr(ConstructLocal, f.__name__)) + "\n"
    doc += "    spark -> array(" + extract(getattr(ConstructSpark, f.__name__)) + "\n"
    f.__doc__ = doc
    return f
Use arguments to route constructor.
def lookup(*args, **kwargs):
    """
    Use arguments to route constructor.

    Applies a series of checks on arguments to identify constructor,
    starting with known keyword arguments, and then applying
    constructor-specific checks
    """
    if 'mode' in kwargs:
        mode = kwargs['mode']
        if mode not in constructors:
            raise ValueError('Mode %s not supported' % mode)
        del kwargs['mode']
        return constructors[mode]
    else:
        for mode, constructor in constructors:
            if constructor._argcheck(*args, **kwargs):
                return constructor
        return ConstructLocal
Reshape just the keys of a BoltArraySpark returning a new BoltArraySpark.
def reshape(self, *shape):
    """
    Reshape just the keys of a BoltArraySpark, returning a
    new BoltArraySpark.

    Parameters
    ----------
    shape : tuple
        New proposed axes.
    """
    new = argpack(shape)
    old = self.shape
    isreshapeable(new, old)

    if new == old:
        return self._barray

    def f(k):
        return unravel_index(ravel_multi_index(k, old), new)

    newrdd = self._barray._rdd.map(lambda kv: (f(kv[0]), kv[1]))
    newsplit = len(new)
    newshape = new + self._barray.values.shape

    return BoltArraySpark(newrdd, shape=newshape, split=newsplit).__finalize__(self._barray)
Transpose just the keys of a BoltArraySpark returning a new BoltArraySpark.
def transpose(self, *axes):
    """
    Transpose just the keys of a BoltArraySpark, returning a
    new BoltArraySpark.

    Parameters
    ----------
    axes : tuple
        New proposed axes.
    """
    new = argpack(axes)
    old = range(self.ndim)
    istransposeable(new, old)

    if new == old:
        return self._barray

    def f(k):
        return tuple(k[i] for i in new)

    newrdd = self._barray._rdd.map(lambda kv: (f(kv[0]), kv[1]))
    newshape = tuple(self.shape[i] for i in new) + self._barray.values.shape

    return BoltArraySpark(newrdd, shape=newshape, ordered=False).__finalize__(self._barray)
Reshape just the values of a BoltArraySpark returning a new BoltArraySpark.
def reshape(self, *shape):
    """
    Reshape just the values of a BoltArraySpark, returning a
    new BoltArraySpark.

    Parameters
    ----------
    shape : tuple
        New proposed axes.
    """
    new = argpack(shape)
    old = self.shape
    isreshapeable(new, old)

    if new == old:
        return self._barray

    def f(v):
        return v.reshape(new)

    newrdd = self._barray._rdd.mapValues(f)
    newshape = self._barray.keys.shape + new

    return BoltArraySpark(newrdd, shape=newshape).__finalize__(self._barray)
Transpose just the values of a BoltArraySpark returning a new BoltArraySpark.
def transpose(self, *axes):
    """
    Transpose just the values of a BoltArraySpark, returning a
    new BoltArraySpark.

    Parameters
    ----------
    axes : tuple
        New proposed axes.
    """
    new = argpack(axes)
    old = range(self.ndim)
    istransposeable(new, old)

    if new == old:
        return self._barray

    def f(v):
        return v.transpose(new)

    newrdd = self._barray._rdd.mapValues(f)
    newshape = self._barray.keys.shape + tuple(self.shape[i] for i in new)

    return BoltArraySpark(newrdd, shape=newshape).__finalize__(self._barray)
Create a local bolt array of ones.
def ones(shape, dtype=float64, order='C'):
    """
    Create a local bolt array of ones.

    Parameters
    ----------
    shape : tuple
        Dimensions of the desired array

    dtype : data-type, optional, default=float64
        The desired data-type for the array. (see numpy)

    order : {'C', 'F', 'A'}, optional, default='C'
        The order of the array. (see numpy)

    Returns
    -------
    BoltArrayLocal
    """
    from numpy import ones
    return ConstructLocal._wrap(ones, shape, dtype, order)
Create a local bolt array of zeros.
def zeros(shape, dtype=float64, order='C'):
    """
    Create a local bolt array of zeros.

    Parameters
    ----------
    shape : tuple
        Dimensions of the desired array.

    dtype : data-type, optional, default=float64
        The desired data-type for the array. (see numpy)

    order : {'C', 'F', 'A'}, optional, default='C'
        The order of the array. (see numpy)

    Returns
    -------
    BoltArrayLocal
    """
    from numpy import zeros
    return ConstructLocal._wrap(zeros, shape, dtype, order)
Join a sequence of arrays together.
def concatenate(arrays, axis=0):
    """
    Join a sequence of arrays together.

    Parameters
    ----------
    arrays : tuple
        A sequence of array-like e.g. (a1, a2, ...)

    axis : int, optional, default=0
        The axis along which the arrays will be joined.

    Returns
    -------
    BoltArrayLocal
    """
    if not isinstance(arrays, tuple):
        raise ValueError("data type not understood")
    arrays = tuple([asarray(a) for a in arrays])
    from numpy import concatenate
    return BoltArrayLocal(concatenate(arrays, axis))
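A short usage sketch (assuming bolt is installed and this is the local constructor shown above):

from numpy import ones, zeros

a = ones((2, 3))
b = zeros((2, 3))
c = concatenate((a, b), axis=0)
print(c.shape)  # (4, 3)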
Returns A and B in y = Ax^B. http://mathworld.wolfram.com/LeastSquaresFittingPowerLaw.html
def plfit_lsq(x, y):
    """
    Returns A and B in y = A x^B
    http://mathworld.wolfram.com/LeastSquaresFittingPowerLaw.html
    """
    n = len(x)
    btop = n * (log(x)*log(y)).sum() - (log(x)).sum()*(log(y)).sum()
    bbottom = n*(log(x)**2).sum() - (log(x).sum())**2
    b = btop / bbottom
    a = (log(y).sum() - b * log(x).sum()) / n

    A = exp(a)
    return A, b
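A quick sanity check on noiseless data: points generated from y = 2 x^1.5 should recover A = 2 and B = 1.5 up to floating-point error (this assumes the numpy names the function uses, log and exp, are in scope):

from numpy import arange, log, exp

x = arange(1, 50, dtype=float)
y = 2.0 * x**1.5
A, b = plfit_lsq(x, y)
print(A, b)  # ~2.0 ~1.5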
A Python implementation of the Matlab code http://www.santafe.edu/~aaronc/powerlaws/plfit.m from http://www.santafe.edu/~aaronc/powerlaws/
def plfit(x, nosmall=False, finite=False):
    """
    A Python implementation of the Matlab code
    http://www.santafe.edu/~aaronc/powerlaws/plfit.m
    from http://www.santafe.edu/~aaronc/powerlaws/

    See A. Clauset, C.R. Shalizi, and M.E.J. Newman, "Power-law distributions
    in empirical data" SIAM Review, to appear (2009). (arXiv:0706.1062)
    http://arxiv.org/abs/0706.1062
    """
    xmins = unique(x)
    xmins = xmins[1:-1]
    dat = xmins * 0
    z = sort(x)
    for xm in arange(len(xmins)):
        xmin = xmins[xm]
        z = z[z >= xmin]
        n = float(len(z))

        # estimate alpha using direct MLE
        a = n / sum(log(z/xmin))

        if nosmall:
            # 4. For continuous data, PLFIT can return erroneously large estimates of
            # alpha when xmin is so large that the number of obs x >= xmin is very
            # small. To prevent this, we can truncate the search over xmin values
            # before the finite-size bias becomes significant by calling PLFIT as
            if (a-1)/sqrt(n) > 0.1:
                # dat(xm:end) = [];
                dat = dat[:xm]
                xm = len(xmins)+1
                break

        # compute KS statistic
        cx = arange(n)/float(n)  # data
        cf = 1-(xmin/z)**a       # fitted
        dat[xm] = max(abs(cf-cx))

    D = min(dat)
    # xmin = xmins(find(dat<=D,1,'first'));
    xmin = xmins[argmin(dat)]
    z = x[x >= xmin]
    n = len(z)
    alpha = 1 + n / sum(log(z/xmin))
    if finite:
        alpha = alpha*(n-1)/n + 1/n
    if n < 50 and not finite:  # `not`, rather than bitwise `~` on a bool
        print('(PLFIT) Warning: finite-size bias may be present.')

    L = n*log((alpha-1)/xmin) - alpha*sum(log(z/xmin))
    return xmin, alpha, L, dat
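A hedged usage sketch on synthetic data: draws from numpy's Pareto generator, shifted by 1, follow p(x) ~ x^-(a+1) above 1, so with a = 1.5 the fitted alpha should come out near 2.5 and xmin near the lower cutoff (exact values vary with the sample):

from numpy.random import pareto, seed

seed(0)
x = 1.0 + pareto(1.5, size=5000)  # power-law tail with true alpha = 2.5
xmin, alpha, L, dat = plfit(x)
print(xmin, alpha)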
Plots CDF and powerlaw
def plotcdf(x, xmin, alpha):
    """
    Plots CDF and powerlaw
    """
    x = sort(x)
    n = len(x)
    xcdf = arange(n, 0, -1, dtype='float')/float(n)

    q = x[x >= xmin]
    fcdf = (q/xmin)**(1-alpha)
    nc = xcdf[argmax(x >= xmin)]
    fcdf_norm = nc*fcdf

    loglog(x, xcdf)
    loglog(q, fcdf_norm)
Plots PDF and powerlaw.
def plotpdf(x, xmin, alpha, nbins=30, dolog=False):
    """
    Plots PDF and powerlaw.
    """
    x = sort(x)
    n = len(x)

    if dolog:
        hb = hist(x, bins=logspace(log10(min(x)), log10(max(x)), nbins), log=True)
        alpha += 1
    else:
        hb = hist(x, bins=linspace((min(x)), (max(x)), nbins))
    h, b = hb[0], hb[1]
    b = b[1:]

    q = x[x >= xmin]
    px = (alpha-1)/xmin * (q/xmin)**(-alpha)

    arg = argmin(abs(b-xmin))
    norm = mean(h[b > xmin] / ((alpha-1)/xmin * (b[b > xmin]/xmin)**(-alpha)))
    px = px*norm

    loglog(q, px)
    gca().set_xlim(min(x), max(x))
CDF(x) for the piecewise distribution: exponential for x < xmin, powerlaw for x >= xmin. This is the CDF version of the distributions drawn in fig 3.4a of Clauset et al.
def plexp(x, xm=1, a=2.5):
    """
    CDF(x) for the piecewise distribution exponential x<xmin, powerlaw x>=xmin.
    This is the CDF version of the distributions drawn in fig 3.4a of Clauset et al.
    """
    C = 1/(-xm/(1 - a) - xm/a + math.exp(a)*xm/a)
    Ppl = lambda X: 1+C*(xm/(1-a)*(X/xm)**(1-a))
    # numpy's exp (rather than math.exp) so the lambda accepts array input
    Pexp = lambda X: C*xm/a*math.exp(a)-C*(xm/a)*exp(-a*(X/xm-1))

    d = Ppl(x)
    d[x < xm] = Pexp(x[x < xm])  # evaluate the exponential branch only where x < xm
    return d
Inverse CDF for a piecewise PDF as defined in eqn. 3.10 of Clauset et al.
def plexp_inv(P, xm, a):
    """
    Inverse CDF for a piecewise PDF as defined in eqn. 3.10 of Clauset et al.
    """
    C = 1/(-xm/(1 - a) - xm/a + math.exp(a)*xm/a)
    Pxm = 1+C*(xm/(1-a))

    pp = P
    # grouping follows the vectorized form commented below
    x = xm*((pp-1)*(1-a)/(C*xm))**(1/(1-a)) if pp >= Pxm \
        else (math.log(((C*xm/a)*math.exp(a)-pp)/(C*xm/a)) - a) * (-xm/a)
    # x[P>=Pxm] = xm*( (P[P>=Pxm]-1) * (1-a)/(C*xm) )**(1/(1-a)) # powerlaw
    # x[P<Pxm] = (math.log( (C*xm/a*math.exp(a)-P[P<Pxm])/(C*xm/a) ) - a) * (-xm/a) # exp
    return x
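Inverse-transform sampling is the intended use: feed uniform draws through plexp_inv to generate data from the piecewise exponential/power-law distribution (a sketch, applying the scalar function elementwise since this version is not vectorized):

import random

random.seed(0)
samples = [plexp_inv(random.random(), 1.0, 2.5) for _ in range(1000)]
print(min(samples), max(samples))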
Create a mappable function alpha to apply to each xmin in a list of xmins. This is essentially the slow version of fplfit/cplfit, though I bet it could be sped up with a clever use of parallel_map. Not intended to be used by users.
def alpha_(self, x):
    """
    Create a mappable function alpha to apply to each xmin in a list of
    xmins. This is essentially the slow version of fplfit/cplfit, though
    I bet it could be sped up with a clever use of parallel_map.
    Not intended to be used by users.
    """
    def alpha(xmin, x=x):
        """
        given a sorted data set and a minimum, returns power law MLE fit
        data is passed as a keyword parameter so that it can be vectorized
        """
        x = [i for i in x if i >= xmin]
        n = len(x)  # the MLE needs the number of points above xmin, not their sum
        divsum = sum([math.log(i/xmin) for i in x])
        if divsum == 0:
            return float('inf')
        # the "1+" here is unimportant because alpha_ is only used for minimization
        a = 1 + float(n) / divsum
        return a
    return alpha
A pure-Python implementation of the Matlab code http://www.santafe.edu/~aaronc/powerlaws/plfit.m from http://www.santafe.edu/~aaronc/powerlaws/
def plfit(self, nosmall=True, finite=False, quiet=False, silent=False,
          xmin=None, verbose=False):
    """
    A pure-Python implementation of the Matlab code
    http://www.santafe.edu/~aaronc/powerlaws/plfit.m
    from http://www.santafe.edu/~aaronc/powerlaws/

    See A. Clauset, C.R. Shalizi, and M.E.J. Newman, "Power-law distributions
    in empirical data" SIAM Review, 51, 661-703 (2009). (arXiv:0706.1062)
    http://arxiv.org/abs/0706.1062

    nosmall is on by default; it rejects low s/n points.
    You can specify xmin to skip the xmin estimation.

    This is only for continuous distributions; I have not implemented a
    pure-python discrete distribution fitter.
    """
    x = self.data
    z = sorted(x)
    t = time.time()
    possible_xmins = sorted(set(z))
    argxmins = [z.index(i) for i in possible_xmins]
    self._nunique = len(possible_xmins)

    if xmin is None:
        # list() so the results can be sliced and searched below
        # (map() returns a one-shot iterator on Python 3)
        av = list(map(self.alpha_(z), possible_xmins))
        dat = list(map(self.kstest_(z), possible_xmins))

        sigma = [(a - 1) / math.sqrt(len(z) - i + 1) for a, i in zip(av, argxmins)]

        if nosmall:
            # test to make sure the number of data points is high enough
            # to provide a reasonable s/n on the computed alpha
            goodvals = [s < 0.1 for s in sigma]
            if False in goodvals:
                nmax = goodvals.index(False)
                dat = dat[:nmax]
                possible_xmins = possible_xmins[:nmax]
                av = av[:nmax]
            else:
                print("Not enough data left after flagging - "
                      "using all positive data.")

        if not quiet:
            print("PYTHON plfit executed in %f seconds" % (time.time() - t))

        self._av = av
        self._xmin_kstest = dat
        self._sigma = sigma

        # [:-1] to weed out the very last data point; it cannot be correct
        # (can't have a power law with 1 data point).
        # However, this should only be done if the ends have not previously
        # been excluded with nosmall
        if nosmall:
            xmin = possible_xmins[dat.index(min(dat))]
        else:
            xmin = possible_xmins[dat.index(min(dat[:-1]))]

    z = [i for i in z if i >= xmin]
    n = len(z)
    alpha = 1 + n / sum([math.log(a / xmin) for a in z])
    if finite:
        alpha = alpha * (n - 1.) / n + 1. / n

    if n == 1:
        if not silent:
            print("Failure: only 1 point kept.  "
                  "Probably not a power-law distribution.")
        self._alpha = 0
        self._alphaerr = 0
        self._likelihood = 0
        self._ks = 0
        self._ks_prob = 0
        self._xmin = xmin
        return xmin, 0
    if n < 50 and not finite and not silent:
        print('(PLFIT) Warning: finite-size bias may be present. n=%i' % n)

    # ks = max(abs( numpy.arange(n)/float(n) - (1-(xmin/z)**(alpha-1)) ))
    ks = max([abs(i / float(n) - (1 - (xmin / b)**(alpha - 1)))
              for i, b in zip(range(n), z)])
    # Parallels Eqn 3.5 in Clauset et al 2009, but zeta(alpha, xmin) =
    # (alpha-1)/xmin.  Really is Eqn B3 in the paper.
    # L = n*log((alpha-1)/xmin) - alpha*sum(log(z/xmin))
    sl = sum([math.log(a / xmin) for a in z])
    L = (n * math.log((alpha - 1) / xmin) - alpha * sl)
    # requires another map...
    # Larr = arange(len(unique(x))) * log((av-1)/unique(x)) - av*sum
    self._likelihood = L
    self._xmin = xmin
    self._xmins = possible_xmins
    self._alpha = alpha
    self._alphaerr = (alpha - 1) / math.sqrt(n)
    # this ks statistic may not have the same value as min(dat) because of
    # the unique() filtering
    self._ks = ks
    # if scipyOK: self._ks_prob = scipy.stats.kstwobign.sf(ks*numpy.sqrt(n))
    self._ngtx = n

    if math.isnan(L) or math.isnan(xmin) or math.isnan(alpha):
        raise ValueError("plfit failed; returned a nan")

    if not quiet:
        if verbose:
            print("The lowest value included in the power-law fit, ", end=' ')
        print("xmin: %g" % xmin, end=' ')
        if verbose:
            print("\nThe number of values above xmin, ", end=' ')
        print("n(>xmin): %i" % n, end=' ')
        if verbose:
            print("\nThe derived power-law alpha (p(x)~x^-alpha) with "
                  "MLE-derived error, ", end=' ')
        print("alpha: %g +/- %g " % (alpha, self._alphaerr), end=' ')
        if verbose:
            print("\nThe log of the Likelihood (the maximized parameter), ",
                  end=' ')
        print("Log-Likelihood: %g " % L, end=' ')
        if verbose:
            print("\nThe KS-test statistic between the best-fit power-law "
                  "and the data, ", end=' ')
        print("ks: %g" % (ks))

    return xmin, alpha
Create a mappable function alpha to apply to each xmin in a list of xmins. This is essentially the slow version of fplfit/cplfit, though I bet it could be sped up with a clever use of parallel_map. Not intended to be used by users.
def alpha_gen(x):
    """
    Create a mappable function alpha to apply to each xmin in a list of
    xmins.  This is essentially the slow version of fplfit/cplfit, though
    I bet it could be sped up with a clever use of parallel_map.  Not
    intended to be used by users.

    Docstring for the generated alpha function::

        Given a sorted data set and a minimum, returns power law MLE fit.
        Data is passed as a keyword parameter so that it can be vectorized.
        If there is only one element, return alpha=0.
    """
    def alpha_(xmin, x=x):
        """
        Given a sorted data set and a minimum, returns power law MLE fit.
        Data is passed as a keyword parameter so that it can be vectorized.
        If there is only one element, return alpha=0.
        """
        gexmin = x >= xmin
        n = np.count_nonzero(gexmin)
        if n < 2:
            return 0
        x = x[gexmin]
        a = 1 + float(n) / sum(log(x / xmin))
        return a
    return alpha_
CDF(x) for the piecewise distribution: exponential for x < xmin, powerlaw for x >= xmin. This is the CDF version of the distributions drawn in fig 3.4a of Clauset et al. The constant C normalizes the PDF.
def plexp_cdf(x,xmin=1,alpha=2.5, pl_only=False, exp_only=False): """ CDF(x) for the piecewise distribution exponential x<xmin, powerlaw x>=xmin This is the CDF version of the distributions drawn in fig 3.4a of Clauset et al. The constant "C" normalizes the PDF """ x = np.array(x) C = 1/(-xmin/(1 - alpha) - xmin/alpha + exp(alpha)*xmin/alpha) Ppl = lambda X: 1+C*(xmin/(1-alpha)*(X/xmin)**(1-alpha)) Pexp = lambda X: C*xmin/alpha*exp(alpha)-C*(xmin/alpha)*exp(-alpha*(X/xmin-1)) if exp_only: return Pexp(x) elif pl_only: return Ppl(x) d=Ppl(x) d[x<xmin]=Pexp(x)[x<xmin] return d
Inverse CDF for a piecewise PDF as defined in eqn. 3.10 of Clauset et al.
def plexp_inv(P, xmin, alpha, guess=1.):
    """
    Inverse CDF for a piecewise PDF as defined in eqn. 3.10 of Clauset et
    al.  (a previous version was incorrect and led to weird discontinuities
    in the distribution function)
    """
    def equation(x, prob):
        return plexp_cdf(x, xmin, alpha) - prob

    # http://stackoverflow.com/questions/19840425/scipy-optimize-faster-root-finding-over-2d-grid
    def solver(y, x0=guess):
        # use the x0 argument (it was previously ignored) and take the
        # scalar, since fsolve returns a length-1 array and np.vectorize
        # wants scalar outputs
        return scipy.optimize.fsolve(equation, x0, args=(y,))[0]

    f = np.vectorize(solver)
    return f(P)
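A round-trip check for the vectorized inverse, as a sketch assuming numpy is imported as np, scipy is available, and plexp_cdf/plexp_inv above are in scope: probabilities mapped through the numerical inverse and back through the CDF should be recovered to solver tolerance.

P = np.linspace(0.05, 0.95, 10)
x = plexp_inv(P, xmin=1.0, alpha=2.5)
print(np.allclose(plexp_cdf(x, xmin=1.0, alpha=2.5), P, atol=1e-6))  # expected: True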
Equation B.8 in Clauset
def discrete_likelihood(data, xmin, alpha):
    """
    Equation B.8 in Clauset

    Given a data set, an xmin value, and an alpha "scaling parameter",
    computes the log-likelihood (the value to be maximized)
    """
    if not scipyOK:
        raise ImportError("Can't import scipy.  Need scipy for zeta function.")
    from scipy.special import zeta

    zz = data[data >= xmin]
    nn = len(zz)

    sum_log_data = np.log(zz).sum()
    # Hurwitz zeta(alpha, xmin) is the normalization of the discrete power
    # law; use a distinct name so the imported function is not shadowed
    zeta_norm = zeta(alpha, xmin)

    L_of_alpha = -1 * nn * log(zeta_norm) - alpha * sum_log_data

    return L_of_alpha
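A small worked sketch (assumptions: numpy imported as np, scipy available so scipyOK is true, and discrete_likelihood above in scope): evaluated on synthetic Zipf-distributed data, the log-likelihood should peak near the true exponent.

data = np.random.zipf(2.5, 5000)
for a in (2.0, 2.5, 3.0):
    print(a, discrete_likelihood(data, xmin=1, alpha=a))
# the alpha=2.5 row should have the largest (least negative) value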
Compute the likelihood for all scaling parameters in the range (alpharange) for a given xmin. This is only part of the discrete value likelihood maximization problem as described in Clauset et al (Equation B.8).
def discrete_likelihood_vector(data, xmin, alpharange=(1.5,3.5), n_alpha=201): """ Compute the likelihood for all "scaling parameters" in the range (alpharange) for a given xmin. This is only part of the discrete value likelihood maximization problem as described in Clauset et al (Equation B.8) *alpharange* [ 2-tuple ] Two floats specifying the upper and lower limits of the power law alpha to test """ from scipy.special import zeta as zeta zz = data[data>=xmin] nn = len(zz) alpha_vector = np.linspace(alpharange[0],alpharange[1],n_alpha) sum_log_data = np.log(zz).sum() # alpha_vector is a vector, xmin is a scalar zeta_vector = zeta(alpha_vector, xmin) #xminvec = np.arange(1.0,xmin) #xminalphasum = np.sum([xm**(-alpha_vector) for xm in xminvec]) #L = -1*alpha_vector*sum_log_data - nn*log(zeta_vector) - xminalphasum L_of_alpha = -1*nn*log(zeta_vector) - alpha_vector * sum_log_data return L_of_alpha
Returns the *argument* of the max of the likelihood of the data given an input xmin
def discrete_max_likelihood_arg(data, xmin, alpharange=(1.5,3.5), n_alpha=201): """ Returns the *argument* of the max of the likelihood of the data given an input xmin """ likelihoods = discrete_likelihood_vector(data, xmin, alpharange=alpharange, n_alpha=n_alpha) Largmax = np.argmax(likelihoods) return Largmax
Returns the *maximum* of the likelihood of the data given an input xmin
def discrete_max_likelihood(data, xmin, alpharange=(1.5,3.5), n_alpha=201):
    """
    Returns the *maximum* of the likelihood of the data given an input xmin
    (compare discrete_max_likelihood_arg, which returns the argument)
    """
    likelihoods = discrete_likelihood_vector(data, xmin, alpharange=alpharange,
                                             n_alpha=n_alpha)
    Lmax = np.max(likelihoods)
    return Lmax
Return the most likely alpha for the data given an xmin
def most_likely_alpha(data, xmin, alpharange=(1.5,3.5), n_alpha=201): """ Return the most likely alpha for the data given an xmin """ alpha_vector = np.linspace(alpharange[0],alpharange[1],n_alpha) return alpha_vector[discrete_max_likelihood_arg(data, xmin, alpharange=alpharange, n_alpha=n_alpha)]
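A usage sketch for the grid-search MLE (assumptions: numpy imported as np and the discrete helpers above in scope): with synthetic Zipf data the search should land near the true exponent.

data = np.random.zipf(2.5, 10000)
alpha_hat = most_likely_alpha(data, xmin=1)  # default alpharange (1.5, 3.5) covers 2.5
print("alpha_hat = %g (true 2.5)" % alpha_hat)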
Equation B.17 of Clauset et al 2009
def discrete_alpha_mle(data, xmin):
    """
    Equation B.17 of Clauset et al 2009

    The Maximum Likelihood Estimator of the "scaling parameter" alpha in
    the discrete case is similar to that in the continuous case
    """
    # boolean indices of the data above xmin
    gexmin = (data >= xmin)
    nn = gexmin.sum()
    if nn < 2:
        return 0
    xx = data[gexmin]
    # Eqn B.17: alpha ~ 1 + n / sum(log(x / (xmin - 0.5)))
    alpha = 1.0 + float(nn) * (sum(log(xx / (float(xmin) - 0.5))))**-1
    return alpha
Use the maximum likelihood to determine the most likely value of alpha
def discrete_best_alpha(data, alpharangemults=(0.9,1.1), n_alpha=201,
                        approximate=True, verbose=True):
    """
    Use the maximum likelihood to determine the most likely value of alpha

    *alpharangemults* [ 2-tuple ]
        Pair of values indicating multiplicative factors above and below the
        approximate alpha from the MLE alpha to use when determining the
        "exact" alpha (by directly maximizing the likelihood function)
    """
    xmins = np.unique(data)
    if approximate:
        alpha_of_xmin = [discrete_alpha_mle(data, xmin) for xmin in xmins]
    else:
        alpha_approx = [discrete_alpha_mle(data, xmin) for xmin in xmins]
        # use the requested multiplicative factors (previously hard-coded
        # to 0.9/1.1, so the alpharangemults parameter had no effect)
        alpharanges = [(alpharangemults[0]*a, alpharangemults[1]*a)
                       for a in alpha_approx]
        alpha_of_xmin = [most_likely_alpha(data, xmin, alpharange=ar, n_alpha=n_alpha)
                         for xmin, ar in zip(xmins, alpharanges)]
    ksvalues = [discrete_ksD(data, xmin, alpha)
                for xmin, alpha in zip(xmins, alpha_of_xmin)]

    best_index = argmin(ksvalues)
    best_alpha = alpha_of_xmin[best_index]
    best_xmin = xmins[best_index]
    best_ks = ksvalues[best_index]
    best_likelihood = discrete_likelihood(data, best_xmin, best_alpha)

    if verbose:
        print("alpha = %f   xmin = %f   ksD = %f   L = %f   (n<x) = %i  (n>=x) = %i" % (
            best_alpha, best_xmin, best_ks, best_likelihood,
            (data < best_xmin).sum(), (data >= best_xmin).sum()))

    return best_alpha, best_xmin, best_ks, best_likelihood
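An end-to-end sketch for the discrete fit (assumptions: numpy imported as np, scipy available, and the discrete helpers above in scope):

data = np.random.zipf(2.2, 20000)
best_alpha, best_xmin, best_ks, best_L = discrete_best_alpha(data, verbose=False)
print("alpha=%g xmin=%g ksD=%g" % (best_alpha, best_xmin, best_ks))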
Given a sorted data set, a minimum, and an alpha, returns the power law KS-test D value w/ the data
def discrete_ksD(data, xmin, alpha): """ given a sorted data set, a minimum, and an alpha, returns the power law ks-test D value w/data The returned value is the "D" parameter in the ks test (this is implemented differently from the continuous version because there are potentially multiple identical points that need comparison to the power law) """ zz = np.sort(data[data>=xmin]) nn = float(len(zz)) if nn < 2: return np.inf #cx = np.arange(nn,dtype='float')/float(nn) #cf = 1.0-(zz/xmin)**(1.0-alpha) model_cdf = 1.0-(zz.astype('float')/float(xmin))**(1.0-alpha) data_cdf = np.searchsorted(zz,zz,side='left')/(float(nn)) ks = max(abs(model_cdf-data_cdf)) return ks
A Python implementation of the Matlab code http://www.santafe.edu/~aaronc/powerlaws/plfit.m from http://www.santafe.edu/~aaronc/powerlaws/
def plfit(self, nosmall=True, finite=False, quiet=False, silent=False,
          usefortran=False, usecy=False, xmin=None, verbose=False,
          discrete=None, discrete_approx=True, discrete_n_alpha=1000,
          skip_consistency_check=False):
    """
    A Python implementation of the Matlab code
    http://www.santafe.edu/~aaronc/powerlaws/plfit.m
    from http://www.santafe.edu/~aaronc/powerlaws/

    See A. Clauset, C.R. Shalizi, and M.E.J. Newman, "Power-law distributions
    in empirical data" SIAM Review, 51, 661-703 (2009). (arXiv:0706.1062)
    http://arxiv.org/abs/0706.1062

    There are 3 implementations of xmin estimation.  The fortran version is
    fastest, the C (cython) version is ~10% slower, and the python version
    is ~3x slower than the fortran version.  Also, the cython code suffers
    ~2% numerical error relative to the fortran and python for unknown
    reasons.

    There is also a discrete version implemented in python - it is
    different from the continuous version!

    Parameters
    ----------
    discrete : bool or None
        If *discrete* is None, the code will try to determine whether the
        data set is discrete or continuous based on the uniqueness of the
        data; if your data set is continuous but you have any non-unique
        data points (e.g., flagged "bad" data), the "automatic"
        determination will fail.  If *discrete* is True or False, the
        discrete or continuous fitter will be used, respectively.
    xmin : float or int
        If you specify xmin, the fitter will only determine alpha assuming
        the given xmin; the rest of the code (and most of the complexity)
        is determining an estimate for xmin and alpha.
    nosmall : bool
        When on, the code rejects low s/n points.  WARNING: This option,
        which is on by default, may result in different answers than the
        original Matlab code and the "powerlaw" python package
    finite : bool
        There is a 'finite-size bias' to the estimator.  The "alpha" the
        code measures is "alpha-hat" s.t. ᾶ = (nα-1)/(n-1), or
        α = (1 + ᾶ (n-1)) / n
    quiet : bool
        If False, delivers messages about what fitter is used and the fit
        results
    verbose : bool
        Deliver descriptive messages about the fit parameters (only if
        `quiet==False`)
    silent : bool
        If True, will print NO messages
    skip_consistency_check : bool
        The code will normally perform a consistency check to make sure the
        alpha value computed by the fitter matches the alpha value computed
        directly in python.  It is possible for numerical differences to
        creep in, usually at the 10^-6 or less level.  If you see an
        exception reporting this type of error, skipping the check can be
        the appropriate next step.

    Returns
    -------
    (xmin, alpha)
        The best-fit xmin and alpha values
    """
    x = self.data
    if any(x < 0):
        raise ValueError("Power law distributions are only valid for "
                         "positive data.  Remove negative values before "
                         "fitting.")
    z = np.sort(x)

    # xmins = the unique values of x that can be used as the threshold for
    # the power law fit
    # argxmins = the index of each of these possible thresholds
    xmins, argxmins = np.unique(z, return_index=True)
    self._nunique = len(xmins)

    if self._nunique == len(x) and discrete is None:
        if verbose:
            print("Using CONTINUOUS fitter because there are no repeated "
                  "values.")
        discrete = False
    elif self._nunique < len(x) and discrete is None:
        if verbose:
            print("Using DISCRETE fitter because there are repeated "
                  "values.")
        discrete = True

    t = time.time()
    if xmin is None:
        if discrete:
            self.discrete_best_alpha(approximate=discrete_approx,
                                     n_alpha=discrete_n_alpha,
                                     verbose=verbose,
                                     finite=finite)
            return self._xmin, self._alpha
        elif usefortran and fortranOK:
            kstest_values, alpha_values = fplfit.plfit(z, 0)
            if not quiet:
                print("FORTRAN plfit executed in %f seconds" % (time.time() - t))
        elif usecy and cyOK:
            kstest_values, alpha_values = cplfit.plfit_loop(z,
                                                            nosmall=False,
                                                            zunique=xmins,
                                                            argunique=argxmins)
            if not quiet:
                print("CYTHON plfit executed in %f seconds" % (time.time() - t))
        else:
            # python (numpy) version
            f_alpha = alpha_gen(z)
            f_kstest = kstest_gen(z)
            alpha_values = np.asarray(list(map(f_alpha, xmins)), dtype='float')
            kstest_values = np.asarray(list(map(f_kstest, xmins)), dtype='float')
            if not quiet:
                print("PYTHON plfit executed in %f seconds" % (time.time() - t))

        if not quiet:
            if usefortran and not fortranOK:
                raise ImportError("fortran fplfit did not load")
            if usecy and not cyOK:
                raise ImportError("cython cplfit did not load")

        # For each alpha, the number of included data points is
        # total data length - first index of xmin.
        # No +1 is needed: xmin is included.
        sigma = (alpha_values - 1) / np.sqrt(len(z) - argxmins)
        # I had changed it to this, but I think this is wrong.
        # sigma = (alpha_values-1)/np.sqrt(len(z)-np.arange(len(z)))

        if nosmall:
            # test to make sure the number of data points is high enough
            # to provide a reasonable s/n on the computed alpha
            goodvals = sigma < 0.1
            nmax = argmin(goodvals)
            if nmax <= 0:
                nmax = len(xmins) - 1
                if not silent:
                    print("Not enough data left after flagging "
                          "low S/N points.  "
                          "Using all data.")
        else:
            # -1 to weed out the very last data point; it cannot be correct
            # (can't have a power law with 1 data point).
            nmax = len(xmins) - 1

        best_ks_index = argmin(kstest_values[:nmax])
        xmin = xmins[best_ks_index]

        self._alpha_values = alpha_values
        self._xmin_kstest = kstest_values
        if scipyOK:
            # CHECK THIS
            self._ks_prob_all = np.array([scipy.stats.ksone.sf(D_stat,
                                                               len(kstest_values) - ii)
                                          for ii, D_stat in enumerate(kstest_values)])
        self._sigma = sigma

        # sanity check
        n = np.count_nonzero(z >= xmin)
        alpha = 1. + float(n) / sum(log(z[z >= xmin] / xmin))
        try:
            if not skip_consistency_check:
                np.testing.assert_almost_equal(alpha,
                                               alpha_values[best_ks_index],
                                               decimal=4)
        except AssertionError:
            raise AssertionError("The alpha value computed was not self-"
                                 "consistent.  This should not happen.  "
                                 "However, it is possible that this is "
                                 "a numerical uncertainty issue; the "
                                 "values being compared are {0} and {1}. "
                                 "If they are close enough, set "
                                 "skip_consistency_check=True."
                                 .format(alpha, alpha_values[best_ks_index]))

    z = z[z >= xmin]
    n = len(z)
    alpha = 1. + float(n) / sum(log(z / xmin))
    if finite:
        alpha = alpha * (n - 1.) / n + 1. / n

    if n < 50 and not finite and not silent:
        print('(PLFIT) Warning: finite-size bias may be present. n=%i' % n)

    ks = max(abs(np.arange(n) / float(n) - (1 - (xmin / z)**(alpha - 1))))
    # Parallels Eqn 3.5 in Clauset et al 2009, but zeta(alpha, xmin) =
    # (alpha-1)/xmin.  Really is Eqn B3 in the paper.
    L = n * log((alpha - 1) / xmin) - alpha * sum(log(z / xmin))
    # requires another map...
    # Larr = arange(len(unique(x))) * log((alpha_values-1)/unique(x)) - alpha_values*sum
    self._likelihood = L
    self._xmin = xmin
    self._xmins = xmins
    self._alpha = alpha
    self._alphaerr = (alpha - 1) / np.sqrt(n)

    # this ks statistic may not have the same value as min(dat) because of
    # the unique() filtering
    self._ks = ks

    if scipyOK:
        self._ks_prob = scipy.stats.ksone.sf(ks, n)

    self._ngtx = n
    if n == 1:
        if not silent:
            print("Failure: only 1 point kept.  "
                  "Probably not a power-law distribution.")
        self._alpha = alpha = 0
        self._alphaerr = 0
        self._likelihood = L = 0
        self._ks = 0
        self._ks_prob = 0
        self._xmin = xmin
        return xmin, 0

    if np.isnan(L) or np.isnan(xmin) or np.isnan(alpha):
        raise ValueError("plfit failed; returned a nan")

    if not quiet:
        if verbose:
            print("The lowest value included in the power-law fit, ", end=' ')
        print("xmin: %g" % xmin, end=' ')
        if verbose:
            print("\nThe number of values above xmin, ", end=' ')
        print("n(>xmin): %i" % n, end=' ')
        if verbose:
            print("\nThe derived power-law alpha (p(x)~x^-alpha) with "
                  "MLE-derived error, ", end=' ')
        print("alpha: %g +/- %g " % (alpha, self._alphaerr), end=' ')
        if verbose:
            print("\nThe log of the Likelihood (the maximized parameter; "
                  "you minimized the negative log likelihood), ", end=' ')
        print("Log-Likelihood: %g " % L, end=' ')
        if verbose:
            print("\nThe KS-test statistic between the best-fit power-law "
                  "and the data, ", end=' ')
        print("ks: %g" % (ks), end=' ')
        if scipyOK:
            if verbose:
                print(" occurs with probability ", end=' ')
            print("p(ks): %g" % (self._ks_prob))
        else:
            print()

    return xmin, alpha
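A usage sketch for the class-based fitter (assumptions: the module is importable as plfit and the wrapper class is also named plfit, with __init__ running the fit and forwarding keyword arguments, as in the original package; adjust names to your install):

import numpy as np
import plfit as plfit_module

u = np.random.uniform(size=5000)
data = (1 - u)**(-1.0 / 1.5)           # continuous power law, alpha = 2.5, xmin = 1

fitter = plfit_module.plfit(data, quiet=True)
print(fitter._xmin, fitter._alpha, fitter._alphaerr)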
Use the maximum likelihood to determine the most likely value of alpha
def discrete_best_alpha(self, alpharangemults=(0.9,1.1), n_alpha=201,
                        approximate=True, verbose=True, finite=True):
    """
    Use the maximum likelihood to determine the most likely value of alpha

    *alpharangemults* [ 2-tuple ]
        Pair of values indicating multiplicative factors above and below the
        approximate alpha from the MLE alpha to use when determining the
        "exact" alpha (by directly maximizing the likelihood function)
    *n_alpha* [ int ]
        Number of alpha values to use when measuring.  Larger number is
        more accurate.
    *approximate* [ bool ]
        If False, try to "zoom-in" around the MLE alpha and get the exact
        best alpha value within some range around the approximate best
    *verbose* [ bool ]
    *finite* [ bool ]
        Correction for finite data?
    """
    data = self.data
    self._xmins = xmins = np.unique(data)
    if approximate:
        alpha_of_xmin = [discrete_alpha_mle(data, xmin) for xmin in xmins]
    else:
        alpha_approx = [discrete_alpha_mle(data, xmin) for xmin in xmins]
        # honor alpharangemults instead of hard-coding 0.9/1.1
        alpharanges = [(alpharangemults[0]*a, alpharangemults[1]*a)
                       for a in alpha_approx]
        alpha_of_xmin = [most_likely_alpha(data, xmin, alpharange=ar, n_alpha=n_alpha)
                         for xmin, ar in zip(xmins, alpharanges)]
    ksvalues = np.array([discrete_ksD(data, xmin, alpha)
                         for xmin, alpha in zip(xmins, alpha_of_xmin)])
    self._alpha_values = np.array(alpha_of_xmin)
    self._xmin_kstest = ksvalues

    ksvalues[np.isnan(ksvalues)] = np.inf

    best_index = argmin(ksvalues)
    self._alpha = best_alpha = alpha_of_xmin[best_index]
    self._xmin = best_xmin = xmins[best_index]
    self._ks = best_ks = ksvalues[best_index]
    self._likelihood = best_likelihood = discrete_likelihood(data, best_xmin,
                                                             best_alpha)

    # n must be computed *before* the finite-size correction uses it
    # (previously this line came last, so finite=True raised a NameError)
    self._ngtx = n = (self.data >= self._xmin).sum()

    if finite:
        self._alpha = self._alpha * (n - 1.) / n + 1. / n

    if verbose:
        print("alpha = %f   xmin = %f   ksD = %f   L = %f   (n<x) = %i  (n>=x) = %i" % (
            best_alpha, best_xmin, best_ks, best_likelihood,
            (data < best_xmin).sum(), (data >= best_xmin).sum()))

    self._alphaerr = (self._alpha - 1.0) / np.sqrt(n)
    if scipyOK:
        self._ks_prob = scipy.stats.ksone.sf(self._ks, n)

    return best_alpha, best_xmin, best_ks, best_likelihood
Plot xmin versus the ks value for derived alpha. This plot can be used as a diagnostic of whether you have derived the 'best' fit: if there are multiple local minima, your data set may be well suited to a broken powerlaw or a different function.
def xminvsks(self, **kwargs): """ Plot xmin versus the ks value for derived alpha. This plot can be used as a diagnostic of whether you have derived the 'best' fit: if there are multiple local minima, your data set may be well suited to a broken powerlaw or a different function. """ pylab.plot(self._xmins,self._xmin_kstest,'.') pylab.plot(self._xmin,self._ks,'s') #pylab.errorbar([self._ks],self._alpha,yerr=self._alphaerr,fmt='+') ax=pylab.gca() ax.set_ylabel("KS statistic") ax.set_xlabel("min(x)") pylab.draw() return ax
Plot alpha versus the ks value for derived alpha. This plot can be used as a diagnostic of whether you have derived the 'best' fit: if there are multiple local minima, your data set may be well suited to a broken powerlaw or a different function.
def alphavsks(self,autozoom=True,**kwargs): """ Plot alpha versus the ks value for derived alpha. This plot can be used as a diagnostic of whether you have derived the 'best' fit: if there are multiple local minima, your data set may be well suited to a broken powerlaw or a different function. """ pylab.plot(self._alpha_values, self._xmin_kstest, '.') pylab.errorbar(self._alpha, self._ks, xerr=self._alphaerr, fmt='+') ax=pylab.gca() if autozoom: ax.set_ylim(0.8*(self._ks),3*(self._ks)) ax.set_xlim((self._alpha)-5*self._alphaerr,(self._alpha)+5*self._alphaerr) ax.set_ylabel("KS statistic") ax.set_xlabel(r'$\alpha$') pylab.draw() return ax
Plots CDF and powerlaw
def plotcdf(self, x=None, xmin=None, alpha=None, pointcolor='k', dolog=True, zoom=True, pointmarker='+', **kwargs): """ Plots CDF and powerlaw """ if x is None: x=self.data if xmin is None: xmin=self._xmin if alpha is None: alpha=self._alpha x=np.sort(x) n=len(x) xcdf = np.arange(n,0,-1,dtype='float')/float(n) q = x[x>=xmin] fcdf = (q/xmin)**(1-alpha) nc = xcdf[argmax(x>=xmin)] fcdf_norm = nc*fcdf D_location = argmax(xcdf[x>=xmin]-fcdf_norm) pylab.vlines(q[D_location], xcdf[x>=xmin][D_location], fcdf_norm[D_location], color='m', linewidth=2, zorder=2) pylab.plot([q[D_location]]*2, [xcdf[x>=xmin][D_location], fcdf_norm[D_location]], color='m', marker='s', zorder=3) #plotx = pylab.linspace(q.min(),q.max(),1000) #ploty = (plotx/xmin)**(1-alpha) * nc if dolog: pylab.loglog(x,xcdf,marker=pointmarker,color=pointcolor,**kwargs) pylab.loglog(q,fcdf_norm,'r',**kwargs) else: pylab.semilogx(x,xcdf,marker=pointmarker,color=pointcolor,**kwargs) pylab.semilogx(q,fcdf_norm,'r',**kwargs) if zoom: pylab.axis([xmin, x.max(), xcdf.min(), nc])
Plots PDF and powerlaw.
def plotpdf(self, x=None, xmin=None, alpha=None, nbins=50, dolog=True, dnds=False, drawstyle='steps-post', histcolor='k', plcolor='r', fill=False, dohist=True, **kwargs): """ Plots PDF and powerlaw. kwargs is passed to pylab.hist and pylab.plot """ if x is None: x=self.data if xmin is None: xmin=self._xmin if alpha is None: alpha=self._alpha x=np.sort(x) #n=len(x) pylab.gca().set_xscale('log') pylab.gca().set_yscale('log') if dnds: hb = pylab.histogram(x,bins=np.logspace(log10(min(x)),log10(max(x)),nbins)) h = hb[0] b = hb[1] db = hb[1][1:]-hb[1][:-1] h = h/db if dohist: pylab.plot(b[:-1],h,drawstyle=drawstyle,color=histcolor,**kwargs) #alpha -= 1 elif dolog: hb = pylab.hist(x, bins=np.logspace(log10(min(x)), log10(max(x)), nbins), log=True, fill=fill, edgecolor=histcolor, **kwargs) alpha -= 1 h,b=hb[0],hb[1] if not dohist: for rect in hb[2]: rect.set_visible(False) else: hb = pylab.hist(x, bins=np.linspace((min(x)), (max(x)), nbins), fill=fill, edgecolor=histcolor, **kwargs) h,b=hb[0],hb[1] if not dohist: for rect in hb[2]: rect.set_visible(False) # plotting points are at the center of each bin b = (b[1:]+b[:-1])/2.0 q = x[x>=xmin] px = (alpha-1)/xmin * (q/xmin)**(-alpha) # Normalize by the median ratio between the histogram and the power-law # The normalization is semi-arbitrary; an average is probably just as valid plotloc = (b>xmin)*(h>0) norm = np.median(h[plotloc] / ((alpha-1)/xmin * (b[plotloc]/xmin)**(-alpha))) px = px*norm plotx = pylab.linspace(q.min(),q.max(),1000) ploty = (alpha-1)/xmin * (plotx/xmin)**(-alpha) * norm #pylab.loglog(q,px,'r',**kwargs) pylab.plot(plotx, ploty, color=plcolor, **kwargs) axlims = pylab.axis() pylab.vlines(xmin, axlims[2], max(px), colors=plcolor, linestyle='dashed') if dolog and min(x) <= 0: lolim = 0.1 else: lolim = min(x) pylab.gca().set_xlim(lolim, max(x))
Plots the power-law-predicted value on the Y-axis against the real values along the X-axis. Can be used as a diagnostic of the fit quality.
def plotppf(self, x=None, xmin=None, alpha=None, dolog=True, **kwargs):
    """
    Plots the power-law-predicted value on the Y-axis against the real
    values along the X-axis.  Can be used as a diagnostic of the fit
    quality.
    """
    if xmin is None:
        xmin = self._xmin
    if alpha is None:
        alpha = self._alpha
    # `x is None` rather than `not x`: truth-testing a numpy array raises
    # a ValueError
    if x is None:
        x = np.sort(self.data[self.data > xmin])
    else:
        x = np.sort(x[x > xmin])

    # N = M^(-alpha+1)
    # M = N^(1/(-alpha+1))
    m0 = min(x)
    N = (1.0 + np.arange(len(x)))[::-1]
    xmodel = m0 * N**(1 / (1 - alpha)) / max(N)**(1 / (1 - alpha))

    if dolog:
        pylab.loglog(x, xmodel, '.', **kwargs)
        pylab.gca().set_xlim(min(x), max(x))
        pylab.gca().set_ylim(min(x), max(x))
    else:
        pylab.plot(x, xmodel, '.', **kwargs)
    pylab.plot([min(x), max(x)], [min(x), max(x)], 'k--')
    pylab.xlabel("Real Value")
    pylab.ylabel("Power-Law Model Value")
Use the maximum likelihood estimator for a lognormal distribution to produce the best-fit lognormal parameters
def lognormal(self, doprint=True):
    """
    Use the maximum likelihood estimator for a lognormal distribution to
    produce the best-fit lognormal parameters
    """
    # N = float(self.data.shape[0])
    # mu = log(self.data).sum() / N
    # sigmasquared = ( ( log(self.data) - mu )**2 ).sum() / N
    # self.lognormal_mu = mu
    # self.lognormal_sigma = np.sqrt(sigmasquared)
    # self.lognormal_likelihood = -N/2. * log(np.pi*2) - N/2. * log(sigmasquared) - 1/(2*sigmasquared) * (( self.data - mu )**2).sum()
    # if doprint:
    #     print "Best fit lognormal is exp( -(x-%g)^2 / (2*%g^2)" % (mu,np.sqrt(sigmasquared))
    #     print "Likelihood: %g" % (self.lognormal_likelihood)
    if scipyOK:
        fitpars = scipy.stats.lognorm.fit(self.data)
        self.lognormal_dist = scipy.stats.lognorm(*fitpars)
        self.lognormal_ksD, self.lognormal_ksP = scipy.stats.kstest(
            self.data, self.lognormal_dist.cdf)
        # nnlf = NEGATIVE log likelihood
        self.lognormal_likelihood = -1 * scipy.stats.lognorm.nnlf(fitpars, self.data)

        # Is this the right likelihood ratio?
        # Definition of L from eqn. B3 of Clauset et al 2009:
        #    L = log(p(x|alpha))
        # _nnlf from scipy.stats.distributions:
        #    -sum(log(self._pdf(x, *args)),axis=0)
        # Assuming the pdf and p(x|alpha) are both non-inverted, it looks
        # like the _nnlf and L have opposite signs, which would explain the
        # likelihood ratio I've used here:
        self.power_lognorm_likelihood = (self._likelihood + self.lognormal_likelihood)
        # a previous version had 2*(above).  That is the correct form if you
        # want the likelihood ratio statistic "D":
        # http://en.wikipedia.org/wiki/Likelihood-ratio_test
        # The above explanation makes sense, since nnlf is the *negative*
        # log likelihood function (to minimize).
        # Since both stored values are *log* likelihoods, the ratio of the
        # positive likelihoods becomes a difference of the logs:
        #    D = -2 log( L_power / L_lognormal )
        #      = -2 (logL_power - logL_lognormal)
        # (a previous version took log() of the log-likelihood ratio, which
        # is wrong and can produce nans)
        self.likelihood_ratio_D = -2 * (self._likelihood - self.lognormal_likelihood)

        if doprint:
            print("Lognormal KS D: %g  p(D): %g" % (self.lognormal_ksD, self.lognormal_ksP), end=' ')
            print("  Likelihood Ratio Statistic (powerlaw/lognormal): %g" % self.likelihood_ratio_D)
            print("At this point, have a look at Clauset et al 2009 Appendix C: determining sigma(likelihood_ratio)")
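Continuing the class-based sketch above (assumptions: scipy available and `fitter` is an already-fitted plfit instance), the power-law fit can be compared against a lognormal via the likelihood-ratio statistic:

fitter.lognormal(doprint=True)
# D = -2*(logL_powerlaw - logL_lognormal); positive D favors the lognormal
print(fitter.likelihood_ratio_D)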
Plot the PDF of the fitted lognormal distribution
def plot_lognormal_pdf(self, **kwargs):
    """
    Plot the PDF of the fitted lognormal distribution
    """
    if not hasattr(self, 'lognormal_dist'):
        return

    normalized_pdf = self.lognormal_dist.pdf(self.data) / self.lognormal_dist.pdf(self.data).max()
    minY, maxY = pylab.gca().get_ylim()
    pylab.plot(self.data, normalized_pdf * maxY, '.', **kwargs)
Plot the complementary CDF (survival function) of the fitted lognormal distribution
def plot_lognormal_cdf(self, **kwargs):
    """
    Plot the complementary CDF (survival function) of the fitted lognormal
    distribution, marking the location of the KS-test D statistic
    """
    if not hasattr(self, 'lognormal_dist'):
        return

    x = np.sort(self.data)
    n = len(x)
    xcdf = np.arange(n, 0, -1, dtype='float') / float(n)
    lcdf = self.lognormal_dist.sf(x)

    D_location = argmax(xcdf - lcdf)
    pylab.vlines(x[D_location], xcdf[D_location], lcdf[D_location],
                 color='m', linewidth=2)

    pylab.plot(x, lcdf, ',', **kwargs)
Sanitizes HTML, removing disallowed tags and attributes.
def sanitize_turbo(html, allowed_tags=TURBO_ALLOWED_TAGS, allowed_attrs=TURBO_ALLOWED_ATTRS):
    """Sanitizes HTML, removing disallowed tags and attributes.

    :param str|unicode html:
    :param list allowed_tags: List of allowed tags.
    :param dict allowed_attrs: Dictionary with attributes allowed for tags.

    :rtype: unicode
    """
    return clean(html, tags=allowed_tags, attributes=allowed_attrs, strip=True)
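A usage sketch (assumptions: `clean` is bleach.clean and the TURBO_* module constants allow simple inline tags such as <b> but not <script> or event-handler attributes):

dirty = '<b onclick="evil()">bold</b> <script>alert(1)</script>'
print(sanitize_turbo(dirty))
# with strip=True the disallowed markup is dropped rather than escaped,
# so the output should look roughly like: <b>bold</b> alert(1)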
Configure Yandex Metrika analytics counter.
def configure_analytics_yandex(self, ident, params=None): """Configure Yandex Metrika analytics counter. :param str|unicode ident: Metrika counter ID. :param dict params: Additional params. """ params = params or {} data = { 'type': 'Yandex', 'id': ident, } if params: data['params'] = '%s' % params self.analytics.append(data)
Generates a list of tags identifying those previously selected.
def tag_list(self, tags): """ Generates a list of tags identifying those previously selected. Returns a list of tuples of the form (<tag name>, <CSS class name>). Uses the string names rather than the tags themselves in order to work with tag lists built from forms not fully submitted. """ return [ (tag.name, "selected taggit-tag" if tag.name in tags else "taggit-tag") for tag in self.model.objects.all() ]
Calculate the great circle distance between two points on the earth (specified in decimal degrees)
def gcd(self, lon1, lat1, lon2, lat2): """ Calculate the great circle distance between two points on the earth (specified in decimal degrees) """ # convert decimal degrees to radians lon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2]) # haversine formula dlon = lon2 - lon1 dlat = lat2 - lat1 a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) ** 2 c = 2 * math.asin(math.sqrt(a)) dis = E.R * c return dis
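A standalone check of the haversine formula above (assumption: an Earth radius of 6371 km, whatever constant E.R actually holds in this module):

import math

def haversine_km(lon1, lat1, lon2, lat2, R=6371.0):
    # same haversine formula as gcd above, with an explicit radius
    lon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2])
    a = math.sin((lat2 - lat1) / 2)**2 + \
        math.cos(lat1) * math.cos(lat2) * math.sin((lon2 - lon1) / 2)**2
    return 2 * R * math.asin(math.sqrt(a))

# Paris (2.35E, 48.86N) to London (-0.13E, 51.51N), roughly 344 km:
print("%.0f km" % haversine_km(2.35, 48.86, -0.13, 51.51))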
Calculate md5 fingerprint.
def hash_md5(self): """Calculate md5 fingerprint. Shamelessly copied from http://stackoverflow.com/questions/6682815/deriving-an-ssh-fingerprint-from-a-public-key-in-python For specification, see RFC4716, section 4.""" fp_plain = hashlib.md5(self._decoded_key).hexdigest() return "MD5:" + ':'.join(a + b for a, b in zip(fp_plain[::2], fp_plain[1::2]))
Calculate sha256 fingerprint.
def hash_sha256(self): """Calculate sha256 fingerprint.""" fp_plain = hashlib.sha256(self._decoded_key).digest() return (b"SHA256:" + base64.b64encode(fp_plain).replace(b"=", b"")).decode("utf-8")
Calculate sha512 fingerprint.
def hash_sha512(self):
    """Calculate sha512 fingerprint."""
    fp_plain = hashlib.sha512(self._decoded_key).digest()
    return (b"SHA512:" + base64.b64encode(fp_plain).replace(b"=", b"")).decode("utf-8")
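A usage sketch for the three fingerprint methods (assumptions: they live on a key class holding the raw decoded key bytes in self._decoded_key, as in e.g. the sshpubkeys package; the class name here is illustrative):

import base64
import hashlib

class DemoKey:
    def __init__(self, b64_blob):
        # the base64 blob is the second field of an OpenSSH public key line
        self._decoded_key = base64.b64decode(b64_blob)
    # reuse the module-level functions above as methods
    hash_md5 = hash_md5
    hash_sha256 = hash_sha256
    hash_sha512 = hash_sha512

# key = DemoKey("AAAAB3NzaC1yc2E...")   # blob elided; supply a real key
# print(key.hash_md5(), key.hash_sha256())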