desc stringlengths 3 26.7k | decl stringlengths 11 7.89k | bodies stringlengths 8 553k |
|---|---|---|
'Check if a cluster is worthy enough to be merged. If
yes then merge.'
| def merge_subcluster(self, nominee_cluster, threshold):
| new_ss = (self.squared_sum_ + nominee_cluster.squared_sum_)
new_ls = (self.linear_sum_ + nominee_cluster.linear_sum_)
new_n = (self.n_samples_ + nominee_cluster.n_samples_)
new_centroid = ((1 / new_n) * new_ls)
new_norm = np.dot(new_centroid, new_centroid)
dot_product = (((-2) * new_n) * new_nor... |
'Return radius of the subcluster'
@property
def radius(self):
    """Return the radius of the subcluster.

    Derived from the CF-vector statistics: squared sum, linear sum,
    number of samples and the squared norm of the centroid.
    """
    centroid_term = np.dot(self.linear_sum_, self.centroid_)
    numerator = self.squared_sum_ - 2 * centroid_term
    return sqrt(numerator / self.n_samples_ + self.sq_norm_)
|
'Build a CF Tree for the input data.
Parameters
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.'
def fit(self, X, y=None):
    """Build a CF Tree for the input data.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Input data.
    """
    # Record which entry point was used before delegating.
    self.fit_ = True
    self.partial_fit_ = False
    return self._fit(X)
|
'Retrieve the leaves of the CF Node.
Returns
leaves : array-like
List of the leaf nodes.'
def _get_leaves(self):
    """Retrieve the leaves of the CF Node.

    Returns
    -------
    leaves : array-like
        List of the leaf nodes, in linked-list order.
    """
    collected = []
    # dummy_leaf_ is a sentinel; the real leaves start at its successor.
    node = self.dummy_leaf_.next_leaf_
    while node is not None:
        collected.append(node)
        node = node.next_leaf_
    return collected
|
'Online learning. Prevents rebuilding of CFTree from scratch.
Parameters
X : {array-like, sparse matrix}, shape (n_samples, n_features), None
Input data. If X is not provided, only the global clustering
step is done.'
def partial_fit(self, X=None, y=None):
    """Online learning. Prevents rebuilding of the CFTree from scratch.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features), None
        Input data. If X is not provided, only the global clustering
        step is done.
    """
    self.partial_fit_ = True
    self.fit_ = False
    if X is None:
        # No new data: run only the global clustering step.
        self._global_clustering()
        return self
    self._check_fit(X)
    return self._fit(X)
|
'Predict data using the ``centroids_`` of subclusters.
Avoid computation of the row norms of X.
Parameters
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
labels : ndarray, shape(n_samples)
Labelled data.'
def predict(self, X):
    """Predict data using the ``centroids_`` of subclusters.

    Avoid computation of the row norms of X.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Input data.

    Returns
    -------
    labels : ndarray, shape(n_samples)
        Labelled data.
    """
    X = check_array(X, accept_sparse='csr')
    self._check_fit(X)
    # ||c||^2 - 2 * X.c : monotone in the true squared distance, so the
    # argmin picks the same nearest subcluster without computing ||x||^2.
    dist = safe_sparse_dot(X, self.subcluster_centers_.T)
    dist *= -2
    dist += self._subcluster_norms
    nearest = np.argmin(dist, axis=1)
    return self.subcluster_labels_[nearest]
|
'Transform X into subcluster centroids dimension.
Each dimension represents the distance from the sample point to each
cluster centroid.
Parameters
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters)
Transformed data.'
def transform(self, X):
    """Transform X into subcluster centroids dimension.

    Each dimension represents the distance from the sample point to each
    cluster centroid.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Input data.

    Returns
    -------
    X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters)
        Transformed data.
    """
    check_is_fitted(self, 'subcluster_centers_')
    distances = euclidean_distances(X, self.subcluster_centers_)
    return distances
|
'Global clustering for the subclusters obtained after fitting'
| def _global_clustering(self, X=None):
| clusterer = self.n_clusters
centroids = self.subcluster_centers_
compute_labels = ((X is not None) and self.compute_labels)
not_enough_centroids = False
if isinstance(clusterer, int):
clusterer = AgglomerativeClustering(n_clusters=self.n_clusters)
if (len(centroids) < self.n_clusters... |
'Creates an affinity matrix for X using the selected affinity,
then applies spectral clustering to this affinity matrix.
Parameters
X : array-like or sparse matrix, shape (n_samples, n_features)
OR, if affinity==`precomputed`, a precomputed affinity
matrix of shape (n_samples, n_samples)'
| def fit(self, X, y=None):
| X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64)
if ((X.shape[0] == X.shape[1]) and (self.affinity != 'precomputed')):
warnings.warn('The spectral clustering API has changed. ``fit``now constructs an affinity matrix from data. To use ... |
'Creates a biclustering for X.
Parameters
X : array-like, shape (n_samples, n_features)'
def fit(self, X, y=None):
    """Create a biclustering for X.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
    """
    checked = check_array(X, accept_sparse='csr', dtype=np.float64)
    self._check_parameters()
    self._fit(checked)
    return self
|
'Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.'
| def _svd(self, array, n_components, n_discard):
| if (self.svd_method == 'randomized'):
kwargs = {}
if (self.n_svd_vecs is not None):
kwargs['n_oversamples'] = self.n_svd_vecs
(u, _, vt) = randomized_svd(array, n_components, random_state=self.random_state, **kwargs)
elif (self.svd_method == 'arpack'):
(u, _, vt) = sv... |
'Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.'
| def _fit_best_piecewise(self, vectors, n_best, n_clusters):
| def make_piecewise(v):
(centroid, labels) = self._k_means(v.reshape((-1), 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise, axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1, arr=(vectors - piecewise_vectors))
result = vec... |
'Project ``data`` to ``vectors`` and cluster the result.'
def _project_and_cluster(self, data, vectors, n_clusters):
    """Project ``data`` onto ``vectors`` and k-means-cluster the result."""
    projection = safe_sparse_dot(data, vectors)
    _, labels = self._k_means(projection, n_clusters)
    return labels
|
'Create affinity matrix from negative euclidean distances, then
apply affinity propagation clustering.
Parameters
X : array-like, shape (n_samples, n_features) or (n_samples, n_samples)
Data matrix or, if affinity is ``precomputed``, matrix of
similarities / affinities.'
| def fit(self, X, y=None):
| X = check_array(X, accept_sparse='csr')
if (self.affinity == 'precomputed'):
self.affinity_matrix_ = X
elif (self.affinity == 'euclidean'):
self.affinity_matrix_ = (- euclidean_distances(X, squared=True))
else:
raise ValueError(("Affinity must be 'precomputed' or '... |
'Predict the closest cluster each sample in X belongs to.
Parameters
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data to predict.
Returns
labels : array, shape (n_samples,)
Index of the cluster each sample belongs to.'
def predict(self, X):
    """Predict the closest cluster each sample in X belongs to.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        New data to predict.

    Returns
    -------
    labels : array, shape (n_samples,)
        Index of the cluster each sample belongs to.
    """
    check_is_fitted(self, 'cluster_centers_indices_')
    if not hasattr(self, 'cluster_centers_'):
        # cluster_centers_ is only available for non-precomputed affinity.
        raise ValueError("Predict method is not supported when affinity='precomputed'.")
    return pairwise_distances_argmin(X, self.cluster_centers_)
|
'Fit the model using X as training data.
Note that sparse arrays can only be handled by method=\'exact\'.
It is recommended that you convert your sparse array to dense
(e.g. `X.toarray()`) if it fits in memory, or otherwise using a
dimensionality reduction technique (e.g. TruncatedSVD).
Parameters
X : array, shape (n_s... | def _fit(self, X, skip_num_points=0):
| if (self.method not in ['barnes_hut', 'exact']):
raise ValueError("'method' must be 'barnes_hut' or 'exact'")
if ((self.angle < 0.0) or (self.angle > 1.0)):
raise ValueError("'angle' must be between 0.0 - 1.0")
if (self.metric == 'precomputed'):
if (i... |
'Runs t-SNE.'
| def _tsne(self, P, degrees_of_freedom, n_samples, random_state, X_embedded, neighbors=None, skip_num_points=0):
| params = X_embedded.ravel()
opt_args = {'it': 0, 'n_iter_check': self._N_ITER_CHECK, 'min_grad_norm': self.min_grad_norm, 'learning_rate': self.learning_rate, 'verbose': self.verbose, 'kwargs': dict(skip_num_points=skip_num_points), 'args': [P, degrees_of_freedom, n_samples, self.n_components], 'n_iter_without_... |
'Fit X into an embedded space and return that transformed
output.
Parameters
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is \'precomputed\' X must be a square distance
matrix. Otherwise it contains a sample per row.
Returns
X_new : array, shape (n_samples, n_components)
def fit_transform(self, X, y=None):
    """Fit X into an embedded space and return that transformed output.

    Parameters
    ----------
    X : array, shape (n_samples, n_features) or (n_samples, n_samples)
        If the metric is 'precomputed' X must be a square distance
        matrix. Otherwise it contains a sample per row.

    Returns
    -------
    X_new : array, shape (n_samples, n_components)
        Embedding of the input data.
    """
    self.embedding_ = self._fit(X)
    return self.embedding_
|
'Fit X into an embedded space.
Parameters
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is \'precomputed\' X must be a square distance
matrix. Otherwise it contains a sample per row. If the method
is \'exact\', X may be a sparse matrix of type \'csr\', \'csc\'
or \'coo\'.'
def fit(self, X, y=None):
    """Fit X into an embedded space.

    Parameters
    ----------
    X : array, shape (n_samples, n_features) or (n_samples, n_samples)
        If the metric is 'precomputed' X must be a square distance
        matrix. Otherwise it contains a sample per row. If the method
        is 'exact', X may be a sparse matrix of type 'csr', 'csc' or 'coo'.
    """
    # Delegate to fit_transform; the embedding is stored as a side effect.
    self.fit_transform(X)
    return self
|
'Computes the position of the points in the embedding space
Parameters
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
Input data. If ``dissimilarity==\'precomputed\'``, the input should
be the dissimilarity matrix.
init : ndarray, shape (n_samples,), optional, default: None
def fit(self, X, y=None, init=None):
    """Compute the position of the points in the embedding space.

    Parameters
    ----------
    X : array, shape (n_samples, n_features) or (n_samples, n_samples)
        Input data. If ``dissimilarity=='precomputed'``, the input should
        be the dissimilarity matrix.
    init : ndarray, shape (n_samples,), optional, default: None
        Starting configuration of the embedding.
    """
    # The transformed coordinates are stored by fit_transform.
    self.fit_transform(X, init=init)
    return self
|
'Fit the data from X, and returns the embedded coordinates
Parameters
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
Input data. If ``dissimilarity==\'precomputed\'``, the input should
be the dissimilarity matrix.
init : ndarray, shape (n_samples,), optional, default: None
Starting configuration of ... | def fit_transform(self, X, y=None, init=None):
| X = check_array(X)
if ((X.shape[0] == X.shape[1]) and (self.dissimilarity != 'precomputed')):
warnings.warn("The MDS API has changed. ``fit`` now constructs an dissimilarity matrix from data. To use a custom dissimilarity matrix, set ``dissimil... |
'Compute the embedding vectors for data X
Parameters
X : array-like of shape [n_samples, n_features]
training set.
Returns
self : returns an instance of self.'
def fit(self, X, y=None):
    """Compute the embedding vectors for data X.

    Parameters
    ----------
    X : array-like of shape [n_samples, n_features]
        Training set.

    Returns
    -------
    self : returns an instance of self.
    """
    self._fit_transform(X)
    return self
|
'Compute the embedding vectors for data X and transform X.
Parameters
X : array-like of shape [n_samples, n_features]
training set.
Returns
X_new : array-like, shape (n_samples, n_components)'
def fit_transform(self, X, y=None):
    """Compute the embedding vectors for data X and transform X.

    Parameters
    ----------
    X : array-like of shape [n_samples, n_features]
        Training set.

    Returns
    -------
    X_new : array-like, shape (n_samples, n_components)
    """
    # _fit_transform stores the result in embedding_.
    self._fit_transform(X)
    return self.embedding_
|
'Transform new points into embedding space.
Parameters
X : array-like, shape = [n_samples, n_features]
Returns
X_new : array, shape = [n_samples, n_components]
Notes
Because of scaling performed by this method, it is discouraged to use
it together with methods that are not scale-invariant (like SVMs)'
| def transform(self, X):
| check_is_fitted(self, 'nbrs_')
X = check_array(X)
ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors, return_distance=False)
weights = barycenter_weights(X, self.nbrs_._fit_X[ind], reg=self.reg)
X_new = np.empty((X.shape[0], self.n_components))
for i in range(X.shape[0]):
X_new[... |
'Calculate the affinity matrix from data
Parameters
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
If affinity is "precomputed"
X : array-like, shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph comp... | def _get_affinity_matrix(self, X, Y=None):
| if (self.affinity == 'precomputed'):
self.affinity_matrix_ = X
return self.affinity_matrix_
if (self.affinity == 'nearest_neighbors'):
if sparse.issparse(X):
warnings.warn('Nearest neighbors affinity currently does not support sparse input, falling ... |
'Fit the model from data in X.
Parameters
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
If affinity is "precomputed"
X : array-like, shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
... | def fit(self, X, y=None):
| X = check_array(X, ensure_min_samples=2, estimator=self)
random_state = check_random_state(self.random_state)
if isinstance(self.affinity, six.string_types):
if (self.affinity not in set(('nearest_neighbors', 'rbf', 'precomputed'))):
raise ValueError(("%s is not a valid af... |
'Fit the model from data in X and transform X.
Parameters
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
If affinity is "precomputed"
X : array-like, shape (n_samples, n_samples),
def fit_transform(self, X, y=None):
    """Fit the model from data in X and transform X.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Training vector. If affinity is "precomputed", shape is
        (n_samples, n_samples) and X is interpreted as a precomputed
        adjacency graph.

    Returns
    -------
    X_new : the embedding computed by ``fit``.
    """
    self.fit(X)
    return self.embedding_
|
'Compute the reconstruction error for the embedding.
Returns
reconstruction_error : float
Notes
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
def reconstruction_error(self):
    """Compute the reconstruction error for the embedding.

    Returns
    -------
    reconstruction_error : float

    Notes
    -----
    The cost function of an isomap embedding is
    ``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
    where D is the matrix of distances for the input data and
    D_fit the distances of the output embedding.
    """
    G = (self.dist_matrix_ ** 2) * (-0.5)
    G_center = KernelCenterer().fit_transform(G)
    evals = self.kernel_pca_.lambdas_
    residual = np.sum(G_center ** 2) - np.sum(evals ** 2)
    return np.sqrt(residual) / G.shape[0]
|
'Compute the embedding vectors for data X
Parameters
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, precomputed tree, or NearestNeighbors
object.
Returns
self : returns an instance of self.'
def fit(self, X, y=None):
    """Compute the embedding vectors for data X.

    Parameters
    ----------
    X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
        Sample data, shape = (n_samples, n_features), in the form of a
        numpy array, precomputed tree, or NearestNeighbors object.

    Returns
    -------
    self : returns an instance of self.
    """
    self._fit_transform(X)
    return self
|
'Fit the model from data in X and transform X.
Parameters
X : {array-like, sparse matrix, BallTree, KDTree}
Training vector, where n_samples in the number of samples
and n_features is the number of features.
Returns
X_new : array-like, shape (n_samples, n_components)'
def fit_transform(self, X, y=None):
    """Fit the model from data in X and transform X.

    Parameters
    ----------
    X : {array-like, sparse matrix, BallTree, KDTree}
        Training vector, where n_samples is the number of samples
        and n_features is the number of features.

    Returns
    -------
    X_new : array-like, shape (n_samples, n_components)
    """
    # The embedding is computed and cached by _fit_transform.
    self._fit_transform(X)
    return self.embedding_
|
'Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order... | def transform(self, X):
| X = check_array(X)
(distances, indices) = self.nbrs_.kneighbors(X, return_distance=True)
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min((self.dist_matrix_[indices[i]] + distances[i][:, None]), 0)
G_X **= 2
G_X *= (-0.5)
return s... |
'Creates a customized copy of the Parameter.'
| def replace(self, name=_void, kind=_void, annotation=_void, default=_void, _partial_kwarg=_void):
| if (name is _void):
name = self._name
if (kind is _void):
kind = self._kind
if (annotation is _void):
annotation = self._annotation
if (default is _void):
default = self._default
if (_partial_kwarg is _void):
_partial_kwarg = self._partial_kwarg
return typ... |
'Constructs Signature from the given list of Parameter
objects and \'return_annotation\'. All arguments are optional.'
| def __init__(self, parameters=None, return_annotation=_empty, __validate_parameters__=True):
| if (parameters is None):
params = OrderedDict()
elif __validate_parameters__:
params = OrderedDict()
top_kind = _POSITIONAL_ONLY
for (idx, param) in enumerate(parameters):
kind = param.kind
if (kind < top_kind):
msg = 'wrong parameter ... |
'Constructs Signature for the given python function'
| @classmethod
def from_function(cls, func):
| if (not isinstance(func, types.FunctionType)):
raise TypeError('{0!r} is not a Python function'.format(func))
Parameter = cls._parameter_cls
func_code = func.__code__
pos_count = func_code.co_argcount
arg_names = func_code.co_varnames
positional = tuple(arg_names[:pos_coun... |
'Creates a customized copy of the Signature.
Pass \'parameters\' and/or \'return_annotation\' arguments
to override them in the new copy.'
def replace(self, parameters=_void, return_annotation=_void):
    """Create a customized copy of the Signature.

    Pass 'parameters' and/or 'return_annotation' arguments
    to override them in the new copy.
    """
    # _void is a sentinel: an explicit None is a legitimate override value.
    if parameters is _void:
        parameters = self.parameters.values()
    if return_annotation is _void:
        return_annotation = self._return_annotation
    return type(self)(parameters, return_annotation=return_annotation)
|
'Private method. Don\'t use directly.'
| def _bind(self, args, kwargs, partial=False):
| arguments = OrderedDict()
parameters = iter(self.parameters.values())
parameters_ex = ()
arg_vals = iter(args)
if partial:
for (param_name, param) in self.parameters.items():
if (param._partial_kwarg and (param_name not in kwargs)):
kwargs[param_name] = param.defa... |
'Get a BoundArguments object, that maps the passed `args`
and `kwargs` to the function\'s signature. Raises `TypeError`
if the passed arguments can not be bound.'
def bind(self, *args, **kwargs):
    """Get a BoundArguments object mapping the passed `args` and `kwargs`
    to the function's signature. Raises `TypeError` if the passed
    arguments can not be bound.
    """
    bound = self._bind(args, kwargs)
    return bound
|
'Get a BoundArguments object, that partially maps the
passed `args` and `kwargs` to the function\'s signature.
Raises `TypeError` if the passed arguments can not be bound.'
def bind_partial(self, *args, **kwargs):
    """Get a BoundArguments object that partially maps the passed `args`
    and `kwargs` to the function's signature. Raises `TypeError` if the
    passed arguments can not be bound.
    """
    partial_bound = self._bind(args, kwargs, partial=True)
    return partial_bound
|
'Parameters
hash_name: string
The hash algorithm to be used
coerce_mmap: boolean
Make no difference between np.memmap and np.ndarray
objects.'
def __init__(self, hash_name='md5', coerce_mmap=False):
    """Parameters
    ----------
    hash_name: string
        The hash algorithm to be used
    coerce_mmap: boolean
        Make no difference between np.memmap and np.ndarray objects.
    """
    self.coerce_mmap = coerce_mmap
    Hasher.__init__(self, hash_name=hash_name)
    # numpy is imported lazily so the base hasher works without it.
    import numpy as np
    self.np = np
    # np.getbuffer exists on old numpy/Python 2; fall back to memoryview.
    self._getbuffer = np.getbuffer if hasattr(np, 'getbuffer') else memoryview
|
'Subclass the save method, to hash ndarray subclass, rather
than pickling them. Off course, this is a total abuse of
the Pickler class.'
| def save(self, obj):
| if (isinstance(obj, self.np.ndarray) and (not obj.dtype.hasobject)):
if (obj.shape == ()):
obj_c_contiguous = obj.flatten()
elif obj.flags.c_contiguous:
obj_c_contiguous = obj
elif obj.flags.f_contiguous:
obj_c_contiguous = obj.T
else:
... |
'Flush and close the file.
May be called more than once without error. Once the file is
closed, any other operation on it will raise a ValueError.'
| def close(self):
| with self._lock:
if (self._mode == _MODE_CLOSED):
return
try:
if (self._mode in (_MODE_READ, _MODE_READ_EOF)):
self._decompressor = None
elif (self._mode == _MODE_WRITE):
self._fp.write(self._compressor.flush())
self... |
'True if this file is closed.'
@property
def closed(self):
    """True if this file is closed."""
    return self._mode == _MODE_CLOSED
|
'Return the file descriptor for the underlying file.'
def fileno(self):
    """Return the file descriptor for the underlying file."""
    self._check_not_closed()
    descriptor = self._fp.fileno()
    return descriptor
|
'Return whether the file supports seeking.'
def seekable(self):
    """Return whether the file supports seeking.

    Requires both a readable stream and a seekable underlying file.
    """
    return self.readable() and self._fp.seekable()
|
'Return whether the file was opened for reading.'
def readable(self):
    """Return whether the file was opened for reading."""
    self._check_not_closed()
    # Both the plain-read and at-EOF modes count as readable.
    return self._mode in (_MODE_READ, _MODE_READ_EOF)
|
'Return whether the file was opened for writing.'
def writable(self):
    """Return whether the file was opened for writing."""
    self._check_not_closed()
    return self._mode == _MODE_WRITE
|
'Read up to size uncompressed bytes from the file.
If size is negative or omitted, read until EOF is reached.
Returns b\'\' if the file is already at EOF.'
def read(self, size=(-1)):
    """Read up to size uncompressed bytes from the file.

    If size is negative or omitted, read until EOF is reached.
    Returns b'' if the file is already at EOF.
    """
    with self._lock:
        self._check_can_read()
        if size == 0:
            # Fix: this is a binary stream — the empty result must be
            # bytes (b''), as the docstring promises, not the str ''.
            return b''
        elif size < 0:
            return self._read_all()
        else:
            return self._read_block(size)
|
'Read up to len(b) bytes into b.
Returns the number of bytes read (0 for EOF).'
def readinto(self, b):
    """Read up to len(b) bytes into b.

    Returns the number of bytes read (0 for EOF).
    """
    with self._lock:
        # Delegate to the buffered base implementation, which loops
        # over read() for us.
        count = io.BufferedIOBase.readinto(self, b)
    return count
|
'Write a byte string to the file.
Returns the number of uncompressed bytes written, which is
always len(data). Note that due to buffering, the file on disk
may not reflect the data written until close() is called.'
def write(self, data):
    """Write a byte string to the file.

    Returns the number of uncompressed bytes written, which is
    always len(data). Note that due to buffering, the file on disk
    may not reflect the data written until close() is called.
    """
    with self._lock:
        self._check_can_write()
        if isinstance(data, memoryview):
            # Work on a concrete bytes copy of the view.
            data = data.tobytes()
        self._fp.write(self._compressor.compress(data))
        length = len(data)
        self._pos += length
        return length
|
'Change the file position.
The new position is specified by offset, relative to the
position indicated by whence. Values for whence are:
0: start of stream (default); offset must not be negative
1: current stream position
2: end of stream; offset must not be positive
Returns the new file position.
Note that seeking is ... | def seek(self, offset, whence=0):
| with self._lock:
self._check_can_seek()
if (whence == 0):
pass
elif (whence == 1):
offset = (self._pos + offset)
elif (whence == 2):
if (self._size < 0):
self._read_all(return_data=False)
offset = (self._size + offset)
... |
'Return the current file position.'
def tell(self):
    """Return the current file position."""
    with self._lock:
        self._check_not_closed()
        return self._pos
|
'Build a process or thread pool and return the number of workers'
| def _initialize_backend(self):
| try:
n_jobs = self._backend.configure(n_jobs=self.n_jobs, parallel=self, **self._backend_args)
if ((self.timeout is not None) and (not self._backend.supports_timeout)):
warnings.warn("The backend class {!r} does not support timeout. You have set 'timeout=... |
'Queue the batch for computing, with or without multiprocessing
WARNING: this method is not thread-safe: it should be only called
indirectly via dispatch_one_batch.'
def _dispatch(self, batch):
    """Queue the batch for computing, with or without multiprocessing.

    WARNING: this method is not thread-safe: it should only be called
    indirectly via dispatch_one_batch.
    """
    if self._aborting:
        return
    # Book-keeping is updated before the task is handed to the backend.
    batch_size = len(batch)
    self.n_dispatched_tasks += batch_size
    self.n_dispatched_batches += 1
    timestamp = time.time()
    completion_cb = BatchCompletionCallBack(timestamp, batch_size, self)
    async_job = self._backend.apply_async(batch, callback=completion_cb)
    self._jobs.append(async_job)
|
'Dispatch more data for parallel processing
This method is meant to be called concurrently by the multiprocessing
callback. We rely on the thread-safety of dispatch_one_batch to protect
against concurrent consumption of the unprotected iterator.'
def dispatch_next(self):
    """Dispatch more data for parallel processing.

    This method is meant to be called concurrently by the multiprocessing
    callback; it relies on the thread-safety of dispatch_one_batch.
    """
    if not self.dispatch_one_batch(self._original_iterator):
        # The iterator is exhausted: stop the dispatch loop.
        self._iterating = False
        self._original_iterator = None
|
'Prefetch the tasks for the next batch and dispatch them.
The effective size of the batch is computed here.
If there are no more jobs to dispatch, return False, else return True.
The iterator consumption and dispatching is protected by the same
lock so calling this function should be thread safe.'
| def dispatch_one_batch(self, iterator):
| if (self.batch_size == 'auto'):
batch_size = self._backend.compute_batch_size()
else:
batch_size = self.batch_size
with self._lock:
tasks = BatchedCalls(itertools.islice(iterator, batch_size))
if (len(tasks) == 0):
return False
else:
self._disp... |
'Display the message on stout or stderr depending on verbosity'
def _print(self, msg, msg_args):
    """Display the message on stdout or stderr depending on verbosity."""
    if not self.verbose:
        return
    # Low verbosity goes to stderr, very high verbosity to stdout.
    writer = sys.stdout.write if self.verbose >= 50 else sys.stderr.write
    writer('[%s]: %s\n' % (self, msg % msg_args))
|
'Display the process of the parallel execution only a fraction
of time, controlled by self.verbose.'
| def print_progress(self):
| if (not self.verbose):
return
elapsed_time = (time.time() - self._start_time)
if (self._original_iterator is not None):
if _verbosity_filter(self.n_dispatched_batches, self.verbose):
return
self._print('Done %3i tasks | elapsed: %s', (se... |
'Attach a reducer function to a given type in the dispatch table.'
def register(self, type, reduce_func):
    """Attach a reducer function to a given type in the dispatch table."""
    if hasattr(Pickler, 'dispatch'):
        # Pickler implementations exposing a `dispatch` dict need a
        # method-style wrapper around the reducer.
        def dispatcher(self, obj):
            self.save_reduce(obj=obj, *reduce_func(obj))
        self.dispatch[type] = dispatcher
    else:
        # Otherwise the per-instance dispatch_table takes the reducer directly.
        self.dispatch_table[type] = reduce_func
|
'Constructor. Store the useful information for later.'
def __init__(self, subclass, shape, order, dtype, allow_mmap=False):
    """Constructor. Store the array metadata needed to rebuild it later."""
    self.subclass = subclass
    self.shape = shape
    self.order = order
    self.dtype = dtype
    # Whether the on-disk payload may be loaded via numpy memmap.
    self.allow_mmap = allow_mmap
|
'Write array bytes to pickler file handle.
This function is an adaptation of the numpy write_array function
available in version 1.10.1 in numpy/lib/format.py.'
| def write_array(self, array, pickler):
| buffersize = max(((16 * (1024 ** 2)) // array.itemsize), 1)
if array.dtype.hasobject:
pickle.dump(array, pickler.file_handle, protocol=2)
else:
for chunk in pickler.np.nditer(array, flags=['external_loop', 'buffered', 'zerosize_ok'], buffersize=buffersize, order=self.order):
pick... |
'Read array from unpickler file handle.
This function is an adaptation of the numpy read_array function
available in version 1.10.1 in numpy/lib/format.py.'
| def read_array(self, unpickler):
| if (len(self.shape) == 0):
count = 1
else:
count = unpickler.np.multiply.reduce(self.shape)
if self.dtype.hasobject:
array = pickle.load(unpickler.file_handle)
else:
if ((not PY3_OR_LATER) and unpickler.np.compat.isfileobj(unpickler.file_handle)):
array = unpi... |
'Read an array using numpy memmap.'
| def read_mmap(self, unpickler):
| offset = unpickler.file_handle.tell()
if (unpickler.mmap_mode == 'w+'):
unpickler.mmap_mode = 'r+'
marray = make_memmap(unpickler.filename, dtype=self.dtype, shape=self.shape, order=self.order, mode=unpickler.mmap_mode, offset=offset)
unpickler.file_handle.seek((offset + marray.nbytes))
retu... |
'Read the array corresponding to this wrapper.
Use the unpickler to get all information to correctly read the array.
Parameters
unpickler: NumpyUnpickler
Returns
array: numpy.ndarray'
| def read(self, unpickler):
| if ((unpickler.mmap_mode is not None) and self.allow_mmap):
array = self.read_mmap(unpickler)
else:
array = self.read_array(unpickler)
if (hasattr(array, '__array_prepare__') and (self.subclass not in (unpickler.np.ndarray, unpickler.np.memmap))):
new_array = unpickler.np.core.multia... |
'Create and returns a numpy array wrapper from a numpy array.'
def _create_array_wrapper(self, array):
    """Create and return a numpy array wrapper from a numpy array."""
    if array.flags.f_contiguous and not array.flags.c_contiguous:
        storage_order = 'F'
    else:
        storage_order = 'C'
    # Memory-mapping is only allowed for unbuffered, non-object arrays.
    allow_mmap = not self.buffered and not array.dtype.hasobject
    return NumpyArrayWrapper(type(array), array.shape, storage_order, array.dtype, allow_mmap=allow_mmap)
|
'Subclass the Pickler `save` method.
This is a total abuse of the Pickler class in order to use the numpy
persistence function `save` instead of the default pickle
implementation. The numpy array is replaced by a custom wrapper in the
pickle persistence stack and the serialized array is written right
after in the file.... | def save(self, obj):
| if ((self.np is not None) and (type(obj) in (self.np.ndarray, self.np.matrix, self.np.memmap))):
if (type(obj) is self.np.memmap):
obj = self.np.asanyarray(obj)
wrapper = self._create_array_wrapper(obj)
Pickler.save(self, wrapper)
if (self.proto >= 4):
self.fr... |
'Called to set the state of a newly created object.
We capture it to replace our place-holder objects, NDArrayWrapper or
NumpyArrayWrapper, by the array we are interested in. We
replace them directly in the stack of pickler.
NDArrayWrapper is used for backward compatibility with joblib <= 0.9.'
| def load_build(self):
| Unpickler.load_build(self)
if isinstance(self.stack[(-1)], (NDArrayWrapper, NumpyArrayWrapper)):
if (self.np is None):
raise ImportError("Trying to unpickle an ndarray, but numpy didn't import correctly")
array_wrapper = self.stack.pop()
if isinstan... |
'Reconfigure the backend and return the number of workers.
This makes it possible to reuse an existing backend instance for
successive independent calls to Parallel with different parameters.'
def configure(self, n_jobs=1, parallel=None, **backend_args):
    """Reconfigure the backend and return the number of workers.

    This makes it possible to reuse an existing backend instance for
    successive independent calls to Parallel with different parameters.
    """
    self.parallel = parallel
    return self.effective_n_jobs(n_jobs)
|
'Determine the optimal batch size'
def compute_batch_size(self):
    """Determine the optimal batch size: one task at a time."""
    return 1
|
'List of exception types to be captured.'
def get_exceptions(self):
    """List of exception types to be captured: none by default."""
    return []
|
'Abort any running tasks
This is called when an exception has been raised when executing a tasks
and all the remaining tasks will be ignored and can therefore be
aborted to spare computation resources.
If ensure_ready is True, the backend should be left in an operating
def abort_everything(self, ensure_ready=True):
    """Abort any running tasks.

    Base implementation: nothing to do. Subclasses override this to
    release resources; if ensure_ready is True they must leave the
    backend in an operating state.
    """
|
'Determine the number of jobs which are going to run in parallel'
def effective_n_jobs(self, n_jobs):
    """Determine the number of jobs which are going to run in parallel.

    This backend always runs a single job; n_jobs == 0 is rejected.
    """
    if n_jobs == 0:
        raise ValueError('n_jobs == 0 in Parallel has no meaning')
    return 1
|
'Schedule a func to be run'
def apply_async(self, func, callback=None):
    """Schedule a func to be run.

    The function is executed immediately and the callback, if any,
    is invoked with the wrapped result.
    """
    result = ImmediateResult(func)
    if callback:
        callback(result)
    return result
|
'Determine the number of jobs which are going to run in parallel'
def effective_n_jobs(self, n_jobs):
    """Determine the number of jobs which are going to run in parallel."""
    if n_jobs == 0:
        raise ValueError('n_jobs == 0 in Parallel has no meaning')
    if mp is None or n_jobs is None:
        # multiprocessing unavailable or no explicit request: run serially.
        return 1
    if n_jobs < 0:
        # Negative values count back from cpu_count(); -1 means all CPUs.
        n_jobs = max(mp.cpu_count() + 1 + n_jobs, 1)
    return n_jobs
|
'Shutdown the process or thread pool'
def terminate(self):
    """Shutdown the process or thread pool."""
    pool = self._pool
    if pool is not None:
        pool.close()
        pool.terminate()
        self._pool = None
|
'Schedule a func to be run'
def apply_async(self, func, callback=None):
    """Schedule a func to be run on the underlying pool.

    The callable is wrapped in SafeFunction before submission.
    """
    return self._pool.apply_async(SafeFunction(func), callback=callback)
|
'Shutdown the pool and restart a new one with the same parameters'
def abort_everything(self, ensure_ready=True):
    """Shutdown the pool and restart a new one with the same parameters."""
    self.terminate()
    if ensure_ready:
        # Rebuild an equivalent pool using the saved Parallel settings.
        self.configure(n_jobs=self.parallel.n_jobs, parallel=self.parallel, **self.parallel._backend_args)
|
'Determine the optimal batch size'
| def compute_batch_size(self):
| old_batch_size = self._effective_batch_size
batch_duration = self._smoothed_batch_duration
if ((batch_duration > 0) and (batch_duration < self.MIN_IDEAL_BATCH_DURATION)):
ideal_batch_size = int(((old_batch_size * self.MIN_IDEAL_BATCH_DURATION) / batch_duration))
batch_size = max((2 * ideal_b... |
'Callback indicate how long it took to run a batch'
def batch_completed(self, batch_size, duration):
    """Callback indicating how long it took to run a batch."""
    if batch_size != self._effective_batch_size:
        # Only record timings that correspond to the current batch size.
        return
    previous = self._smoothed_batch_duration
    if previous == 0:
        # First measurement: take it as-is.
        self._smoothed_batch_duration = duration
    else:
        # Exponential moving average: 80% old estimate, 20% new sample.
        self._smoothed_batch_duration = 0.8 * previous + 0.2 * duration
|
'Build a process or thread pool and return the number of workers'
def configure(self, n_jobs=1, parallel=None, **backend_args):
    """Build a thread pool and return the number of workers."""
    n_jobs = self.effective_n_jobs(n_jobs)
    if n_jobs == 1:
        # A single worker: fall back to the sequential backend.
        raise FallbackToBackend(SequentialBackend())
    self.parallel = parallel
    self._pool = ThreadPool(n_jobs)
    return n_jobs
|
'Determine the number of jobs which are going to run in parallel.
This also checks if we are attempting to create a nested parallel
loop.'
| def effective_n_jobs(self, n_jobs):
| if (mp is None):
return 1
if mp.current_process().daemon:
if (n_jobs != 1):
warnings.warn('Multiprocessing-backed parallel loops cannot be nested, setting n_jobs=1', stacklevel=3)
return 1
if (not isinstance(threading.current_thread(), threading._Main... |
'Build a process or thread pool and return the number of workers'
| def configure(self, n_jobs=1, parallel=None, **backend_args):
| n_jobs = self.effective_n_jobs(n_jobs)
if (n_jobs == 1):
raise FallbackToBackend(SequentialBackend())
already_forked = int(os.environ.get(self.JOBLIB_SPAWNED_PROCESS, 0))
if already_forked:
raise ImportError('[joblib] Attempting to do parallel computing without prote... |
'Shutdown the process or thread pool'
def terminate(self):
    """Shutdown the process or thread pool."""
    super(MultiprocessingBackend, self).terminate()
    # Clear the fork-guard flag set when the pool was created.
    if self.JOBLIB_SPAWNED_PROCESS in os.environ:
        del os.environ[self.JOBLIB_SPAWNED_PROCESS]
|
'Constructor. Store the useful information for later.'
def __init__(self, filename, subclass, allow_mmap=True):
    """Constructor. Store the filename and metadata needed to reload the array."""
    self.filename = filename
    self.subclass = subclass
    # Whether the stored payload may be loaded with mmap_mode.
    self.allow_mmap = allow_mmap
|
'Reconstruct the array.'
| def read(self, unpickler):
| filename = os.path.join(unpickler._dirname, self.filename)
allow_mmap = getattr(self, 'allow_mmap', True)
memmap_kwargs = ({} if (not allow_mmap) else {'mmap_mode': unpickler.mmap_mode})
array = unpickler.np.load(filename, **memmap_kwargs)
if (hasattr(array, '__array_prepare__') and (self.subclass n... |
'Constructor. Store the useful information for later.'
def __init__(self, filename, init_args, state):
    """Constructor. Store the filename, reconstruction args and state."""
    self.filename = filename
    self.state = state
    self.init_args = init_args
|
'Reconstruct the array from the meta-information and the z-file.'
def read(self, unpickler):
    """Reconstruct the array from the meta-information and the z-file."""
    filename = os.path.join(unpickler._dirname, self.filename)
    # Rebuild an empty array shell, then restore its state plus raw data.
    array = unpickler.np.core.multiarray._reconstruct(*self.init_args)
    with open(filename, 'rb') as f:
        raw_data = read_zfile(f)
    array.__setstate__(self.state + (raw_data,))
    return array
|
'Constructor.'
| def __init__(self, filename, file_handle, mmap_mode=None):
| self._filename = os.path.basename(filename)
self._dirname = os.path.dirname(filename)
self.mmap_mode = mmap_mode
self.file_handle = self._open_pickle(file_handle)
Unpickler.__init__(self, self.file_handle)
try:
import numpy as np
except ImportError:
np = None
self.np = np... |
'Set the state of a newly created object.
We capture it to replace our place-holder objects,
NDArrayWrapper, by the array we are interested in. We
replace them directly in the stack of pickler.'
| def load_build(self):
| Unpickler.load_build(self)
if isinstance(self.stack[(-1)], NDArrayWrapper):
if (self.np is None):
raise ImportError("Trying to unpickle an ndarray, but numpy didn't import correctly")
nd_array_wrapper = self.stack.pop()
array = nd_array_wrapper.read... |
def __init__(self, depth=3):
    """Configure the pretty-printer.

    Parameters
    ----------
    depth : int, optional
        The depth of objects printed.
    """
    self.depth = depth
|
def format(self, obj, indent=0):
    """Return the pretty-printed representation of *obj*.

    The configured ``self.depth`` bounds how deeply nested structures
    are rendered; *indent* is forwarded to ``pformat`` unchanged.
    """
    rendered = pformat(obj, indent=indent, depth=self.depth)
    return rendered
|
'Print the time elapsed between the last call and the current
call, with an optional message.'
| def __call__(self, msg='', total=False):
| if (not total):
time_lapse = (time.time() - self.last_time)
full_msg = ('%s: %s' % (msg, format_time(time_lapse)))
else:
time_lapse = (time.time() - self.start_time)
full_msg = ('%s: %.2fs, %.1f min' % (msg, time_lapse, (time_lapse / 60)))
print(full_msg, file=sys... |
def get(self):
    """Read the cached value from its on-disk directory and return it."""
    # Resolve the fully-qualified function name once, then delegate
    # the actual deserialization to the shared loader helper.
    func_name = _get_func_fullname(self.func)
    return _load_output(
        self._output_dir,
        func_name,
        timestamp=self.timestamp,
        metadata=self.metadata,
        mmap_mode=self.mmap_mode,
        verbose=self.verbose,
    )
|
def clear(self):
    """Remove this result's cache directory from disk.

    Missing or already-deleted paths are ignored, so calling this
    twice is safe.
    """
    shutil.rmtree(self._output_dir, ignore_errors=True)
|
'Parameters
func: callable
The function to decorate
cachedir: string
The path of the base directory to use as a data store
ignore: list or None
List of variable names to ignore.
mmap_mode: {None, \'r+\', \'r\', \'w+\', \'c\'}, optional
The memmapping mode used when loading from cache
numpy arrays. See numpy.load for th... | def __init__(self, func, cachedir, ignore=None, mmap_mode=None, compress=False, verbose=1, timestamp=None):
| Logger.__init__(self)
self.mmap_mode = mmap_mode
self.func = func
if (ignore is None):
ignore = []
self.ignore = ignore
self._verbose = verbose
self.cachedir = cachedir
self.compress = compress
if (compress and (self.mmap_mode is not None)):
warnings.warn('Compressed ... |
'Call wrapped function and cache result, or read cache if available.
This function returns the wrapped function output and some metadata.
Returns
output: value or tuple
what is returned by wrapped function
argument_hash: string
hash of function arguments
metadata: dict
some metadata about wrapped function call (see _pe... | def _cached_call(self, args, kwargs):
| (output_dir, argument_hash) = self._get_output_dir(*args, **kwargs)
metadata = None
output_pickle_path = os.path.join(output_dir, 'output.pkl')
if (not (self._check_previous_func_code(stacklevel=4) and os.path.isfile(output_pickle_path))):
if (self._verbose > 10):
(_, name) = get_fun... |
def call_and_shelve(self, *args, **kwargs):
    """Call the wrapped function (or hit the cache) and return a reference.

    Instead of the output itself, a small picklable reference object is
    returned, which is cheap to send or store; call ``.get()`` on it to
    retrieve the actual result.

    Returns
    -------
    cached_result : MemorizedResult
        Reference to the cached function output.
    """
    # Run (or look up) the call; only the argument hash and metadata
    # are needed to build the reference.
    _, arg_hash, meta = self._cached_call(args, kwargs)
    return MemorizedResult(
        self.cachedir,
        self.func,
        arg_hash,
        metadata=meta,
        verbose=self._verbose - 1,
        timestamp=self.timestamp,
    )
|
def __reduce__(self):
    """Pickle support.

    The timestamp is deliberately left out of the reconstruction
    arguments so that hashes do not depend on it; unpickling re-runs
    ``__init__`` with the remaining settings.
    """
    init_args = (self.func, self.cachedir, self.ignore,
                 self.mmap_mode, self.compress, self._verbose)
    return (self.__class__, init_args)
|
def _get_output_dir(self, *args, **kwargs):
    """Return (directory, argument_hash) for this call's persisted result.

    The directory is the per-function cache dir with the hash of the
    call arguments appended as a subdirectory.
    """
    arg_hash = self._get_argument_hash(*args, **kwargs)
    func_dir = self._get_func_dir(self.func)
    return (os.path.join(func_dir, arg_hash), arg_hash)
|
def _get_func_dir(self, mkdir=True):
    """Return the cache directory corresponding to the wrapped function.

    When *mkdir* is truthy, the directory is created on the fly
    (via the ``mkdirp`` helper) before being returned.
    """
    path = _cache_key_to_dir(self.cachedir, self.func, None)
    if mkdir:
        mkdirp(path)
    return path
|
def _hash_func(self):
    """Return a key identifying the wrapped function for the online cache.

    Combines the function's object identity, its own hash, and the hash
    of its code object (``None`` for callables without ``__code__``,
    e.g. builtins).
    """
    code = getattr(self.func, '__code__', None)
    return (id(self.func), hash(self.func), hash(code))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.