code : string
signature : string
docstring : string
loss_without_docstring : float64
loss_with_docstring : float64
factor : float64
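Across the records below, the factor field is consistent with the ratio loss_without_docstring / loss_with_docstring (e.g., 6.114159 / 6.510365 ≈ 0.939142). A minimal sketch of that consistency check, assuming the rows have been loaded into Python dicts keyed by the field names above (the loading step is hypothetical; the sample values are copied from records below):

# Consistency check: factor should equal loss_without_docstring / loss_with_docstring.
records = [
    {"loss_without_docstring": 6.114159, "loss_with_docstring": 6.510365, "factor": 0.939142},
    {"loss_without_docstring": 9.265436, "loss_with_docstring": 6.003919, "factor": 1.543231},
]
for r in records:
    ratio = r["loss_without_docstring"] / r["loss_with_docstring"]
    assert abs(ratio - r["factor"]) < 1e-5, (ratio, r["factor"])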
A, x, b = make_system(A, x, b, formats=['csr'])

sweep = slice(None)
(row_start, row_stop, row_step) = sweep.indices(A.shape[0])

temp = np.zeros_like(x)

# Dinv for A*A.H
Dinv = get_diagonal(A, norm_eq=2, inv=True)

# Create uniform type, convert possibly complex scalars to length 1 arrays
[omega] = type_prep(A.dtype, [omega])

for i in range(iterations):
    delta = (np.ravel(b - A*x)*np.ravel(Dinv)).astype(A.dtype)
    amg_core.jacobi_ne(A.indptr, A.indices, A.data, x, b, delta,
                       temp, row_start, row_stop, row_step, omega)
def jacobi_ne(A, x, b, iterations=1, omega=1.0)
Perform Jacobi iterations on the linear system A A.H x = A.H b.

Also known as Cimmino relaxation.

Parameters
----------
A : csr_matrix
    Sparse NxN matrix
x : ndarray
    Approximate solution (length N)
b : ndarray
    Right-hand side (length N)
iterations : int
    Number of iterations to perform
omega : scalar
    Damping parameter

Returns
-------
Nothing, x will be modified in place.

References
----------
.. [1] Brandt, Ta'asan. "Multigrid Method For Nearly Singular And
   Slightly Indefinite Problems." 1985. NASA Technical Report Numbers:
   ICASE-85-57; NAS 1.26:178026; NASA-CR-178026;

.. [2] Kaczmarz. Angenaeherte Aufloesung von Systemen Linearer
   Gleichungen. Bull. Acad. Polon. Sci. Lett. A 35, 355-57. 1937

.. [3] Cimmino. La ricerca scientifica ser. II 1.
   Pubbliz. dell'Inst. pre le Appl. del Calculo 34, 326-333, 1938.

Examples
--------
>>> # Use NE Jacobi as a Stand-Alone Solver
>>> from pyamg.relaxation.relaxation import jacobi_ne
>>> from pyamg.gallery import poisson
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> A = poisson((50,50), format='csr')
>>> x0 = np.zeros((A.shape[0],1))
>>> b = np.ones((A.shape[0],1))
>>> jacobi_ne(A, x0, b, iterations=10, omega=2.0/3.0)
>>> print(norm(b-A*x0))
49.3886046066
>>> #
>>> # Use NE Jacobi as the Multigrid Smoother
>>> from pyamg import smoothed_aggregation_solver
>>> opts = {'iterations' : 2, 'omega' : 4.0/3.0}
>>> sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0],1)),
...         coarse_solver='pinv2', max_coarse=50,
...         presmoother=('jacobi_ne', opts),
...         postsmoother=('jacobi_ne', opts))
>>> x0 = np.zeros((A.shape[0],1))
>>> residuals = []
>>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals)
6.114159
6.510365
0.939142
A, x, b = make_system(A, x, b, formats=['csr'])

# Dinv for A*A.H
if Dinv is None:
    Dinv = np.ravel(get_diagonal(A, norm_eq=2, inv=True))

if sweep == 'forward':
    row_start, row_stop, row_step = 0, len(x), 1
elif sweep == 'backward':
    row_start, row_stop, row_step = len(x)-1, -1, -1
elif sweep == 'symmetric':
    for iter in range(iterations):
        gauss_seidel_ne(A, x, b, iterations=1, sweep='forward',
                        omega=omega, Dinv=Dinv)
        gauss_seidel_ne(A, x, b, iterations=1, sweep='backward',
                        omega=omega, Dinv=Dinv)
    return
else:
    raise ValueError("valid sweep directions are 'forward', "
                     "'backward', and 'symmetric'")

for i in range(iterations):
    amg_core.gauss_seidel_ne(A.indptr, A.indices, A.data, x, b,
                             row_start, row_stop, row_step, Dinv, omega)
def gauss_seidel_ne(A, x, b, iterations=1, sweep='forward', omega=1.0, Dinv=None)
Perform Gauss-Seidel iterations on the linear system A A.H x = b.

Also known as Kaczmarz relaxation.

Parameters
----------
A : csr_matrix
    Sparse NxN matrix
x : ndarray
    Approximate solution (length N)
b : ndarray
    Right-hand side (length N)
iterations : int
    Number of iterations to perform
sweep : {'forward','backward','symmetric'}
    Direction of sweep
omega : float
    Relaxation parameter typically in (0, 2)
    if omega != 1.0, then algorithm becomes SOR on A A.H
Dinv : ndarray
    Inverse of diag(A A.H), (length N)

Returns
-------
Nothing, x will be modified in place.

References
----------
.. [1] Brandt, Ta'asan. "Multigrid Method For Nearly Singular And
   Slightly Indefinite Problems." 1985. NASA Technical Report Numbers:
   ICASE-85-57; NAS 1.26:178026; NASA-CR-178026;

.. [2] Kaczmarz. Angenaeherte Aufloesung von Systemen Linearer
   Gleichungen. Bull. Acad. Polon. Sci. Lett. A 35, 355-57. 1937

Examples
--------
>>> # Use NE Gauss-Seidel as a Stand-Alone Solver
>>> from pyamg.relaxation.relaxation import gauss_seidel_ne
>>> from pyamg.gallery import poisson
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> A = poisson((10,10), format='csr')
>>> x0 = np.zeros((A.shape[0],1))
>>> b = np.ones((A.shape[0],1))
>>> gauss_seidel_ne(A, x0, b, iterations=10, sweep='symmetric')
>>> print(norm(b-A*x0))
8.47576806771
>>> #
>>> # Use NE Gauss-Seidel as the Multigrid Smoother
>>> from pyamg import smoothed_aggregation_solver
>>> sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0],1)),
...         coarse_solver='pinv2', max_coarse=50,
...         presmoother=('gauss_seidel_ne', {'sweep' : 'symmetric'}),
...         postsmoother=('gauss_seidel_ne', {'sweep' : 'symmetric'}))
>>> x0 = np.zeros((A.shape[0],1))
>>> residuals = []
>>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals)
2.485411
2.649138
0.938196
A, x, b = make_system(A, x, b, formats=['csc'])

# Dinv for A.H*A
if Dinv is None:
    Dinv = np.ravel(get_diagonal(A, norm_eq=1, inv=True))

if sweep == 'forward':
    col_start, col_stop, col_step = 0, len(x), 1
elif sweep == 'backward':
    col_start, col_stop, col_step = len(x)-1, -1, -1
elif sweep == 'symmetric':
    for iter in range(iterations):
        gauss_seidel_nr(A, x, b, iterations=1, sweep='forward',
                        omega=omega, Dinv=Dinv)
        gauss_seidel_nr(A, x, b, iterations=1, sweep='backward',
                        omega=omega, Dinv=Dinv)
    return
else:
    raise ValueError("valid sweep directions are 'forward', "
                     "'backward', and 'symmetric'")

# Calculate initial residual
r = b - A*x

for i in range(iterations):
    amg_core.gauss_seidel_nr(A.indptr, A.indices, A.data, x, r,
                             col_start, col_stop, col_step, Dinv, omega)
def gauss_seidel_nr(A, x, b, iterations=1, sweep='forward', omega=1.0, Dinv=None)
Perform Gauss-Seidel iterations on the linear system A.H A x = A.H b.

Parameters
----------
A : csr_matrix
    Sparse NxN matrix
x : ndarray
    Approximate solution (length N)
b : ndarray
    Right-hand side (length N)
iterations : int
    Number of iterations to perform
sweep : {'forward','backward','symmetric'}
    Direction of sweep
omega : float
    Relaxation parameter typically in (0, 2)
    if omega != 1.0, then algorithm becomes SOR on A.H A
Dinv : ndarray
    Inverse of diag(A.H A), (length N)

Returns
-------
Nothing, x will be modified in place.

References
----------
.. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems,
   Second Edition", SIAM, pp. 247-9, 2003
   http://www-users.cs.umn.edu/~saad/books.html

Examples
--------
>>> # Use NR Gauss-Seidel as a Stand-Alone Solver
>>> from pyamg.relaxation.relaxation import gauss_seidel_nr
>>> from pyamg.gallery import poisson
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> A = poisson((10,10), format='csr')
>>> x0 = np.zeros((A.shape[0],1))
>>> b = np.ones((A.shape[0],1))
>>> gauss_seidel_nr(A, x0, b, iterations=10, sweep='symmetric')
>>> print(norm(b-A*x0))
8.45044864352
>>> #
>>> # Use NR Gauss-Seidel as the Multigrid Smoother
>>> from pyamg import smoothed_aggregation_solver
>>> sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0],1)),
...         coarse_solver='pinv2', max_coarse=50,
...         presmoother=('gauss_seidel_nr', {'sweep' : 'symmetric'}),
...         postsmoother=('gauss_seidel_nr', {'sweep' : 'symmetric'}))
>>> x0 = np.zeros((A.shape[0],1))
>>> residuals = []
>>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals)
2.605257
2.798225
0.931039
# Check if A has a pre-existing set of Schwarz parameters
if hasattr(A, 'schwarz_parameters'):
    if subdomain is not None and subdomain_ptr is not None:
        # check that the existing parameters correspond to the same
        # subdomains
        if np.array(A.schwarz_parameters[0] == subdomain).all() and \
           np.array(A.schwarz_parameters[1] == subdomain_ptr).all():
            return A.schwarz_parameters
    else:
        return A.schwarz_parameters

# Default is to use the overlapping regions defined by A's sparsity pattern
if subdomain is None or subdomain_ptr is None:
    subdomain_ptr = A.indptr.copy()
    subdomain = A.indices.copy()

# Extract each subdomain's block from the matrix
if inv_subblock is None or inv_subblock_ptr is None:
    inv_subblock_ptr = np.zeros(subdomain_ptr.shape,
                                dtype=A.indices.dtype)
    blocksize = (subdomain_ptr[1:] - subdomain_ptr[:-1])
    inv_subblock_ptr[1:] = np.cumsum(blocksize*blocksize)

    # Extract each block column from A
    inv_subblock = np.zeros((inv_subblock_ptr[-1],), dtype=A.dtype)
    amg_core.extract_subblocks(A.indptr, A.indices, A.data, inv_subblock,
                               inv_subblock_ptr, subdomain, subdomain_ptr,
                               int(subdomain_ptr.shape[0]-1), A.shape[0])

    # Choose tolerance for which singular values are zero in *gelss below
    t = A.dtype.char
    eps = np.finfo(np.float64).eps
    feps = np.finfo(np.single).eps
    geps = np.finfo(np.longdouble).eps
    _array_precision = {'f': 0, 'd': 1, 'g': 2, 'F': 0, 'D': 1, 'G': 2}
    cond = {0: feps*1e3, 1: eps*1e6, 2: geps*1e6}[_array_precision[t]]

    # Invert each block column
    my_pinv, = la.get_lapack_funcs(['gelss'],
                                   (np.ones((1,), dtype=A.dtype)))
    for i in range(subdomain_ptr.shape[0]-1):
        m = blocksize[i]
        rhs = sp.eye(m, m, dtype=A.dtype)
        j0 = inv_subblock_ptr[i]
        j1 = inv_subblock_ptr[i+1]
        gelssoutput = my_pinv(inv_subblock[j0:j1].reshape(m, m),
                              rhs, cond=cond, overwrite_a=True,
                              overwrite_b=True)
        inv_subblock[j0:j1] = np.ravel(gelssoutput[1])

A.schwarz_parameters = (subdomain, subdomain_ptr, inv_subblock,
                        inv_subblock_ptr)
return A.schwarz_parameters
def schwarz_parameters(A, subdomain=None, subdomain_ptr=None, inv_subblock=None, inv_subblock_ptr=None)
Set Schwarz parameters.

Helper function for setting up Schwarz relaxation. This function avoids
recomputing the subdomains and block inverses many times, e.g., it
avoids a costly double computation when setting up pre- and
post-smoothing with Schwarz.

Parameters
----------
A : csr_matrix

Returns
-------
A.schwarz_parameters[0] is subdomain
A.schwarz_parameters[1] is subdomain_ptr
A.schwarz_parameters[2] is inv_subblock
A.schwarz_parameters[3] is inv_subblock_ptr
2.925457
2.887217
1.013245
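A minimal usage sketch; the import path is an assumption (the helper lives alongside pyamg's Schwarz relaxation code), and the default overlapping subdomains come from A's sparsity pattern:

from pyamg.gallery import poisson
from pyamg.relaxation.relaxation import schwarz_parameters  # assumed location

A = poisson((10, 10), format='csr')
subdomain, subdomain_ptr, inv_subblock, inv_subblock_ptr = schwarz_parameters(A)

# The result is cached on A, so repeated setup (e.g., for pre- and
# post-smoothers) reuses the same block inverses:
assert schwarz_parameters(A) is A.schwarz_parameters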
cycle = str(cycle).upper()

nnz = [level.A.nnz for level in self.levels]

def V(level):
    if len(self.levels) == 1:
        return nnz[0]
    elif level == len(self.levels) - 2:
        return 2 * nnz[level] + nnz[level + 1]
    else:
        return 2 * nnz[level] + V(level + 1)

def W(level):
    if len(self.levels) == 1:
        return nnz[0]
    elif level == len(self.levels) - 2:
        return 2 * nnz[level] + nnz[level + 1]
    else:
        return 2 * nnz[level] + 2 * W(level + 1)

def F(level):
    if len(self.levels) == 1:
        return nnz[0]
    elif level == len(self.levels) - 2:
        return 2 * nnz[level] + nnz[level + 1]
    else:
        return 2 * nnz[level] + F(level + 1) + V(level + 1)

if cycle == 'V':
    flops = V(0)
elif (cycle == 'W') or (cycle == 'AMLI'):
    flops = W(0)
elif cycle == 'F':
    flops = F(0)
else:
    raise TypeError('Unrecognized cycle type (%s)' % cycle)

return float(flops) / float(nnz[0])
def cycle_complexity(self, cycle='V')
Cycle complexity of V, W, AMLI, and F(1,1) cycle with simple relaxation.

Cycle complexity is an approximate measure of the number of floating
point operations (FLOPs) required to perform a single multigrid cycle
relative to the cost of a single smoothing operation.

Parameters
----------
cycle : {'V','W','F','AMLI'}
    Type of multigrid cycle to perform in each iteration.

Returns
-------
cc : float
    Defined as F_sum / F_0, where F_sum is the total number of nonzeros
    in the matrix on all levels encountered during a cycle and F_0 is
    the number of nonzeros in the matrix on the finest level.

Notes
-----
This is only a rough estimate of the true cycle complexity. The
estimate assumes that the cost of pre- and post-smoothing are (each)
equal to the number of nonzeros in the matrix on that level. This
assumption holds for smoothers like Jacobi and Gauss-Seidel. However,
the true cycle complexity of a cycle using more expensive methods, like
block Gauss-Seidel, will be underestimated.

Additionally, if the cycle used in practice isn't a (1,1)-cycle, then
this cost estimate will be off.
1.926405
1.773029
1.086505
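A standalone re-statement of the V-cycle recursion above, with hypothetical per-level nonzero counts (not taken from any real hierarchy), to make the cost model concrete: each level contributes 2*nnz[level] for one pre- and one post-smoothing sweep, plus the work of the levels below it.

# Assumed nonzeros per level, finest first (hypothetical values).
nnz = [1000, 300, 90]

def V(level):
    # coarsest-but-one level: smooth twice here, solve once below
    if level == len(nnz) - 2:
        return 2 * nnz[level] + nnz[level + 1]
    return 2 * nnz[level] + V(level + 1)

# (2*1000 + 2*300 + 90) / 1000 = 2.69 smoothing-sweep equivalents
print(V(0) / nnz[0])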
return (sum(level.A.nnz for level in self.levels) /
        float(self.levels[0].A.nnz))
def operator_complexity(self)
Operator complexity of this multigrid hierarchy.

Defined as:
    Number of nonzeros in the matrix on all levels /
    Number of nonzeros in the matrix on the finest level
9.265436
6.003919
1.543231
return (sum(level.A.shape[0] for level in self.levels) /
        float(self.levels[0].A.shape[0]))
def grid_complexity(self)
Grid complexity of this multigrid hierarchy.

Defined as:
    Number of unknowns on all levels /
    Number of unknowns on the finest level
6.612605
5.444808
1.214479
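Both complexity measures above reduce to simple sums over the hierarchy. A minimal sketch with hypothetical level sizes (not from a real solver):

# Hypothetical per-level unknowns and nonzero counts, finest first.
shapes = [10000, 2500, 600]   # unknowns per level
nnzs = [49600, 22000, 5200]   # nonzeros per level

operator_complexity = sum(nnzs) / float(nnzs[0])    # ~1.548
grid_complexity = sum(shapes) / float(shapes[0])    # 1.31
print(operator_complexity, grid_complexity)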
from scipy.sparse.linalg import LinearOperator

shape = self.levels[0].A.shape
dtype = self.levels[0].A.dtype

def matvec(b):
    return self.solve(b, maxiter=1, cycle=cycle, tol=1e-12)

return LinearOperator(shape, matvec, dtype=dtype)
def aspreconditioner(self, cycle='V')
Create a preconditioner using this multigrid cycle.

Parameters
----------
cycle : {'V','W','F','AMLI'}
    Type of multigrid cycle to perform in each iteration.

Returns
-------
precond : LinearOperator
    Preconditioner suitable for the iterative solvers defined in the
    scipy.sparse.linalg module (e.g. cg, gmres) and any other solver
    that uses the LinearOperator interface. Refer to the LinearOperator
    documentation in scipy.sparse.linalg

See Also
--------
multilevel_solver.solve, scipy.sparse.linalg.LinearOperator

Examples
--------
>>> from pyamg.aggregation import smoothed_aggregation_solver
>>> from pyamg.gallery import poisson
>>> from scipy.sparse.linalg import cg
>>> import numpy as np
>>> A = poisson((100, 100), format='csr')          # matrix
>>> b = np.random.rand(A.shape[0])                 # random RHS
>>> ml = smoothed_aggregation_solver(A)            # AMG solver
>>> M = ml.aspreconditioner(cycle='V')             # preconditioner
>>> x, info = cg(A, b, tol=1e-8, maxiter=30, M=M)  # solve with CG
3.673719
4.687424
0.783739
A = self.levels[lvl].A

self.levels[lvl].presmoother(A, x, b)

residual = b - A * x

coarse_b = self.levels[lvl].R * residual
coarse_x = np.zeros_like(coarse_b)

if lvl == len(self.levels) - 2:
    coarse_x[:] = self.coarse_solver(self.levels[-1].A, coarse_b)
else:
    if cycle == 'V':
        self.__solve(lvl + 1, coarse_x, coarse_b, 'V')
    elif cycle == 'W':
        self.__solve(lvl + 1, coarse_x, coarse_b, cycle)
        self.__solve(lvl + 1, coarse_x, coarse_b, cycle)
    elif cycle == 'F':
        self.__solve(lvl + 1, coarse_x, coarse_b, cycle)
        self.__solve(lvl + 1, coarse_x, coarse_b, 'V')
    elif cycle == "AMLI":
        # Run nAMLI AMLI cycles, which compute "optimal" corrections by
        # orthogonalizing the coarse-grid corrections in the A-norm
        nAMLI = 2
        Ac = self.levels[lvl + 1].A
        p = np.zeros((nAMLI, coarse_b.shape[0]), dtype=coarse_b.dtype)
        beta = np.zeros((nAMLI, nAMLI), dtype=coarse_b.dtype)
        for k in range(nAMLI):
            # New search direction --> M^{-1}*residual
            p[k, :] = 1
            self.__solve(lvl + 1, p[k, :].reshape(coarse_b.shape),
                         coarse_b, cycle)

            # Orthogonalize new search direction to old directions
            for j in range(k):  # loops from j = 0...(k-1)
                beta[k, j] = np.inner(p[j, :].conj(), Ac * p[k, :]) /\
                    np.inner(p[j, :].conj(), Ac * p[j, :])
                p[k, :] -= beta[k, j] * p[j, :]

            # Compute step size
            Ap = Ac * p[k, :]
            alpha = np.inner(p[k, :].conj(), np.ravel(coarse_b)) /\
                np.inner(p[k, :].conj(), Ap)

            # Update solution
            coarse_x += alpha * p[k, :].reshape(coarse_x.shape)

            # Update residual
            coarse_b -= alpha * Ap.reshape(coarse_b.shape)
    else:
        raise TypeError('Unrecognized cycle type (%s)' % cycle)

x += self.levels[lvl].P * coarse_x  # coarse grid correction

self.levels[lvl].postsmoother(A, x, b)
def __solve(self, lvl, x, b, cycle)
Multigrid cycling.

Parameters
----------
lvl : int
    Solve problem on level `lvl`
x : numpy array
    Initial guess `x` and return correction
b : numpy array
    Right-hand side for Ax=b
cycle : {'V','W','F','AMLI'}
    Recursively called cycling function. Defines the cycling used:
    cycle = 'V',    V-cycle
    cycle = 'W',    W-cycle
    cycle = 'F',    F-cycle
    cycle = 'AMLI', AMLI-cycle
2.962482
2.773053
1.068311
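From the public interface, the cycle type that drives this recursion is chosen per solve call. A hedged sketch comparing iteration counts across cycle types (the printed counts depend on the problem and are not reproduced here):

import numpy as np
from pyamg.gallery import poisson
from pyamg.aggregation import smoothed_aggregation_solver

A = poisson((50, 50), format='csr')
b = np.ones(A.shape[0])
ml = smoothed_aggregation_solver(A, max_coarse=10)

for cycle in ('V', 'W', 'F', 'AMLI'):
    res = []
    x = ml.solve(b, tol=1e-8, cycle=cycle, residuals=res)
    print(cycle, len(res))  # iterations needed per cycle type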
G = asgraph(G)
N = G.shape[0]

mis = np.empty(N, dtype='intc')
mis[:] = -1

if k is None:
    if algo == 'serial':
        fn = amg_core.maximal_independent_set_serial
        fn(N, G.indptr, G.indices, -1, 1, 0, mis)
    elif algo == 'parallel':
        fn = amg_core.maximal_independent_set_parallel
        fn(N, G.indptr, G.indices, -1, 1, 0, mis, sp.rand(N), -1)
    else:
        raise ValueError('unknown algorithm (%s)' % algo)
else:
    fn = amg_core.maximal_independent_set_k_parallel
    fn(N, G.indptr, G.indices, k, mis, sp.rand(N), -1)

return mis
def maximal_independent_set(G, algo='serial', k=None)
Compute a maximal independent vertex set for a graph.

Parameters
----------
G : sparse matrix
    Symmetric matrix, preferably in sparse CSR or CSC format
    The nonzeros of G represent the edges of an undirected graph.
algo : {'serial', 'parallel'}
    Algorithm used to compute the MIS
        * serial : greedy serial algorithm
        * parallel : variant of Luby's parallel MIS algorithm

Returns
-------
S : array
    S[i] = 1 if vertex i is in the MIS
    S[i] = 0 otherwise

Notes
-----
Diagonal entries in the G (self loops) will be ignored.

Luby's algorithm is significantly more expensive than the greedy serial
algorithm.
2.418303
2.382103
1.015197
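A small usage sketch. The parallel variant depends on a random stream, so only the serial call is shown; the asserted invariant follows directly from the definition of an independent set:

# Compute an MIS on a structured graph and check independence.
from pyamg.gallery import poisson
from pyamg.graph import maximal_independent_set

G = poisson((10, 10), format='csr')  # 5-point grid graph
mis = maximal_independent_set(G, algo='serial')

# No two selected vertices may share an edge (self loops are ignored).
Gc = G.tocoo()
for i, j in zip(Gc.row, Gc.col):
    if i != j:
        assert not (mis[i] == 1 and mis[j] == 1)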
G = asgraph(G)
N = G.shape[0]

coloring = np.empty(N, dtype='intc')

if method == 'MIS':
    fn = amg_core.vertex_coloring_mis
    fn(N, G.indptr, G.indices, coloring)
elif method == 'JP':
    fn = amg_core.vertex_coloring_jones_plassmann
    fn(N, G.indptr, G.indices, coloring, sp.rand(N))
elif method == 'LDF':
    fn = amg_core.vertex_coloring_LDF
    fn(N, G.indptr, G.indices, coloring, sp.rand(N))
else:
    raise ValueError('unknown method (%s)' % method)

return coloring
def vertex_coloring(G, method='MIS')
Compute a vertex coloring of a graph.

Parameters
----------
G : sparse matrix
    Symmetric matrix, preferably in sparse CSR or CSC format
    The nonzeros of G represent the edges of an undirected graph.
method : string
    Algorithm used to compute the vertex coloring:
        * 'MIS' - Maximal Independent Set
        * 'JP'  - Jones-Plassmann (parallel)
        * 'LDF' - Largest-Degree-First (parallel)

Returns
-------
coloring : array
    An array of vertex colors (integers beginning at 0)

Notes
-----
Diagonal entries in the G (self loops) will be ignored.
2.739932
2.261163
1.211736
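A sketch verifying the defining property of a coloring, that adjacent vertices receive different colors (valid for any of the three methods; 'MIS' is used here):

# Color a grid graph and verify adjacent vertices differ.
from pyamg.gallery import poisson
from pyamg.graph import vertex_coloring

G = poisson((10, 10), format='csr')
coloring = vertex_coloring(G, method='MIS')

Gc = G.tocoo()
assert all(coloring[i] != coloring[j]
           for i, j in zip(Gc.row, Gc.col) if i != j)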
G = asgraph(G)
N = G.shape[0]

if maxiter is not None and maxiter < 0:
    raise ValueError('maxiter must be positive')
if G.dtype == complex:
    raise ValueError('Bellman-Ford algorithm only defined for real '
                     'weights')

seeds = np.asarray(seeds, dtype='intc')

distances = np.empty(N, dtype=G.dtype)
distances[:] = max_value(G.dtype)
distances[seeds] = 0

nearest_seed = np.empty(N, dtype='intc')
nearest_seed[:] = -1
nearest_seed[seeds] = seeds

old_distances = np.empty_like(distances)

iter = 0
while maxiter is None or iter < maxiter:
    old_distances[:] = distances

    amg_core.bellman_ford(N, G.indptr, G.indices, G.data, distances,
                          nearest_seed)

    if (old_distances == distances).all():
        break

    iter += 1  # without this increment, maxiter was never enforced

return (distances, nearest_seed)
def bellman_ford(G, seeds, maxiter=None)
Bellman-Ford iteration.

Parameters
----------
G : sparse matrix
    Each nonzero entry G[i,j] is the distance between nodes i and j.
seeds : int array
    Indices of the seed vertices; distances are measured to the
    nearest seed.
maxiter : int, optional
    Maximum number of iterations; by default, iterate until the
    distances stop changing.

Returns
-------
distances : array
    distances[i] is the shortest distance from vertex i to its nearest
    seed (0 for the seeds themselves).
nearest_seed : array
    nearest_seed[i] is the index of the seed closest to vertex i.

References
----------
CLR
3.010553
2.866933
1.050095
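A minimal sketch on a path graph, where the expected distances are easy to verify by hand; the call follows the record above, and the weights come from taking absolute values of a 1-D Poisson matrix (unit off-diagonal edges):

# Nearest-seed distances on a 1-D path graph with unit edge weights.
from pyamg.gallery import poisson
from pyamg.graph import bellman_ford

G = abs(poisson((6,), format='csr'))  # unit-weight path 0-1-2-3-4-5
distances, nearest_seed = bellman_ford(G, seeds=[0, 5])
print(distances)     # expected [0 1 2 2 1 0]
print(nearest_seed)  # expected [0 0 0 5 5 5]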
G = asgraph(G)
N = G.shape[0]

if G.dtype.kind == 'c':
    # complex dtype
    G = np.abs(G)

# interpret seeds argument
if np.isscalar(seeds):
    seeds = np.random.permutation(N)[:seeds]
    seeds = seeds.astype('intc')
else:
    seeds = np.array(seeds, dtype='intc')

if len(seeds) < 1:
    raise ValueError('at least one seed is required')

if seeds.min() < 0:
    raise ValueError('invalid seed index (%d)' % seeds.min())
if seeds.max() >= N:
    raise ValueError('invalid seed index (%d)' % seeds.max())

clusters = np.empty(N, dtype='intc')
distances = np.empty(N, dtype=G.dtype)

for i in range(maxiter):
    last_seeds = seeds.copy()

    amg_core.lloyd_cluster(N, G.indptr, G.indices, G.data,
                           len(seeds), distances, clusters, seeds)

    if (seeds == last_seeds).all():
        break

return (distances, clusters, seeds)
def lloyd_cluster(G, seeds, maxiter=10)
Perform Lloyd clustering on graph with weighted edges.

Parameters
----------
G : csr_matrix, csc_matrix
    A sparse NxN matrix where each nonzero entry G[i,j] is the distance
    between nodes i and j.
seeds : int array
    If seeds is an integer, then its value determines the number of
    clusters. Otherwise, seeds is an array of unique integers between 0
    and N-1 that will be used as the initial seeds for clustering.
maxiter : int
    The maximum number of iterations to perform.

Returns
-------
distances : array
    final distances
clusters : int array
    id of each cluster of points
seeds : int array
    index of each seed

Notes
-----
If G has complex values, abs(G) is used instead.
2.645669
2.363295
1.119483
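A short sketch. Seed selection is random when an integer is passed, so only structural properties of the output are checked; abs() is applied up front so all edge weights are nonnegative distances:

# Lloyd clustering on a grid graph with 5 random seeds.
from pyamg.gallery import poisson
from pyamg.graph import lloyd_cluster

G = abs(poisson((10, 10), format='csr'))
distances, clusters, seeds = lloyd_cluster(G, 5)

assert len(seeds) == 5
assert clusters.shape == (G.shape[0],)
assert (clusters >= 0).all()  # every vertex is assigned to some cluster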
G = asgraph(G)
N = G.shape[0]

order = np.empty(N, G.indptr.dtype)
level = np.empty(N, G.indptr.dtype)
level[:] = -1

BFS = amg_core.breadth_first_search
BFS(G.indptr, G.indices, int(seed), order, level)

return order, level
def breadth_first_search(G, seed)
Breadth First search of a graph.

Parameters
----------
G : csr_matrix, csc_matrix
    A sparse NxN matrix where each nonzero entry G[i,j] is the distance
    between nodes i and j.
seed : int
    Index of the seed location

Returns
-------
order : int array
    Breadth first order
level : int array
    Final levels

Examples
--------
0---2
|  /
| /
1---4---7---8---9
|  /|  /
| / | /
3/  6/
|
|
5

>>> import numpy as np
>>> import pyamg
>>> import scipy.sparse as sparse
>>> edges = np.array([[0,1],[0,2],[1,2],[1,3],[1,4],[3,4],[3,5],[4,6],[4,7],[6,7],[7,8],[8,9]])
>>> N = np.max(edges.ravel())+1
>>> data = np.ones((edges.shape[0],))
>>> A = sparse.coo_matrix((data, (edges[:,0], edges[:,1])), shape=(N,N))
>>> c, l = pyamg.graph.breadth_first_search(A, 0)
>>> print(l)
[0 1 1 2 2 3 3 3 4 5]
>>> print(c)
[0 1 2 3 4 5 6 7 8 9]
4.009982
4.381859
0.915132
G = asgraph(G)
N = G.shape[0]

components = np.empty(N, G.indptr.dtype)

fn = amg_core.connected_components
fn(N, G.indptr, G.indices, components)

return components
def connected_components(G)
Compute the connected components of a graph.

The connected components of a graph G, which is represented by a
symmetric sparse matrix, are labeled with the integers 0,1,..(K-1)
where K is the number of components.

Parameters
----------
G : symmetric matrix, preferably in sparse CSR or CSC format
    The nonzeros of G represent the edges of an undirected graph.

Returns
-------
components : ndarray
    An array of component labels for each vertex of the graph.

Notes
-----
If the nonzero structure of G is not symmetric, then the result is
undefined.

Examples
--------
>>> from pyamg.graph import connected_components
>>> print(connected_components( [[0,1,0],[1,0,1],[0,1,0]] ))
[0 0 0]
>>> print(connected_components( [[0,1,0],[1,0,0],[0,0,0]] ))
[0 0 1]
>>> print(connected_components( [[0,0,0],[0,0,0],[0,0,0]] ))
[0 1 2]
>>> print(connected_components( [[0,1,0,0],[1,0,0,0],[0,0,0,1],[0,0,1,0]] ))
[0 0 1 1]
4.709032
6.088264
0.773461
n = A.shape[0]

root, order, level = pseudo_peripheral_node(A)

Perm = sparse.identity(n, format='csr')
p = level.argsort()
Perm = Perm[p, :]

return Perm * A * Perm.T
def symmetric_rcm(A)
Symmetric Reverse Cuthill-McKee.

Parameters
----------
A : sparse matrix
    Sparse matrix

Returns
-------
B : sparse matrix
    Permuted matrix with reordering

Notes
-----
Get a pseudo-peripheral node, then call BFS

Examples
--------
>>> from pyamg import gallery
>>> from pyamg.graph import symmetric_rcm
>>> n = 200
>>> density = 1.0/n
>>> A = gallery.sprand(n, n, density, format='csr')
>>> S = A + A.T
>>> # try the visualizations
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(121)
>>> plt.spy(S, marker='.')
>>> plt.subplot(122)
>>> plt.spy(symmetric_rcm(S), marker='.')

See Also
--------
pseudo_peripheral_node
7.90971
5.810686
1.361235
from pyamg.graph import breadth_first_search

n = A.shape[0]

valence = np.diff(A.indptr)

# select an initial node x, set delta = 0
x = int(np.random.rand() * n)
delta = 0

while True:
    # do a level-set traversal from x
    order, level = breadth_first_search(A, x)

    # select a node y in the last level with min degree
    maxlevel = level.max()
    lastnodes = np.where(level == maxlevel)[0]
    lastnodesvalence = valence[lastnodes]
    minlastnodesvalence = lastnodesvalence.min()
    y = np.where(lastnodesvalence == minlastnodesvalence)[0][0]
    y = lastnodes[y]

    # if d(x,y)>delta, set, and go to bfs above
    if level[y] > delta:
        x = y
        delta = level[y]
    else:
        return x, order, level
def pseudo_peripheral_node(A)
Find a pseudo peripheral node.

Parameters
----------
A : sparse matrix
    Sparse matrix

Returns
-------
x : int
    Location of the node
order : array
    BFS ordering
level : array
    BFS levels

Notes
-----
Algorithm in Saad
4.319782
3.948699
1.093976
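A sketch on a path graph, where any endpoint is a true peripheral node. Since the initial node is random, only the defining property is checked:

# On a path graph the algorithm should land on an endpoint.
from pyamg.gallery import poisson
from pyamg.graph import pseudo_peripheral_node

A = poisson((20,), format='csr')  # path graph 0-1-...-19
x, order, level = pseudo_peripheral_node(A)
# eccentricity of the returned node equals the path length
print(x, level.max())  # x is expected to be 0 or 19; level.max() == 19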
A = ml.levels[0].A
b = A * sp.rand(A.shape[0], 1)

residuals = []

if accel is None:
    ml.solve(b, residuals=residuals, **kwargs)
else:
    def callback(x):
        residuals.append(norm(np.ravel(b) - np.ravel(A*x)))

    M = ml.aspreconditioner(cycle=kwargs.get('cycle', 'V'))
    accel(A, b, M=M, callback=callback, **kwargs)

return np.asarray(residuals)
def profile_solver(ml, accel=None, **kwargs)
Profile a particular multilevel object.

Parameters
----------
ml : multilevel
    Fully constructed multilevel object
accel : function pointer
    Pointer to a valid Krylov solver (e.g. gmres, cg)

Returns
-------
residuals : array
    Array of residuals for each iteration

See Also
--------
multilevel.psolve, multilevel.solve

Examples
--------
>>> import numpy as np
>>> from scipy.sparse import spdiags, csr_matrix
>>> from scipy.sparse.linalg import cg
>>> from pyamg.classical import ruge_stuben_solver
>>> from pyamg.util.utils import profile_solver
>>> n = 100
>>> e = np.ones((n,1)).ravel()
>>> data = [ -1*e, 2*e, -1*e ]
>>> A = csr_matrix(spdiags(data,[-1,0,1],n,n))
>>> b = A*np.ones(A.shape[0])
>>> ml = ruge_stuben_solver(A, max_coarse=10)
>>> res = profile_solver(ml, accel=cg)
4.00344
4.651523
0.860673
if isspmatrix(A):
    return A.diagonal()
else:
    if np.ndim(A) != 1:
        raise ValueError('input diagonal array expected to be 1d')
    return csr_matrix((np.asarray(A), np.arange(len(A)),
                       np.arange(len(A)+1)), (len(A), len(A)))
def diag_sparse(A)
Return a diagonal.

If A is a sparse matrix (e.g. csr_matrix or csc_matrix), return the
diagonal of A as an array. Otherwise, return a csr_matrix with A on
the diagonal.

Parameters
----------
A : sparse matrix or 1d array
    General sparse matrix or array of diagonal entries

Returns
-------
B : array or sparse matrix
    If A is sparse, an array of the diagonal; if A is dense, a csr
    matrix with A on the diagonal

Examples
--------
>>> import numpy as np
>>> from pyamg.util.utils import diag_sparse
>>> d = 2.0*np.ones((3,)).ravel()
>>> print(diag_sparse(d).todense())
[[ 2.  0.  0.]
 [ 0.  2.  0.]
 [ 0.  0.  2.]]
3.505005
3.870257
0.905626
v = np.ravel(v)

M, N = A.shape

if not isspmatrix(A):
    raise ValueError('scale rows needs a sparse matrix')

if M != len(v):
    raise ValueError('scale vector has incompatible shape')

if copy:
    A = A.copy()
    A.data = np.asarray(A.data, dtype=upcast(A.dtype, v.dtype))
else:
    v = np.asarray(v, dtype=A.dtype)

if isspmatrix_csr(A):
    csr_scale_rows(M, N, A.indptr, A.indices, A.data, v)
elif isspmatrix_bsr(A):
    R, C = A.blocksize
    bsr_scale_rows(int(M/R), int(N/C), R, C, A.indptr, A.indices,
                   np.ravel(A.data), v)
elif isspmatrix_csc(A):
    pyamg.amg_core.csc_scale_rows(M, N, A.indptr, A.indices, A.data, v)
else:
    fmt = A.format
    A = scale_rows(csr_matrix(A), v).asformat(fmt)

return A
def scale_rows(A, v, copy=True)
Scale the sparse rows of a matrix.

Parameters
----------
A : sparse matrix
    Sparse matrix with M rows
v : array_like
    Array of M scales
copy : {True,False}
    - If copy=True, then the matrix is copied to a new and different
      return matrix (e.g. B=scale_rows(A,v))
    - If copy=False, then the matrix is overwritten deeply (e.g.
      scale_rows(A,v,copy=False) overwrites A)

Returns
-------
A : sparse matrix
    Scaled sparse matrix in original format

See Also
--------
scipy.sparse._sparsetools.csr_scale_rows, scale_columns

Notes
-----
- if A is a csc_matrix, the rows are scaled directly in CSC form
  through amg_core.csc_scale_rows
- if A is not csr, csc, or bsr, it is converted to csr and sent
  to scale_rows

Examples
--------
>>> import numpy as np
>>> from scipy.sparse import spdiags
>>> from pyamg.util.utils import scale_rows
>>> n = 5
>>> e = np.ones((n,1)).ravel()
>>> data = [ -1*e, 2*e, -1*e ]
>>> A = spdiags(data,[-1,0,1],n,n-1).tocsr()
>>> B = scale_rows(A, 5*np.ones((A.shape[0],1)))
2.599241
3.070634
0.846484
v = np.ravel(v)

M, N = A.shape

if not isspmatrix(A):
    raise ValueError('scale columns needs a sparse matrix')

if N != len(v):
    raise ValueError('scale vector has incompatible shape')

if copy:
    A = A.copy()
    A.data = np.asarray(A.data, dtype=upcast(A.dtype, v.dtype))
else:
    v = np.asarray(v, dtype=A.dtype)

if isspmatrix_csr(A):
    csr_scale_columns(M, N, A.indptr, A.indices, A.data, v)
elif isspmatrix_bsr(A):
    R, C = A.blocksize
    bsr_scale_columns(int(M/R), int(N/C), R, C, A.indptr, A.indices,
                      np.ravel(A.data), v)
elif isspmatrix_csc(A):
    pyamg.amg_core.csc_scale_columns(M, N, A.indptr, A.indices,
                                     A.data, v)
else:
    fmt = A.format
    A = scale_columns(csr_matrix(A), v).asformat(fmt)

return A
def scale_columns(A, v, copy=True)
Scale the sparse columns of a matrix.

Parameters
----------
A : sparse matrix
    Sparse matrix with N columns
v : array_like
    Array of N scales
copy : {True,False}
    - If copy=True, then the matrix is copied to a new and different
      return matrix (e.g. B=scale_columns(A,v))
    - If copy=False, then the matrix is overwritten deeply (e.g.
      scale_columns(A,v,copy=False) overwrites A)

Returns
-------
A : sparse matrix
    Scaled sparse matrix in original format

See Also
--------
scipy.sparse._sparsetools.csr_scale_columns, scale_rows

Notes
-----
- if A is a csc_matrix, the columns are scaled directly in CSC form
  through amg_core.csc_scale_columns
- if A is not csr, csc, or bsr, it is converted to csr and sent to
  scale_columns

Examples
--------
>>> import numpy as np
>>> from scipy.sparse import spdiags
>>> from pyamg.util.utils import scale_columns
>>> n = 5
>>> e = np.ones((n,1)).ravel()
>>> data = [ -1*e, 2*e, -1*e ]
>>> A = spdiags(data,[-1,0,1],n,n-1).tocsr()
>>> print(scale_columns(A, 5*np.ones((A.shape[1],1))).todense())
[[ 10.  -5.   0.   0.]
 [ -5.  10.  -5.   0.]
 [  0.  -5.  10.  -5.]
 [  0.   0.  -5.  10.]
 [  0.   0.   0.  -5.]]
2.599377
3.018754
0.861076
if isspmatrix_csr(A) or isspmatrix_csc(A) or isspmatrix_bsr(A):
    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix')

    D = diag_sparse(A)
    mask = (D != 0)

    if A.dtype != complex:
        D_sqrt = np.sqrt(abs(D))
    else:
        # We can take square roots of negative numbers
        D_sqrt = np.sqrt(D)

    D_sqrt_inv = np.zeros_like(D_sqrt)
    D_sqrt_inv[mask] = 1.0/D_sqrt[mask]

    DAD = scale_rows(A, D_sqrt_inv, copy=copy)
    DAD = scale_columns(DAD, D_sqrt_inv, copy=False)

    return D_sqrt, D_sqrt_inv, DAD
else:
    return symmetric_rescaling(csr_matrix(A))
def symmetric_rescaling(A, copy=True)
Scale the matrix symmetrically.

    A = D^{-1/2} A D^{-1/2}

where D=diag(A).

The left multiplication is accomplished through scale_rows and the
right multiplication is done through scale_columns.

Parameters
----------
A : sparse matrix
    Sparse matrix with N rows
copy : {True,False}
    - If copy=True, then the matrix is copied to a new and different
      return matrix (e.g. B=symmetric_rescaling(A))
    - If copy=False, then the matrix is overwritten deeply (e.g.
      symmetric_rescaling(A,copy=False) overwrites A)

Returns
-------
D_sqrt : array
    Array of sqrt(diag(A))
D_sqrt_inv : array
    Array of 1/sqrt(diag(A))
DAD : csr_matrix
    Symmetrically scaled A

Notes
-----
- if A is not csr, it is converted to csr and sent to scale_rows

Examples
--------
>>> import numpy as np
>>> from scipy.sparse import spdiags
>>> from pyamg.util.utils import symmetric_rescaling
>>> n = 5
>>> e = np.ones((n,1)).ravel()
>>> data = [ -1*e, 2*e, -1*e ]
>>> A = spdiags(data,[-1,0,1],n,n).tocsr()
>>> Ds, Dsi, DAD = symmetric_rescaling(A)
>>> print(DAD.todense())
[[ 1.  -0.5  0.   0.   0. ]
 [-0.5  1.  -0.5  0.   0. ]
 [ 0.  -0.5  1.  -0.5  0. ]
 [ 0.   0.  -0.5  1.  -0.5]
 [ 0.   0.   0.  -0.5  1. ]]
2.806417
2.721491
1.031206
# rescale A
[D_sqrt, D_sqrt_inv, A] = symmetric_rescaling(A, copy=False)

# scale candidates
for i in range(B.shape[1]):
    B[:, i] = np.ravel(B[:, i])*np.ravel(D_sqrt)

if hasattr(A, 'symmetry'):
    if A.symmetry == 'nonsymmetric':
        if BH is None:
            raise ValueError("BH should be an n x m array")
        else:
            for i in range(BH.shape[1]):
                BH[:, i] = np.ravel(BH[:, i])*np.ravel(D_sqrt)

return [A, B, BH]
def symmetric_rescaling_sa(A, B, BH=None)
Scale the matrix symmetrically.

    A = D^{-1/2} A D^{-1/2}

where D=diag(A). The left multiplication is accomplished through
scale_rows and the right multiplication is done through scale_columns.

The candidates B and BH are scaled accordingly::

    B = D^{1/2} B
    BH = D^{1/2} BH

Parameters
----------
A : {sparse matrix}
    Sparse matrix with N rows
B : {array}
    N x m array
BH : {None, array}
    If A.symmetry == 'nonsymmetric', then BH must be an N x m array.
    Otherwise, BH is ignored.

Returns
-------
Appropriately scaled A, B and BH, i.e.,
A = D^{-1/2} A D^{-1/2}, B = D^{1/2} B, and BH = D^{1/2} BH

Notes
-----
- if A is not csr, it is converted to csr and sent to scale_rows

Examples
--------
>>> import numpy as np
>>> from scipy.sparse import spdiags
>>> from pyamg.util.utils import symmetric_rescaling_sa
>>> n = 5
>>> e = np.ones((n,1)).ravel()
>>> data = [ -1*e, 2*e, -1*e ]
>>> A = spdiags(data,[-1,0,1],n,n).tocsr()
>>> B = e.copy().reshape(-1,1)
>>> [DAD, DB, DBH] = symmetric_rescaling_sa(A, B, BH=None)
>>> print(DAD.todense())
[[ 1.  -0.5  0.   0.   0. ]
 [-0.5  1.  -0.5  0.   0. ]
 [ 0.  -0.5  1.  -0.5  0. ]
 [ 0.   0.  -0.5  1.  -0.5]
 [ 0.   0.   0.  -0.5  1. ]]
>>> print(DB)
[[ 1.41421356]
 [ 1.41421356]
 [ 1.41421356]
 [ 1.41421356]
 [ 1.41421356]]
3.339621
3.42918
0.973883
varlist = to_type(upcast_type, varlist)
for i in range(len(varlist)):
    if np.isscalar(varlist[i]):
        varlist[i] = np.array([varlist[i]])

return varlist
def type_prep(upcast_type, varlist)
Upcast variables to a type.

Loop over all elements of varlist and convert them to upcast_type.

The only difference with pyamg.util.utils.to_type(...) is that scalars
are wrapped into (1,) arrays. This is desirable when passing the numpy
complex data type to C routines, where complex scalars aren't handled
correctly otherwise.

Parameters
----------
upcast_type : data type
    e.g. complex, float64 or complex128
varlist : list
    list may contain arrays, mat's, sparse matrices, or scalars
    the elements may be float, int or complex

Returns
-------
Returns upcast-ed varlist to upcast_type

Notes
-----
Useful when harmonizing the types of variables, such as if A and b are
complex, but x, y and z are not.

Examples
--------
>>> import numpy as np
>>> from pyamg.util.utils import type_prep
>>> from scipy.sparse.sputils import upcast
>>> x = np.ones((5,1))
>>> y = 2.0j*np.ones((5,1))
>>> z = 2.3
>>> varlist = type_prep(upcast(x.dtype, y.dtype), [x, y, z])
2.466162
3.575888
0.689664
# convert_type = type(np.array([0], upcast_type)[0])

for i in range(len(varlist)):
    # convert scalars to complex
    if np.isscalar(varlist[i]):
        varlist[i] = np.array([varlist[i]], upcast_type)[0]
    else:
        # convert sparse and dense mats to complex
        try:
            if varlist[i].dtype != upcast_type:
                varlist[i] = varlist[i].astype(upcast_type)
        except AttributeError:
            warn('Failed to cast in to_type')

return varlist
def to_type(upcast_type, varlist)
Loop over all elements of varlist and convert them to upcast_type.

Parameters
----------
upcast_type : data type
    e.g. complex, float64 or complex128
varlist : list
    list may contain arrays, mat's, sparse matrices, or scalars
    the elements may be float, int or complex

Returns
-------
Returns upcast-ed varlist to upcast_type

Notes
-----
Useful when harmonizing the types of variables, such as if A and b are
complex, but x, y and z are not.

Examples
--------
>>> import numpy as np
>>> from pyamg.util.utils import to_type
>>> from scipy.sparse.sputils import upcast
>>> x = np.ones((5,1))
>>> y = 2.0j*np.ones((5,1))
>>> varlist = to_type(upcast(x.dtype, y.dtype), [x, y])
3.169989
3.582437
0.884869
# if not isspmatrix(A):
if not (isspmatrix_csr(A) or isspmatrix_csc(A) or isspmatrix_bsr(A)):
    warn('Implicit conversion to sparse matrix')
    A = csr_matrix(A)

# critical to sort the indices of A
A.sort_indices()
if norm_eq == 1:
    # This transpose involves almost no work, use csr data structures
    # as csc, or vice versa
    At = A.T
    D = (At.multiply(At.conjugate()))*np.ones((At.shape[0],))
elif norm_eq == 2:
    D = (A.multiply(A.conjugate()))*np.ones((A.shape[0],))
else:
    D = A.diagonal()

if inv:
    Dinv = np.zeros_like(D)
    mask = (D != 0.0)
    Dinv[mask] = 1.0 / D[mask]
    return Dinv
else:
    return D
def get_diagonal(A, norm_eq=False, inv=False)
Return the diagonal or inverse of diagonal for A, (A.H A) or (A A.H).

Parameters
----------
A : {dense or sparse matrix}
    e.g. array, matrix, csr_matrix, ...
norm_eq : {0, 1, 2}
    0 ==> D = diag(A)
    1 ==> D = diag(A.H A)
    2 ==> D = diag(A A.H)
inv : {True, False}
    If True, D = 1.0/D

Returns
-------
diagonal, D, of appropriate system

Notes
-----
This function is especially useful for its fast methods of obtaining
diag(A A.H) and diag(A.H A). Dinv is zero wherever D is zero.

Examples
--------
>>> from pyamg.util.utils import get_diagonal
>>> from pyamg.gallery import poisson
>>> A = poisson( (5,), format='csr' )
>>> D = get_diagonal(A)
>>> print(D)
[ 2.  2.  2.  2.  2.]
>>> D = get_diagonal(A, norm_eq=1, inv=True)
>>> print(D)
[ 0.2         0.16666667  0.16666667  0.16666667  0.2       ]
3.53531
3.715309
0.951552
if not isspmatrix(A):
    raise TypeError('Expected sparse matrix')
if A.shape[0] != A.shape[1]:
    raise ValueError("Expected square matrix")
if sp.mod(A.shape[0], blocksize) != 0:
    raise ValueError("blocksize and A.shape must be compatible")

# If the block diagonal of A already exists, return that
if hasattr(A, 'block_D_inv') and inv_flag:
    if (A.block_D_inv.shape[1] == blocksize) and \
       (A.block_D_inv.shape[2] == blocksize) and \
       (A.block_D_inv.shape[0] == int(A.shape[0]/blocksize)):
        return A.block_D_inv
elif hasattr(A, 'block_D') and (not inv_flag):
    if (A.block_D.shape[1] == blocksize) and \
       (A.block_D.shape[2] == blocksize) and \
       (A.block_D.shape[0] == int(A.shape[0]/blocksize)):
        return A.block_D

# Convert to BSR
if not isspmatrix_bsr(A):
    A = bsr_matrix(A, blocksize=(blocksize, blocksize))
if A.blocksize != (blocksize, blocksize):
    A = A.tobsr(blocksize=(blocksize, blocksize))

# Peel off block diagonal by extracting block entries from the now BSR
# matrix A
A = A.asfptype()
block_diag = sp.zeros((int(A.shape[0]/blocksize), blocksize, blocksize),
                      dtype=A.dtype)

AAIJ = (sp.arange(1, A.indices.shape[0]+1), A.indices, A.indptr)
shape = (int(A.shape[0]/blocksize), int(A.shape[0]/blocksize))
diag_entries = csr_matrix(AAIJ, shape=shape).diagonal()
diag_entries -= 1
nonzero_mask = (diag_entries != -1)
diag_entries = diag_entries[nonzero_mask]

if diag_entries.shape != (0,):
    block_diag[nonzero_mask, :, :] = A.data[diag_entries, :, :]

if inv_flag:
    # Invert each block
    if block_diag.shape[1] < 7:
        # This specialized routine lacks robustness for large matrices
        pyamg.amg_core.pinv_array(block_diag.ravel(),
                                  block_diag.shape[0],
                                  block_diag.shape[1], 'T')
    else:
        pinv_array(block_diag)
    A.block_D_inv = block_diag
else:
    A.block_D = block_diag

return block_diag
def get_block_diag(A, blocksize, inv_flag=True)
Return the block diagonal of A, in array form.

Parameters
----------
A : csr_matrix
    assumed to be square
blocksize : int
    square block size for the diagonal
inv_flag : bool
    if True, return the inverse of the block diagonal

Returns
-------
block_diag : array
    block diagonal of A in array form,
    array size is (A.shape[0]/blocksize, blocksize, blocksize)

Examples
--------
>>> from scipy import arange
>>> from scipy.sparse import csr_matrix
>>> from pyamg.util import get_block_diag
>>> A = csr_matrix(arange(36).reshape(6,6))
>>> block_diag_inv = get_block_diag(A, blocksize=2, inv_flag=False)
>>> print(block_diag_inv)
[[[  0.   1.]
  [  6.   7.]]
<BLANKLINE>
 [[ 14.  15.]
  [ 20.  21.]]
<BLANKLINE>
 [[ 28.  29.]
  [ 34.  35.]]]
>>> block_diag_inv = get_block_diag(A, blocksize=2, inv_flag=True)
2.577073
2.66388
0.967413
if blocksize == 1:
    return A
elif sp.mod(A.shape[0], blocksize) != 0:
    raise ValueError("Incompatible blocksize")

A = A.tobsr(blocksize=(blocksize, blocksize))
A.sort_indices()
subI = (np.ones(A.indices.shape), A.indices, A.indptr)
shape = (int(A.shape[0]/A.blocksize[0]),
         int(A.shape[1]/A.blocksize[1]))
return csr_matrix(subI, shape=shape)
def amalgamate(A, blocksize)
Amalgamate matrix A.

Parameters
----------
A : csr_matrix
    Matrix to amalgamate
blocksize : int
    blocksize to use while amalgamating

Returns
-------
A_amal : csr_matrix
    Amalgamated matrix A. A is first converted to BSR with a square
    blocksize, and then a CSR matrix of ones is returned using the
    resulting BSR indptr and indices.

Notes
-----
inverse operation of UnAmal for square matrices

Examples
--------
>>> from numpy import array
>>> from scipy.sparse import csr_matrix
>>> from pyamg.util.utils import amalgamate
>>> row = array([0,0,1])
>>> col = array([0,2,1])
>>> data = array([1,2,3])
>>> A = csr_matrix( (data,(row,col)), shape=(4,4) )
>>> A.todense()
matrix([[1, 0, 2, 0],
        [0, 3, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0]])
>>> amalgamate(A,2).todense()
matrix([[ 1.,  1.],
        [ 0.,  0.]])
3.27873
3.585194
0.91452
data = np.ones((A.indices.shape[0], RowsPerBlock, ColsPerBlock))
blockI = (data, A.indices, A.indptr)
shape = (RowsPerBlock*A.shape[0], ColsPerBlock*A.shape[1])
return bsr_matrix(blockI, shape=shape)
def UnAmal(A, RowsPerBlock, ColsPerBlock)
Unamalgamate a CSR A with blocks of 1's.

This operation is equivalent to replacing each entry of A with
ones(RowsPerBlock, ColsPerBlock), i.e., this is equivalent to setting
all of A's nonzeros to 1 and then doing a Kronecker product between A
and ones(RowsPerBlock, ColsPerBlock).

Parameters
----------
A : csr_matrix
    Amalgamated matrix
RowsPerBlock : int
    Give A blocks of size (RowsPerBlock, ColsPerBlock)
ColsPerBlock : int
    Give A blocks of size (RowsPerBlock, ColsPerBlock)

Returns
-------
A : bsr_matrix
    Returns A.data[:] = 1, followed by a Kronecker product of A and
    ones(RowsPerBlock, ColsPerBlock)

Examples
--------
>>> from numpy import array
>>> from scipy.sparse import csr_matrix
>>> from pyamg.util.utils import UnAmal
>>> row = array([0,0,1,2,2,2])
>>> col = array([0,2,2,0,1,2])
>>> data = array([1,2,3,4,5,6])
>>> A = csr_matrix( (data,(row,col)), shape=(3,3) )
>>> A.todense()
matrix([[1, 0, 2],
        [0, 0, 3],
        [4, 5, 6]])
>>> UnAmal(A,2,2).todense()
matrix([[ 1.,  1.,  0.,  0.,  1.,  1.],
        [ 1.,  1.,  0.,  0.,  1.,  1.],
        [ 0.,  0.,  0.,  0.,  1.,  1.],
        [ 0.,  0.,  0.,  0.,  1.,  1.],
        [ 1.,  1.,  1.,  1.,  1.,  1.],
        [ 1.,  1.,  1.,  1.,  1.,  1.]])
3.181842
3.917473
0.812218
table_str = '\n'

# sometimes, the table will be passed in as (title, table)
if isinstance(table, tuple):
    title = table[0]
    table = table[1]

# Calculate each column's width
colwidths = []
for i in range(len(table)):
    # extend colwidths for row i
    for k in range(len(table[i]) - len(colwidths)):
        colwidths.append(-1)
    # Update colwidths if table[i][j] is wider than colwidth[j]
    for j in range(len(table[i])):
        if len(table[i][j]) > colwidths[j]:
            colwidths[j] = len(table[i][j])

# Factor in extra column padding
for i in range(len(colwidths)):
    colwidths[i] += col_padding

# Total table width
ttwidth = sum(colwidths) + len(delim)*(len(colwidths)-1)

# Print Title
if len(title) > 0:
    title = title.split("\n")
    for i in range(len(title)):
        table_str += str.center(title[i], ttwidth) + '\n'
    table_str += "\n"

# Choose centering scheme
centering = centering.lower()
if centering == 'center':
    centering = str.center
if centering == 'right':
    centering = str.rjust
if centering == 'left':
    centering = str.ljust

if header:
    # Append Column Headers
    for elmt, elmtwidth in zip(table[0], colwidths):
        table_str += centering(str(elmt), elmtwidth) + delim
    if table[0] != []:
        table_str = table_str[:-len(delim)] + '\n'

    # Append Header Separator
    #   Total Column Width    Total Col Delimiter Widths
    if len(headerchar) == 0:
        headerchar = ' '
    table_str += headerchar *\
        int(sp.ceil(float(ttwidth)/float(len(headerchar)))) + '\n'

    table = table[1:]

for row in table:
    for elmt, elmtwidth in zip(row, colwidths):
        table_str += centering(str(elmt), elmtwidth) + delim
    if row != []:
        table_str = table_str[:-len(delim)] + '\n'
    else:
        table_str += '\n'

return table_str
def print_table(table, title='', delim='|', centering='center', col_padding=2, header=True, headerchar='-')
Print a table from a list of lists representing the rows of a table.

Parameters
----------
table : list
    list of lists, e.g. a table with 3 columns and 2 rows could be
    [ ['0,0', '0,1', '0,2'], ['1,0', '1,1', '1,2'] ]
title : string
    Printed centered above the table
delim : string
    character to delimit columns
centering : {'left', 'right', 'center'}
    chooses justification for columns
col_padding : int
    number of blank spaces to add to each column
header : {True, False}
    Does the first entry of table contain column headers?
headerchar : {string}
    character to separate column headers from rest of table

Returns
-------
string representing table that's ready to be printed

Notes
-----
The string for the table will have correctly justified columns with
extra padding added into each column entry to ensure columns align.
The characters to delimit the columns can be user defined. This should
be useful for printing convergence data from tests.

Examples
--------
>>> from pyamg.util.utils import print_table
>>> table = [ ['cos(0)', 'cos(pi/2)', 'cos(pi)'], ['0.0', '1.0', '0.0'] ]
>>> table1 = print_table(table)                 # string to print
>>> table2 = print_table(table, delim='||')
>>> table3 = print_table(table, headerchar='*')
>>> table4 = print_table(table, col_padding=6, centering='left')
2.560843
2.730898
0.937729
from pyamg import relaxation
from scipy.sparse.linalg.interface import LinearOperator
import pyamg.multilevel

def unpack_arg(v):
    if isinstance(v, tuple):
        return v[0], v[1]
    else:
        return v, {}

# setup variables
accepted_methods = ['gauss_seidel', 'block_gauss_seidel', 'sor',
                    'gauss_seidel_ne', 'gauss_seidel_nr', 'jacobi',
                    'block_jacobi', 'richardson', 'schwarz',
                    'strength_based_schwarz', 'jacobi_ne']

b = np.array(b, dtype=A.dtype)
fn, kwargs = unpack_arg(method)
lvl = pyamg.multilevel_solver.level()
lvl.A = A

# Retrieve setup call from relaxation.smoothing for this relaxation method
if fn not in accepted_methods:
    raise NameError("invalid relaxation method: ", fn)
try:
    setup_smoother = getattr(relaxation.smoothing, 'setup_' + fn)
except NameError:
    raise NameError("invalid presmoother method: ", fn)

# Get relaxation routine that takes only (A, x, b) as parameters
relax = setup_smoother(lvl, **kwargs)

# Define matvec
def matvec(x):
    xcopy = x.copy()
    relax(A, xcopy, b)
    return xcopy

return LinearOperator(A.shape, matvec, dtype=A.dtype)
def relaxation_as_linear_operator(method, A, b)
Create a linear operator that applies a relaxation method for the given
right-hand side.

Parameters
----------
method : {tuple or string}
    Relaxation descriptor: Each tuple must be of the form
    ('method','opts') where 'method' is the name of a supported
    smoother, e.g., gauss_seidel, and 'opts' a dict of keyword
    arguments to the smoother, e.g., opts = {'sweep': 'symmetric'}.
    If string, must be that of a supported smoother, e.g.,
    gauss_seidel.

Returns
-------
linear operator that applies the relaxation method to a vector for a
fixed right-hand side, b.

Notes
-----
This method is primarily used to improve B during the aggregation setup
phase. Here b = 0, and each relaxation call can improve the quality of
B, especially near the boundaries.

Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.util.utils import relaxation_as_linear_operator
>>> import numpy as np
>>> A = poisson((100,100), format='csr')  # matrix
>>> B = np.ones((A.shape[0],1))           # Candidate vector
>>> b = np.zeros((A.shape[0]))            # RHS
>>> relax = relaxation_as_linear_operator('gauss_seidel', A, b)
>>> B = relax*B
4.213129
4.379918
0.96192
if not isspmatrix_bsr(C) and not isspmatrix_csr(C):
    raise TypeError('Expected bsr_matrix or csr_matrix for C')
if C.shape[1] != B.shape[0]:
    raise TypeError('Expected matching dimensions such that C*B')

# Problem parameters
if isspmatrix_bsr(C):
    ColsPerBlock = C.blocksize[1]
    RowsPerBlock = C.blocksize[0]
else:
    ColsPerBlock = 1
    RowsPerBlock = 1
Ncoarse = C.shape[1]
Nfine = C.shape[0]
NullDim = B.shape[1]
Nnodes = int(Nfine/RowsPerBlock)

# Construct BtB
BtBinv = np.zeros((Nnodes, NullDim, NullDim), dtype=B.dtype)
BsqCols = sum(range(NullDim+1))
Bsq = np.zeros((Ncoarse, BsqCols), dtype=B.dtype)
counter = 0
for i in range(NullDim):
    for j in range(i, NullDim):
        Bsq[:, counter] = np.conjugate(np.ravel(np.asarray(B[:, i]))) * \
            np.ravel(np.asarray(B[:, j]))
        counter = counter + 1

# This specialized C-routine calculates (B.T B) for each row using Bsq
pyamg.amg_core.calc_BtB(NullDim, Nnodes, ColsPerBlock,
                        np.ravel(np.asarray(Bsq)), BsqCols,
                        np.ravel(np.asarray(BtBinv)),
                        C.indptr, C.indices)

# Invert each block of BtBinv, noting that amg_core.calc_BtB(...)
# returns values in column-major form, thus necessitating the deep
# transpose.
#   This is the old call to a specialized routine, but lacks robustness
#   pyamg.amg_core.pinv_array(np.ravel(BtBinv), Nnodes, NullDim, 'F')
BtBinv = BtBinv.transpose((0, 2, 1)).copy()
pinv_array(BtBinv)

return BtBinv
def compute_BtBinv(B, C)
Create block inverses.

Helper function that creates inv(B_i.T B_i) for each block row i in C,
where B_i is B restricted to the sparsity pattern of block row i.

Parameters
----------
B : {array}
    (M,k) array, typically near-nullspace modes for coarse grid, i.e.,
    B_c.
C : {csr_matrix, bsr_matrix}
    Sparse NxM matrix, whose sparsity structure (i.e., matrix graph) is
    used to determine BtBinv.

Returns
-------
BtBinv : {array}
    BtBinv[i] = inv(B_i.T B_i), where B_i is B restricted to the
    nonzero pattern of block row i in C.

Examples
--------
>>> from numpy import array
>>> from scipy.sparse import bsr_matrix
>>> from pyamg.util.utils import compute_BtBinv
>>> T = array([[ 1., 0.],
...            [ 1., 0.],
...            [ 0., .5],
...            [ 0., .25]])
>>> T = bsr_matrix(T)
>>> B = array([[1.],[2.]])
>>> compute_BtBinv(B, T)
array([[[ 1.  ]],
<BLANKLINE>
       [[ 1.  ]],
<BLANKLINE>
       [[ 0.25]],
<BLANKLINE>
       [[ 0.25]]])

Notes
-----
The principal calling routines are
aggregation.smooth.energy_prolongation_smoother, and
util.utils.filter_operator.

BtBinv is used in the prolongation smoothing process that incorporates
B into the span of prolongation with row-wise projection operators. It
is these projection operators that BtBinv is part of.
4.06827
4.049902
1.004535
# Find the diagonally dominant rows in A.
A_abs = A.copy()
A_abs.data = np.abs(A_abs.data)
D_abs = get_diagonal(A_abs, norm_eq=0, inv=False)
diag_dom_rows = (D_abs > (theta*(A_abs*np.ones((A_abs.shape[0],),
                                 dtype=A_abs.dtype) - D_abs)))

# Account for BSR matrices and translate diag_dom_rows from dofs to nodes
bsize = blocksize(A_abs)
if bsize > 1:
    diag_dom_rows = np.array(diag_dom_rows, dtype=int)
    diag_dom_rows = diag_dom_rows.reshape(-1, bsize)
    diag_dom_rows = np.sum(diag_dom_rows, axis=1)
    diag_dom_rows = (diag_dom_rows == bsize)

# Replace these rows/cols in C with rows/cols of the identity.
Id = eye(C.shape[0], C.shape[1], format='csr')
Id.data[diag_dom_rows] = 0.0
C = Id * C * Id
Id.data[diag_dom_rows] = 1.0
Id.data[np.where(diag_dom_rows == 0)[0]] = 0.0
C = C + Id

del A_abs
return C
def eliminate_diag_dom_nodes(A, C, theta=1.02)
r"""Eliminate diagonally dominance. Helper function that eliminates diagonally dominant rows and cols from A in the separate matrix C. This is useful because it eliminates nodes in C which we don't want coarsened. These eliminated nodes in C just become the rows and columns of the identity. Parameters ---------- A : {csr_matrix, bsr_matrix} Sparse NxN matrix C : {csr_matrix} Sparse MxM matrix, where M is the number of nodes in A. M=N if A is CSR or is BSR with blocksize 1. Otherwise M = N/blocksize. theta : {float} determines diagonal dominance threshhold Returns ------- C : {csr_matrix} C updated such that the rows and columns corresponding to diagonally dominant rows in A have been eliminated and replaced with rows and columns of the identity. Notes ----- Diagonal dominance is defined as :math:`\| (e_i, A) - a_{ii} \|_1 < \\theta a_{ii}` that is, the 1-norm of the off diagonal elements in row i must be less than theta times the diagonal element. Examples -------- >>> from pyamg.gallery import poisson >>> from pyamg.util.utils import eliminate_diag_dom_nodes >>> A = poisson( (4,), format='csr' ) >>> C = eliminate_diag_dom_nodes(A, A.copy(), 1.1) >>> C.todense() matrix([[ 1., 0., 0., 0.], [ 0., 2., -1., 0.], [ 0., -1., 2., 0.], [ 0., 0., 0., 1.]])
3.741065
3.610042
1.036294
if not isspmatrix_csr(S):
    raise TypeError('expected csr_matrix')

if S.shape[0] != S.shape[1]:
    raise ValueError('expected square matrix, shape=%s' % (S.shape,))

S = coo_matrix(S)
mask = S.row != S.col
S.row = S.row[mask]
S.col = S.col[mask]
S.data = S.data[mask]

return S.tocsr()
def remove_diagonal(S)
Remove the diagonal of the matrix S.

Parameters
----------
S : csr_matrix
    Square matrix

Returns
-------
S : csr_matrix
    Strength matrix with the diagonal removed

Notes
-----
This is needed by all the splitting routines which operate on matrix
graphs with an assumed zero diagonal

Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.util.utils import remove_diagonal
>>> A = poisson( (4,), format='csr' )
>>> C = remove_diagonal(A)
>>> C.todense()
matrix([[ 0., -1.,  0.,  0.],
        [-1.,  0., -1.,  0.],
        [ 0., -1.,  0., -1.],
        [ 0.,  0., -1.,  0.]])
2.166274
2.890501
0.749446
if not isspmatrix_csr(S):
    raise TypeError('expected csr_matrix')

# Scale S by the largest magnitude entry in each row
largest_row_entry = np.zeros((S.shape[0],), dtype=S.dtype)
pyamg.amg_core.maximum_row_value(S.shape[0], largest_row_entry,
                                 S.indptr, S.indices, S.data)

largest_row_entry[largest_row_entry != 0] =\
    1.0 / largest_row_entry[largest_row_entry != 0]
S = scale_rows(S, largest_row_entry, copy=True)

return S
def scale_rows_by_largest_entry(S)
Scale each row in S by its largest in magnitude entry.

Parameters
----------
S : csr_matrix

Returns
-------
S : csr_matrix
    Each row has been scaled by its largest in magnitude entry

Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.util.utils import scale_rows_by_largest_entry
>>> A = poisson( (4,), format='csr' )
>>> A.data[1] = 5.0
>>> A = scale_rows_by_largest_entry(A)
>>> A.todense()
matrix([[ 0.4,  1. ,  0. ,  0. ],
        [-0.5,  1. , -0.5,  0. ],
        [ 0. , -0.5,  1. , -0.5],
        [ 0. ,  0. , -0.5,  1. ]])
3.042825
3.55086
0.856926
if isinstance(to_levelize, tuple):
    if to_levelize[0] == 'predefined':
        to_levelize = [to_levelize]
        max_levels = 2
        max_coarse = 0
    else:
        to_levelize = [to_levelize for i in range(max_levels-1)]

elif isinstance(to_levelize, str):
    if to_levelize == 'predefined':
        raise ValueError('predefined to_levelize requires a '
                         'user-provided CSR matrix representing '
                         'strength or aggregation, i.e., '
                         '(\'predefined\', {\'C\' : CSR_MAT}).')
    else:
        to_levelize = [to_levelize for i in range(max_levels-1)]

elif isinstance(to_levelize, list):
    if isinstance(to_levelize[-1], tuple) and \
       (to_levelize[-1][0] == 'predefined'):
        # to_levelize is a list that ends with a predefined operator
        max_levels = len(to_levelize) + 1
        max_coarse = 0
    else:
        # to_levelize a list that __doesn't__ end with 'predefined'
        if len(to_levelize) < max_levels-1:
            mlz = max_levels - 1 - len(to_levelize)
            toext = [to_levelize[-1] for i in range(mlz)]
            to_levelize.extend(toext)

elif to_levelize is None:
    to_levelize = [(None, {}) for i in range(max_levels-1)]

else:
    raise ValueError('invalid to_levelize')

return max_levels, max_coarse, to_levelize
def levelize_strength_or_aggregation(to_levelize, max_levels, max_coarse)
Turn parameter into a list per level.

Helper function to preprocess the strength and aggregation parameters
passed to smoothed_aggregation_solver and rootnode_solver.

Parameters
----------
to_levelize : {string, tuple, list}
    Parameter to preprocess, i.e., levelize and convert to a
    level-by-level list such that entry i specifies the parameter at
    level i
max_levels : int
    Defines the maximum number of levels considered
max_coarse : int
    Defines the maximum coarse grid size allowed

Returns
-------
(max_levels, max_coarse, to_levelize) : tuple
    New max_levels and max_coarse values and then the parameter list
    to_levelize, such that entry i specifies the parameter choice at
    level i. max_levels and max_coarse are returned, because they may
    be updated if strength or aggregation set a predefined coarsening
    and possibly change these values.

Notes
-----
This routine is needed because the user will pass in a parameter option
such as smooth='jacobi', or smooth=['jacobi', None], and this option
must be "levelized", or converted to a list of length max_levels such
that entry [i] in that list is the parameter choice for level i.

The parameter choice in to_levelize can be a string, tuple or list. If
it is a string or tuple, then that option is assumed to be the
parameter setting at every level. If to_levelize is initially a list
and its length is less than max_levels, the last entry in the list
defines that parameter for all subsequent levels.

Examples
--------
>>> from pyamg.util.utils import levelize_strength_or_aggregation
>>> strength = ['evolution', 'classical']
>>> levelize_strength_or_aggregation(strength, 4, 10)
(4, 10, ['evolution', 'classical', 'classical'])
2.766921
2.828598
0.978195
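The 'predefined' branch above is the one case the docstring example does not cover: passing a ('predefined', {...}) tuple collapses the hierarchy to two levels and zeroes max_coarse. A minimal doctest-style sketch, using an identity matrix as a stand-in for the user-provided CSR operator:

    >>> from scipy.sparse import identity
    >>> from pyamg.util.utils import levelize_strength_or_aggregation
    >>> C = identity(10, format='csr')   # stand-in strength/aggregation matrix
    >>> max_levels, max_coarse, levelized = levelize_strength_or_aggregation(
    ...     ('predefined', {'C': C}), 5, 10)
    >>> (max_levels, max_coarse, len(levelized))
    (2, 0, 1)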
if isinstance(to_levelize, tuple) or isinstance(to_levelize, str): to_levelize = [to_levelize for i in range(max_levels)] elif isinstance(to_levelize, list): if len(to_levelize) < max_levels: mlz = max_levels - len(to_levelize) toext = [to_levelize[-1] for i in range(mlz)] to_levelize.extend(toext) elif to_levelize is None: to_levelize = [(None, {}) for i in range(max_levels)] return to_levelize
def levelize_smooth_or_improve_candidates(to_levelize, max_levels)
Turn a parameter into a per-level list.

Helper function to preprocess the smooth and improve_candidates parameters
passed to smoothed_aggregation_solver and rootnode_solver.

Parameters
----------
to_levelize : {string, tuple, list}
    Parameter to preprocess, i.e., levelize and convert to a level-by-level
    list such that entry i specifies the parameter at level i
max_levels : int
    Defines the maximum number of levels considered

Returns
-------
to_levelize : list
    The parameter list such that entry i specifies the parameter choice
    at level i.

Notes
-----
This routine is needed because the user will pass in a parameter option
such as smooth='jacobi', or smooth=['jacobi', None], and this option must
be "levelized", or converted to a list of length max_levels such that
entry [i] in that list is the parameter choice for level i.

The parameter choice in to_levelize can be a string, tuple or list. If
it is a string or tuple, then that option is assumed to be the parameter
setting at every level. If to_levelize is initially a list whose length
is less than max_levels, the last entry in the list defines the parameter
for all subsequent levels.

Examples
--------
>>> from pyamg.util.utils import levelize_smooth_or_improve_candidates
>>> improve_candidates = ['gauss_seidel', None]
>>> levelize_smooth_or_improve_candidates(improve_candidates, 4)
['gauss_seidel', None, None, None]
2.235112
2.358583
0.94765
if not isspmatrix(A): raise ValueError("Sparse matrix input needed") if isspmatrix_bsr(A): blocksize = A.blocksize Aformat = A.format if (theta < 0) or (theta >= 1.0): raise ValueError("theta must be in [0,1)") # Apply drop-tolerance to each column of A, which is most easily # accessed by converting to CSC. We apply the drop-tolerance with # amg_core.classical_strength_of_connection(), which ignores # diagonal entries, thus necessitating the trick where we add # A.shape[1] to each of the column indices A = A.copy().tocsc() A_filter = A.copy() A.indices += A.shape[1] A_filter.indices += A.shape[1] # classical_strength_of_connection takes an absolute value internally pyamg.amg_core.classical_strength_of_connection_abs( A.shape[1], theta, A.indptr, A.indices, A.data, A_filter.indptr, A_filter.indices, A_filter.data) A_filter.indices[:A_filter.indptr[-1]] -= A_filter.shape[1] A_filter = csc_matrix((A_filter.data[:A_filter.indptr[-1]], A_filter.indices[:A_filter.indptr[-1]], A_filter.indptr), shape=A_filter.shape) del A if Aformat == 'bsr': A_filter = A_filter.tobsr(blocksize) else: A_filter = A_filter.asformat(Aformat) return A_filter
def filter_matrix_columns(A, theta)
Filter each column of A with theta.

i.e., drop all entries in column k where
abs(A[i,k]) < theta * max( abs(A[:,k]) )

Parameters
----------
A : sparse_matrix
theta : float
    In range [0,1) and defines drop-tolerance used to filter the columns
    of A

Returns
-------
A_filter : sparse_matrix
    Each column has been filtered by dropping all entries where
    abs(A[i,k]) < theta * max( abs(A[:,k]) )

Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.util.utils import filter_matrix_columns
>>> from scipy import array
>>> from scipy.sparse import csr_matrix
>>> A = csr_matrix( array([[ 0.24, 1.  , 0.  ],
...                        [-0.5 , 1.  , -0.5 ],
...                        [ 0.  , 0.49, 1.  ],
...                        [ 0.  , 0.  , -0.5 ]]) )
>>> filter_matrix_columns(A, 0.5).todense()
matrix([[ 0. ,  1. ,  0. ],
        [-0.5,  1. , -0.5],
        [ 0. ,  0. ,  1. ],
        [ 0. ,  0. , -0.5]])
3.541747
3.445539
1.027922
if not isspmatrix(A): raise ValueError("Sparse matrix input needed") if isspmatrix_bsr(A): blocksize = A.blocksize Aformat = A.format A = A.tocsr() if (theta < 0) or (theta >= 1.0): raise ValueError("theta must be in [0,1)") # Apply drop-tolerance to each row of A. We apply the drop-tolerance with # amg_core.classical_strength_of_connection(), which ignores diagonal # entries, thus necessitating the trick where we add A.shape[0] to each of # the row indices A_filter = A.copy() A.indices += A.shape[0] A_filter.indices += A.shape[0] # classical_strength_of_connection takes an absolute value internally pyamg.amg_core.classical_strength_of_connection_abs( A.shape[0], theta, A.indptr, A.indices, A.data, A_filter.indptr, A_filter.indices, A_filter.data) A_filter.indices[:A_filter.indptr[-1]] -= A_filter.shape[0] A_filter = csr_matrix((A_filter.data[:A_filter.indptr[-1]], A_filter.indices[:A_filter.indptr[-1]], A_filter.indptr), shape=A_filter.shape) if Aformat == 'bsr': A_filter = A_filter.tobsr(blocksize) else: A_filter = A_filter.asformat(Aformat) A.indices -= A.shape[0] return A_filter
def filter_matrix_rows(A, theta)
Filter each row of A with theta.

i.e., drop all entries in row k where
abs(A[k,i]) < theta * max( abs(A[k,:]) )

Parameters
----------
A : sparse_matrix
theta : float
    In range [0,1) and defines drop-tolerance used to filter the rows of A

Returns
-------
A_filter : sparse_matrix
    Each row has been filtered by dropping all entries where
    abs(A[k,i]) < theta * max( abs(A[k,:]) )

Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.util.utils import filter_matrix_rows
>>> from scipy import array
>>> from scipy.sparse import csr_matrix
>>> A = csr_matrix( array([[ 0.24, -0.5 , 0.  , 0.  ],
...                        [ 1.  , 1.  , 0.49, 0.  ],
...                        [ 0.  , -0.5 , 1.  , -0.5 ]]) )
>>> filter_matrix_rows(A, 0.5).todense()
matrix([[ 0. , -0.5,  0. ,  0. ],
        [ 1. ,  1. ,  0. ,  0. ],
        [ 0. , -0.5,  1. , -0.5]])
3.22081
3.146634
1.023573
if not isspmatrix(A): raise ValueError("Sparse matrix input needed") if isspmatrix_bsr(A): blocksize = A.blocksize if isspmatrix_csr(A): A = A.copy() # don't modify A in-place Aformat = A.format A = A.tocsr() nz_per_row = int(nz_per_row) # Truncate rows of A, and then convert A back to original format pyamg.amg_core.truncate_rows_csr(A.shape[0], nz_per_row, A.indptr, A.indices, A.data) A.eliminate_zeros() if Aformat == 'bsr': A = A.tobsr(blocksize) else: A = A.asformat(Aformat) return A
def truncate_rows(A, nz_per_row)
Truncate the rows of A by keeping only the largest-in-magnitude entries in each row.

Parameters
----------
A : sparse_matrix
nz_per_row : int
    Determines how many entries in each row to keep

Returns
-------
A : sparse_matrix
    Each row has been truncated to at most nz_per_row entries

Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.util.utils import truncate_rows
>>> from scipy import array
>>> from scipy.sparse import csr_matrix
>>> A = csr_matrix( array([[-0.24, -0.5 , 0.  , 0.  ],
...                        [ 1.  , -1.1 , 0.49, 0.1 ],
...                        [ 0.  , 0.4 , 1.  , 0.5 ]]) )
>>> truncate_rows(A, 2).todense()
matrix([[-0.24, -0.5 ,  0.  ,  0.  ],
        [ 1.  , -1.1 ,  0.  ,  0.  ],
        [ 0.  ,  0.  ,  1.  ,  0.5 ]])
3.0361
3.365232
0.902196
@functools.wraps(function) def wrapper(self, *args, **kwargs): if not self.access_token(): raise MissingAccessTokenError return function(self, *args, **kwargs) return wrapper
def require_auth(function)
A decorator that wraps the passed-in function and raises an exception if the access token is missing
2.265151
2.167509
1.045048
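A usage sketch for the decorator above. DummyClient is hypothetical; only the access_token() method matters, since that is all the wrapper consults before delegating, and MissingAccessTokenError is the module's own exception:

    class DummyClient(object):
        def __init__(self, token=None):
            self._token = token

        def access_token(self):
            return self._token

        @require_auth
        def get_profile(self):
            return {'ok': True}

    DummyClient('abc').get_profile()   # returns {'ok': True}
    DummyClient().get_profile()        # raises MissingAccessTokenError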
@functools.wraps(function) def wrapper(self, *args, **kwargs): if self.randomize: self.randomize_headers() return function(self, *args, **kwargs) return wrapper
def randomizable(function)
A decorator which randomizes requests if needed
2.207891
1.883316
1.172342
r = self._session.get(API_URL + "/logins/me") r.raise_for_status() return r.json()
def get_profile(self)
Get my own profile
5.87077
4.688266
1.252226
params = { 'year': str(starting_year), 'listing_id': str(listing_id), '_format': 'with_conditions', 'count': str(calendar_months), 'month': str(starting_month) } r = self._session.get(API_URL + "/calendar_months", params=params) r.raise_for_status() return r.json()
def get_calendar(self, listing_id, starting_month=datetime.datetime.now().month, starting_year=datetime.datetime.now().year, calendar_months=12)
Get availability calendar for a given listing
3.123988
3.028315
1.031593
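Note that defaults like starting_month=datetime.datetime.now().month in the signature above are evaluated once, at import time, so they go stale in a long-running process. A sketch of the conventional None-sentinel fix (not the library's actual signature):

    import datetime

    def get_calendar(self, listing_id, starting_month=None, starting_year=None,
                     calendar_months=12):
        # Resolve "now" at call time rather than at function definition time.
        now = datetime.datetime.now()
        starting_month = now.month if starting_month is None else starting_month
        starting_year = now.year if starting_year is None else starting_year
        params = {
            'year': str(starting_year),
            'listing_id': str(listing_id),
            '_format': 'with_conditions',
            'count': str(calendar_months),
            'month': str(starting_month)
        }
        # ... the request logic is unchanged from the original above.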
params = { '_order': 'language_country', 'listing_id': str(listing_id), '_offset': str(offset), 'role': 'all', '_limit': str(limit), '_format': 'for_mobile_client', } print(self._session.headers) r = self._session.get(API_URL + "/reviews", params=params) r.raise_for_status() return r.json()
def get_reviews(self, listing_id, offset=0, limit=20)
Get reviews for a given listing
4.313791
4.255585
1.013677
params = {
    '_format': 'host_calendar_detailed'
}

starting_date_str = starting_date.strftime("%Y-%m-%d")
# Honor the calendar_months parameter instead of a hardcoded 30-day
# window (each month is approximated as 30 days here).
ending_date_str = (
    starting_date + datetime.timedelta(days=30 * calendar_months)
).strftime("%Y-%m-%d")

r = self._session.get(API_URL + "/calendars/{}/{}/{}".format(
    str(listing_id), starting_date_str, ending_date_str),
    params=params)
r.raise_for_status()
return r.json()
def get_listing_calendar(self, listing_id, starting_date=datetime.datetime.now(), calendar_months=6)
Get host availability calendar for a given listing
2.977846
2.727091
1.09195
params = { 'is_guided_search': 'true', 'version': '1.3.9', 'section_offset': '0', 'items_offset': str(offset), 'adults': '0', 'screen_size': 'small', 'source': 'explore_tabs', 'items_per_grid': str(items_per_grid), '_format': 'for_explore_search_native', 'metadata_only': 'false', 'refinement_paths[]': '/homes', 'timezone': 'Europe/Lisbon', 'satori_version': '1.0.7' } if not query and not (gps_lat and gps_lng): raise MissingParameterError("Missing query or gps coordinates") if query: params['query'] = query if gps_lat and gps_lng: params['gps_lat'] = gps_lat params['gps_lng'] = gps_lng r = self._session.get(API_URL + '/explore_tabs', params=params) r.raise_for_status() return r.json()
def get_homes(self, query=None, gps_lat=None, gps_lng=None, offset=0, items_per_grid=8)
Search listings with * Query (e.g. query="Lisbon, Portugal") or * Location (e.g. gps_lat=55.6123352&gps_lng=37.7117917)
3.511722
3.411924
1.02925
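A usage sketch for get_homes; api stands for a hypothetical, already-authenticated client instance:

    homes = api.get_homes(query="Lisbon, Portugal")
    # or, by coordinates:
    homes = api.get_homes(gps_lat=55.6123352, gps_lng=37.7117917)
    # calling with neither raises MissingParameterError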
'''
Note: If the shellcode starts with '66' controls, it needs to be
changed to add [BITS 32] or [BITS 64] to the start.

To use:
convert("""
55
53
50
BDE97F071E
FFD5
BDD67B071E
FFD5
5D
5B
58
C3
""")
'''
code = code.replace(' ', '')
code = ''.join(code.splitlines())  # remove new lines
# Python 3 replacement for the Py2-only code.decode('hex')
return bytes.fromhex(code)
def convert(self, code)
Note: If the shellcode starts with '66' controls, it needs to be changed to add [BITS 32] or [BITS 64] to the start. To use: convert(""" 55 53 50 BDE97F071E FFD5 BDD67B071E FFD5 5D 5B 58 C3 """)
8.868506
2.658297
3.336161
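A quick doctest-style check of the whitespace stripping and hex decoding that convert() performs, using bytes.fromhex (the Python 3 spelling of the Py2-only str.decode('hex')):

    >>> code = """
    ... 55
    ... 53
    ... 50
    ... """
    >>> cleaned = ''.join(code.replace(' ', '').splitlines())
    >>> bytes.fromhex(cleaned)
    b'USP'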
''' :return list(tuple(method_name, docstring, parameters, completion_type)) method_name: str docstring: str parameters: str -- i.e.: "(a, b)" completion_type is an int See: _pydev_bundle._pydev_imports_tipper for TYPE_ constants ''' if frame is None: return [] # Not using frame.f_globals because of https://sourceforge.net/tracker2/?func=detail&aid=2541355&group_id=85796&atid=577329 # (Names not resolved in generator expression in method) # See message: http://mail.python.org/pipermail/python-list/2009-January/526522.html updated_globals = {} updated_globals.update(frame.f_globals) updated_globals.update(frame.f_locals) # locals later because it has precedence over the actual globals if pydevconsole.IPYTHON: completions = pydevconsole.get_completions(act_tok, act_tok, updated_globals, frame.f_locals) else: completer = Completer(updated_globals, None) # list(tuple(name, descr, parameters, type)) completions = completer.complete(act_tok) return completions
def generate_completions(frame, act_tok)
:return list(tuple(method_name, docstring, parameters, completion_type)) method_name: str docstring: str parameters: str -- i.e.: "(a, b)" completion_type is an int See: _pydev_bundle._pydev_imports_tipper for TYPE_ constants
5.750112
3.559192
1.615567
'''
Extracts the token and qualifier from the text given the line/column
(see test_extract_token_and_qualifier for examples).

:param unicode text:
:param int line: 0-based
:param int column: 0-based
'''
# Note: not using the tokenize module because text should be unicode and
# line/column refer to the unicode text (otherwise we'd have to know
# those ranges after converted to bytes).
if line < 0:
    line = 0
if column < 0:
    column = 0

if isinstance(text, bytes):
    text = text.decode('utf-8')

lines = text.splitlines()
try:
    text = lines[line]
except IndexError:
    return TokenAndQualifier(u'', u'')

if column >= len(text):
    column = len(text)

text = text[:column]
token = u''
qualifier = u''

temp_token = []
for i in range(column - 1, -1, -1):
    c = text[i]
    if c in identifier_part or isidentifier(c) or c == u'.':
        temp_token.append(c)
    else:
        break
temp_token = u''.join(reversed(temp_token))
if u'.' in temp_token:
    temp_token = temp_token.split(u'.')
    token = u'.'.join(temp_token[:-1])
    qualifier = temp_token[-1]
else:
    qualifier = temp_token

return TokenAndQualifier(token, qualifier)
def extract_token_and_qualifier(text, line=0, column=0)
Extracts the token and qualifier from the text given the line/column
(see test_extract_token_and_qualifier for examples).

:param unicode text:
:param int line: 0-based
:param int column: 0-based
3.061688
2.485926
1.231609
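A worked example of the splitting behavior, traced from the code above (the token/qualifier attribute names assume TokenAndQualifier is the usual namedtuple):

    tq = extract_token_and_qualifier(u'self.foo.ba', line=0, column=11)
    # tq.token == u'self.foo', tq.qualifier == u'ba'
    # (the qualifier is the partial name being completed)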
if self.use_main_ns:
    # In pydev this option should never be used
    raise RuntimeError('Namespace must be provided!')
    # (unreachable fallback to __main__.__dict__ removed)

if "." in text:
    return self.attr_matches(text)
else:
    return self.global_matches(text)
def complete(self, text)
Return the possible completions for 'text' as a list.

Unlike readline's completer protocol (which is called successively with
state == 0, 1, 2, ... until it returns None), this implementation returns
all matches at once. Each completion begins with 'text'.
10.669305
11.48894
0.928659
def get_item(obj, attr): return obj[attr] a = {} for dict_with_comps in [__builtin__.__dict__, self.namespace, self.global_namespace]: # @UndefinedVariable a.update(dict_with_comps) filter = _StartsWithFilter(text) return dir2(a, a.keys(), get_item, filter)
def global_matches(self, text)
Compute matches when text is a simple name. Return a list of all keywords, built-in functions and names currently defined in self.namespace or self.global_namespace that match.
9.751831
8.102592
1.203545
import re # Another option, seems to work great. Catches things like ''.<tab> m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text) # @UndefinedVariable if not m: return [] expr, attr = m.group(1, 3) try: obj = eval(expr, self.namespace) except: try: obj = eval(expr, self.global_namespace) except: return [] filter = _StartsWithFilter(attr) words = dir2(obj, filter=filter) return words
def attr_matches(self, text)
Compute matches when text contains a dot.

Assuming the text is of the form NAME.NAME....[NAME], and is
evaluatable in self.namespace or self.global_namespace, it will be
evaluated and its attributes (as revealed by dir()) are used as
possible completions. (For class instances, class members are
also considered.)

WARNING: this can still invoke arbitrary C code, if an object
with a __getattr__ hook is evaluated.
6.728119
5.362571
1.254644
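The eval-based resolution above depends on module internals (dir2, _StartsWithFilter), so here is a self-contained toy reduction using plain re and dir, with the same regex as the source; the exact completion list varies by Python version:

    import re

    def toy_attr_matches(text, namespace):
        # Same pattern as attr_matches: EXPR '.' PARTIAL_ATTR at end of text.
        m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
        if not m:
            return []
        expr, attr = m.group(1, 3)
        try:
            obj = eval(expr, namespace)
        except Exception:
            return []
        return [w for w in dir(obj) if w.startswith(attr)]

    print(toy_attr_matches('x.bit', {'x': 1}))  # ['bit_length'] (plus 'bit_count' on 3.10+)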
''' :param dict fmt: Format expected by the DAP (keys: 'hex': bool, 'rawString': bool) ''' safe_repr = SafeRepr() if fmt is not None: safe_repr.convert_to_hex = fmt.get('hex', False) safe_repr.raw_value = fmt.get('rawString', False) type_name, _type_qualifier, _is_exception_on_eval, resolver, value = get_variable_details( self.value, to_string=safe_repr) is_raw_string = type_name in ('str', 'unicode', 'bytes', 'bytearray') attributes = [] if is_raw_string: attributes.append('rawString') name = self.name if self._is_return_value: attributes.append('readOnly') name = '(return) %s' % (name,) var_data = { 'name': name, 'value': value, 'type': type_name, } if self.evaluate_name is not None: var_data['evaluateName'] = self.evaluate_name if resolver is not None: # I.e.: it's a container var_data['variablesReference'] = self.get_variable_reference() if len(attributes) > 0: var_data['presentationHint'] = {'attributes': attributes} return var_data
def get_var_data(self, fmt=None)
:param dict fmt: Format expected by the DAP (keys: 'hex': bool, 'rawString': bool)
4.6532
3.586666
1.297361
'''
:param thread_id: The thread id to be used for this frame.

:param frame: The topmost frame which is suspended at the given thread.

:param frame_id_to_lineno: If available, the line number for the frame
    will be gotten from this dict, otherwise frame.f_lineno will be used
    (needed for unhandled exceptions as the place where we report may be
    different from the place where it's raised).

:param frame_custom_thread_id: If None, thread_id is used; otherwise this
    is the thread id for the custom frame (i.e.: coroutine).
'''
with self._lock:
    coroutine_or_main_thread_id = frame_custom_thread_id or thread_id

    if coroutine_or_main_thread_id in self._suspended_frames_manager._thread_id_to_tracker:
        sys.stderr.write('pydevd: Something is wrong. Tracker being added twice to the same thread id.\n')

    self._suspended_frames_manager._thread_id_to_tracker[coroutine_or_main_thread_id] = self
    self._main_thread_id = thread_id
    self._frame_id_to_lineno = frame_id_to_lineno

    frame_ids_from_thread = self._thread_id_to_frame_ids.setdefault(
        coroutine_or_main_thread_id, [])

    while frame is not None:
        frame_id = id(frame)
        self._frame_id_to_frame[frame_id] = frame
        _FrameVariable(frame, self._register_variable)  # Instancing is enough to register.
        self._suspended_frames_manager._variable_reference_to_frames_tracker[frame_id] = self
        frame_ids_from_thread.append(frame_id)

        self._frame_id_to_main_thread_id[frame_id] = thread_id

        frame = frame.f_back
def track(self, thread_id, frame, frame_id_to_lineno, frame_custom_thread_id=None)
:param thread_id: The thread id to be used for this frame.

:param frame: The topmost frame which is suspended at the given thread.

:param frame_id_to_lineno: If available, the line number for the frame
    will be gotten from this dict, otherwise frame.f_lineno will be used
    (needed for unhandled exceptions as the place where we report may be
    different from the place where it's raised).

:param frame_custom_thread_id: If None, thread_id is used; otherwise this
    is the thread id for the custom frame (i.e.: coroutine).
3.914327
2.290846
1.708682
''' We can't evaluate variable references values on any thread, only in the suspended thread (the main reason for this is that in UI frameworks inspecting a UI object from a different thread can potentially crash the application). :param int variable_reference: The variable reference (can be either a frame id or a reference to a previously gotten variable). :return str: The thread id for the thread to be used to inspect the given variable reference or None if the thread was already resumed. ''' frames_tracker = self._get_tracker_for_variable_reference(variable_reference) if frames_tracker is not None: return frames_tracker.get_main_thread_id() return None
def get_thread_id_for_variable_reference(self, variable_reference)
We can't evaluate variable references values on any thread, only in the suspended thread (the main reason for this is that in UI frameworks inspecting a UI object from a different thread can potentially crash the application). :param int variable_reference: The variable reference (can be either a frame id or a reference to a previously gotten variable). :return str: The thread id for the thread to be used to inspect the given variable reference or None if the thread was already resumed.
7.912347
1.584166
4.994644
''' :raises KeyError ''' frames_tracker = self._get_tracker_for_variable_reference(variable_reference) if frames_tracker is None: raise KeyError() return frames_tracker.get_variable(variable_reference)
def get_variable(self, variable_reference)
:raises KeyError
4.573453
3.963077
1.154016
matplotlib = sys.modules['matplotlib'] # WARNING: this assumes matplotlib 1.1 or newer!! backend = matplotlib.rcParams['backend'] # In this case, we need to find what the appropriate gui selection call # should be for IPython, so we can activate inputhook accordingly gui = backend2gui.get(backend, None) return gui, backend
def find_gui_and_backend()
Return the gui and mpl backend.
12.112193
11.305665
1.071338
matplotlib = sys.modules['matplotlib'] from matplotlib.rcsetup import interactive_bk, non_interactive_bk # @UnresolvedImport if backend in interactive_bk: return True elif backend in non_interactive_bk: return False else: return matplotlib.is_interactive()
def is_interactive_backend(backend)
Check if backend is interactive
3.59159
3.372241
1.065046
matplotlib = sys.modules['matplotlib'] def patched_use(*args, **kwargs): matplotlib.real_use(*args, **kwargs) gui, backend = find_gui_and_backend() enable_gui_function(gui) matplotlib.real_use = matplotlib.use matplotlib.use = patched_use
def patch_use(enable_gui_function)
Patch matplotlib function 'use'
3.644461
3.432866
1.061638
matplotlib = sys.modules['matplotlib'] def patched_is_interactive(): return matplotlib.rcParams['interactive'] matplotlib.real_is_interactive = matplotlib.is_interactive matplotlib.is_interactive = patched_is_interactive
def patch_is_interactive()
Patch matplotlib function 'is_interactive'
3.746402
3.174348
1.180212
matplotlib = sys.modules['matplotlib'] gui, backend = find_gui_and_backend() is_interactive = is_interactive_backend(backend) if is_interactive: enable_gui_function(gui) if not matplotlib.is_interactive(): sys.stdout.write("Backend %s is interactive backend. Turning interactive mode on.\n" % backend) matplotlib.interactive(True) else: if matplotlib.is_interactive(): sys.stdout.write("Backend %s is non-interactive backend. Turning interactive mode off.\n" % backend) matplotlib.interactive(False) patch_use(enable_gui_function) patch_is_interactive()
def activate_matplotlib(enable_gui_function)
Set interactive to True for interactive backends. enable_gui_function - Function which enables gui, should be run in the main thread.
2.833866
2.858743
0.991298
# don't wrap twice if hasattr(func, 'called'): return func def wrapper(*args, **kw): wrapper.called = False out = func(*args, **kw) wrapper.called = True return out wrapper.called = False wrapper.__doc__ = func.__doc__ return wrapper
def flag_calls(func)
Wrap a function to detect and flag when it gets called. This is a decorator which takes a function and wraps it in a function with a 'called' attribute. wrapper.called is initialized to False. The wrapper.called attribute is set to False right before each call to the wrapped function, so if the call fails it remains False. After the call completes, wrapper.called is set to True and the output is returned. Testing for truth in wrapper.called allows you to determine if a call to func() was attempted and succeeded.
2.854527
2.705409
1.055118
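The contract stated in the docstring, as a doctest-style check:

    >>> def add(a, b):
    ...     return a + b
    >>> add = flag_calls(add)
    >>> add.called
    False
    >>> add(1, 2)
    3
    >>> add.called
    True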
''' Checks whether the file can be read by the coverage module. This is especially needed for .pyx files and .py files with syntax errors. ''' import os is_valid = False if os.path.isfile(path) and not os.path.splitext(path)[1] == '.pyx': try: with open(path, 'rb') as f: compile(f.read(), path, 'exec') is_valid = True except: pass return is_valid
def is_valid_py_file(path)
Checks whether the file can be read by the coverage module. This is especially needed for .pyx files and .py files with syntax errors.
3.624357
1.927202
1.880632
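A usage sketch; the file is written first and checked only after the handle is closed, since the function reopens the path itself:

    import os
    import tempfile

    with tempfile.NamedTemporaryFile('w', suffix='.py', delete=False) as f:
        f.write('x = 1\n')
        path = f.name
    print(is_valid_py_file(path))          # True
    os.unlink(path)

    print(is_valid_py_file('missing.py'))  # False: not a file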
pid = self.get_pid() system = self.debug.system if system.has_process(pid): process = system.get_process(pid) else: # XXX HACK # The process object was missing for some reason, so make a new one. process = Process(pid) system._add_process(process) ## process.scan_threads() # not needed process.scan_modules() return process
def get_process(self)
@see: L{get_pid}

@rtype: L{Process}
@return: Process where the event occurred.
5.835752
6.076789
0.960335
tid = self.get_tid() process = self.get_process() if process.has_thread(tid): thread = process.get_thread(tid) else: # XXX HACK # The thread object was missing for some reason, so make a new one. thread = Thread(tid) process._add_thread(thread) return thread
def get_thread(self)
@see: L{get_tid}

@rtype: L{Thread}
@return: Thread where the event occurred.
4.40941
4.45682
0.989362
return bool( self.raw.u.Exception.ExceptionRecord.ExceptionFlags & \ win32.EXCEPTION_NONCONTINUABLE )
def is_noncontinuable(self)
@see: U{http://msdn.microsoft.com/en-us/library/aa363082(VS.85).aspx} @rtype: bool @return: C{True} if the exception is noncontinuable, C{False} otherwise. Attempting to continue a noncontinuable exception results in an EXCEPTION_NONCONTINUABLE_EXCEPTION exception to be raised.
26.469658
13.450535
1.967926
# Valid indices are 0 .. EXCEPTION_MAXIMUM_PARAMETERS - 1, so the upper
# bound check must be >= rather than > to reject the one-past-the-end index.
if index < 0 or index >= win32.EXCEPTION_MAXIMUM_PARAMETERS:
    raise IndexError("Array index out of range: %s" % repr(index))

info = self.raw.u.Exception.ExceptionRecord.ExceptionInformation
value = info[index]
if value is None:
    value = 0
return value
def get_exception_information(self, index)
@type index: int @param index: Index into the exception information block. @rtype: int @return: Exception information DWORD.
5.544718
5.073977
1.092776
if self.get_exception_code() not in (win32.EXCEPTION_ACCESS_VIOLATION, win32.EXCEPTION_IN_PAGE_ERROR, win32.EXCEPTION_GUARD_PAGE): msg = "This method is not meaningful for %s." raise NotImplementedError(msg % self.get_exception_name()) return self.get_exception_information(0)
def get_fault_type(self)
@rtype: int @return: Access violation type. Should be one of the following constants: - L{win32.EXCEPTION_READ_FAULT} - L{win32.EXCEPTION_WRITE_FAULT} - L{win32.EXCEPTION_EXECUTE_FAULT} @note: This method is only meaningful for access violation exceptions, in-page memory error exceptions and guard page exceptions. @raise NotImplementedError: Wrong kind of exception.
4.764897
3.040203
1.567296
if self.get_exception_code() != win32.EXCEPTION_IN_PAGE_ERROR: msg = "This method is only meaningful "\ "for in-page memory error exceptions." raise NotImplementedError(msg) return self.get_exception_information(2)
def get_ntstatus_code(self)
@rtype: int @return: NTSTATUS status code that caused the exception. @note: This method is only meaningful for in-page memory error exceptions. @raise NotImplementedError: Not an in-page memory error.
10.102329
4.159399
2.428796
# The first EXCEPTION_RECORD is contained in EXCEPTION_DEBUG_INFO. # The remaining EXCEPTION_RECORD structures are linked by pointers. nested = list() record = self.raw.u.Exception while True: record = record.ExceptionRecord if not record: break nested.append(record) return nested
def get_raw_exception_record_list(self)
Traverses the exception record linked list and builds a Python list. Nested exception records are received for nested exceptions. This happens when an exception is raised in the debugee while trying to handle a previous exception. @rtype: list( L{win32.EXCEPTION_RECORD} ) @return: List of raw exception record structures as used by the Win32 API. There is always at least one exception record, so the list is never empty. All other methods of this class read from the first exception record only, that is, the most recent exception.
10.197886
8.064758
1.2645
# The list always begins with ourselves. # Just put a reference to "self" as the first element, # and start looping from the second exception record. nested = [ self ] raw = self.raw dwDebugEventCode = raw.dwDebugEventCode dwProcessId = raw.dwProcessId dwThreadId = raw.dwThreadId dwFirstChance = raw.u.Exception.dwFirstChance record = raw.u.Exception.ExceptionRecord while True: record = record.ExceptionRecord if not record: break raw = win32.DEBUG_EVENT() raw.dwDebugEventCode = dwDebugEventCode raw.dwProcessId = dwProcessId raw.dwThreadId = dwThreadId raw.u.Exception.ExceptionRecord = record raw.u.Exception.dwFirstChance = dwFirstChance event = EventFactory.get(self.debug, raw) nested.append(event) return nested
def get_nested_exceptions(self)
Traverses the exception record linked list and builds a Python list. Nested exception records are received for nested exceptions. This happens when an exception is raised in the debugee while trying to handle a previous exception. @rtype: list( L{ExceptionEvent} ) @return: List of ExceptionEvent objects representing each exception record found in this event. There is always at least one exception record, so the list is never empty. All other methods of this class read from the first exception record only, that is, the most recent exception.
4.43859
4.058439
1.093669
eventClass = cls.eventClasses.get(raw.dwDebugEventCode, cls.baseEvent) return eventClass(debug, raw)
def get(cls, debug, raw)
@type debug: L{Debug}
@param debug: Debug object that received the event.

@type raw: L{DEBUG_EVENT}
@param raw: Raw DEBUG_EVENT structure as used by the Win32 API.

@rtype: L{Event}
@return: An Event object or one of its subclasses,
    depending on the event type.
8.849003
6.087083
1.453735
result = [] if self.__apiHooks: path = event.get_module().get_filename() if path: lib_name = PathOperations.pathname_to_filename(path).lower() for hook_lib, hook_api_list in compat.iteritems(self.__apiHooks): if hook_lib == lib_name: result.extend(hook_api_list) return result
def __get_hooks_for_dll(self, event)
Get the requested API hooks for the current DLL. Used by L{__hook_dll} and L{__unhook_dll}.
4.93206
4.450525
1.108197
debug = event.debug pid = event.get_pid() for hook_api_stub in self.__get_hooks_for_dll(event): hook_api_stub.hook(debug, pid)
def __hook_dll(self, event)
Hook the requested API calls (in self.apiHooks). This method is called automatically whenever a DLL is loaded.
6.983165
7.037913
0.992221
debug = event.debug pid = event.get_pid() for hook_api_stub in self.__get_hooks_for_dll(event): hook_api_stub.unhook(debug, pid)
def __unhook_dll(self, event)
Unhook the requested API calls (in self.apiHooks). This method is called automatically whenever a DLL is unloaded.
6.457569
6.391737
1.010299
eventCode = event.get_event_code() pid = event.get_pid() handler = self.forward.get(pid, None) if handler is None: handler = self.cls(*self.argv, **self.argd) if eventCode != win32.EXIT_PROCESS_DEBUG_EVENT: self.forward[pid] = handler elif eventCode == win32.EXIT_PROCESS_DEBUG_EVENT: del self.forward[pid] return handler(event)
def event(self, event)
Forwards events to the corresponding instance of your event handler for this process. If you subclass L{EventSift} and reimplement this method, no event will be forwarded at all unless you call the superclass implementation. If your filtering is based on the event type, there's a much easier way to do it: just implement a handler for it.
3.924202
4.257034
0.921816
if eventHandler is not None and not callable(eventHandler):
    raise TypeError("Event handler must be a callable object")
try:
    wrong_type = issubclass(eventHandler, EventHandler)
except TypeError:
    wrong_type = False
if wrong_type:
    classname = str(eventHandler)
    # Note the trailing space: the two halves of the message are
    # concatenated, so omitting it would glue "%s" to "rather".
    msg = "Event handler must be an instance of class %s "
    msg += "rather than the %s class itself. (Missing parens?)"
    msg = msg % (classname, classname)
    raise TypeError(msg)
try:
    previous = self.__eventHandler
except AttributeError:
    previous = None
self.__eventHandler = eventHandler
return previous
def set_event_handler(self, eventHandler)
Set the event handler. @warn: This is normally not needed. Use with care! @type eventHandler: L{EventHandler} @param eventHandler: New event handler object, or C{None}. @rtype: L{EventHandler} @return: Previous event handler object, or C{None}. @raise TypeError: The event handler is of an incorrect type. @note: The L{eventHandler} parameter may be any callable Python object (for example a function, or an instance method). However you'll probably find it more convenient to use an instance of a subclass of L{EventHandler} here.
3.384156
3.474753
0.973927
eventCode = event.get_event_code() method = getattr(eventHandler, 'event', fallback) if eventCode == win32.EXCEPTION_DEBUG_EVENT: method = getattr(eventHandler, 'exception', method) method = getattr(eventHandler, event.eventMethod, method) return method
def get_handler_method(eventHandler, event, fallback=None)
Retrieves the appropriate callback method from an L{EventHandler} instance for the given L{Event} object. @type eventHandler: L{EventHandler} @param eventHandler: Event handler object whose methods we are examining. @type event: L{Event} @param event: Debugging event to be handled. @type fallback: callable @param fallback: (Optional) If no suitable method is found in the L{EventHandler} instance, return this value. @rtype: callable @return: Bound method that will handle the debugging event. Returns C{None} if no such method is defined.
4.929862
5.507479
0.895121
returnValue = None bCallHandler = True pre_handler = None post_handler = None eventCode = event.get_event_code() # Get the pre and post notification methods for exceptions. # If not found, the following steps take care of that. if eventCode == win32.EXCEPTION_DEBUG_EVENT: exceptionCode = event.get_exception_code() pre_name = self.__preExceptionNotifyCallbackName.get( exceptionCode, None) post_name = self.__postExceptionNotifyCallbackName.get( exceptionCode, None) if pre_name is not None: pre_handler = getattr(self, pre_name, None) if post_name is not None: post_handler = getattr(self, post_name, None) # Get the pre notification method for all other events. # This includes the exception event if no notify method was found # for this exception code. if pre_handler is None: pre_name = self.__preEventNotifyCallbackName.get(eventCode, None) if pre_name is not None: pre_handler = getattr(self, pre_name, pre_handler) # Get the post notification method for all other events. # This includes the exception event if no notify method was found # for this exception code. if post_handler is None: post_name = self.__postEventNotifyCallbackName.get(eventCode, None) if post_name is not None: post_handler = getattr(self, post_name, post_handler) # Call the pre-notify method only if it was defined. # If an exception is raised don't call the other methods. if pre_handler is not None: bCallHandler = pre_handler(event) # Call the user-defined event handler only if the pre-notify # method was not defined, or was and it returned True. try: if bCallHandler and self.__eventHandler is not None: try: returnValue = self.__eventHandler(event) except Exception: e = sys.exc_info()[1] msg = ("Event handler pre-callback %r" " raised an exception: %s") msg = msg % (self.__eventHandler, traceback.format_exc(e)) warnings.warn(msg, EventCallbackWarning) returnValue = None # Call the post-notify method if defined, even if an exception is # raised by the user-defined event handler. finally: if post_handler is not None: post_handler(event) # Return the value from the call to the user-defined event handler. # If not defined return None. return returnValue
def dispatch(self, event)
Sends event notifications to the L{Debug} object and the L{EventHandler}
object provided by the user.

The L{Debug} object will forward the notifications to its contained
snapshot objects (L{System}, L{Process}, L{Thread} and L{Module}) when
appropriate.

@warning: This method is called automatically from L{Debug.dispatch}.

@see: L{Debug.cont}, L{Debug.loop}, L{Debug.wait}

@type event: L{Event}
@param event: Event object passed to L{Debug.dispatch}.

@raise WindowsError: Raises an exception on error.
2.73363
2.821996
0.968687
for skip_path in config['skip']: if posixpath.abspath(posixpath.join(path, filename)) == posixpath.abspath(skip_path.replace('\\', '/')): return True position = os.path.split(filename) while position[1]: if position[1] in config['skip']: return True position = os.path.split(position[0]) for glob in config['skip_glob']: if fnmatch.fnmatch(filename, glob): return True return False
def should_skip(filename, config, path='/')
Returns True if the file should be skipped based on the passed in settings.
2.588725
2.558114
1.011966
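A sketch with a minimal config dict; real isort configs carry many more keys, but only 'skip' and 'skip_glob' are read here:

    config = {'skip': ['build'], 'skip_glob': ['*_pb2.py']}
    print(should_skip('build/module.py', config))  # True: 'build' is a path component in 'skip'
    print(should_skip('foo_pb2.py', config))       # True: matches the '*_pb2.py' glob
    print(should_skip('main.py', config))          # False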
'''formats an argument to be shown ''' s = str(arg) dot = s.rfind('.') if dot >= 0: s = s[dot + 1:] s = s.replace(';', '') s = s.replace('[]', 'Array') if len(s) > 0: c = s[0].lower() s = c + s[1:] return s
def format_arg(arg)
formats an argument to be shown
3.706898
3.738062
0.991663
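Two worked examples, traced directly from the transformations above (strip to the last dotted component, drop ';', rewrite '[]' as 'Array', lowercase the first letter):

    print(format_arg('java.lang.String[]'))  # stringArray
    print(format_arg('int[]'))               # intArray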
''' Runs a function as a pydevd daemon thread (without any tracing in place). ''' t = PyDBDaemonThread(target_and_args=(func, args, kwargs)) t.name = '%s (pydevd daemon thread)' % (func.__name__,) t.start() return t
def run_as_pydevd_daemon_thread(func, *args, **kwargs)
Runs a function as a pydevd daemon thread (without any tracing in place).
5.186083
3.383881
1.532584
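A usage sketch; the returned thread is a daemon, so it won't block interpreter exit, and its name embeds the function name as shown in the code above:

    import time

    def heartbeat(interval):
        while True:
            time.sleep(interval)

    t = run_as_pydevd_daemon_thread(heartbeat, 0.5)
    print(t.name)  # heartbeat (pydevd daemon thread)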
''' binds to a port, waits for the debugger to connect ''' s = socket(AF_INET, SOCK_STREAM) s.settimeout(None) try: from socket import SO_REUSEPORT s.setsockopt(SOL_SOCKET, SO_REUSEPORT, 1) except ImportError: s.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1) s.bind(('', port)) pydev_log.info("Bound to port :%s", port) try: s.listen(1) newSock, _addr = s.accept() pydev_log.info("Connection accepted") # closing server socket is not necessary but we don't need it s.shutdown(SHUT_RDWR) s.close() return newSock except: pydev_log.exception("Could not bind to port: %s\n", port)
def start_server(port)
binds to a port, waits for the debugger to connect
3.139899
2.971517
1.056665
''' connects to a host/port ''' pydev_log.info("Connecting to %s:%s", host, port) s = socket(AF_INET, SOCK_STREAM) # Set TCP keepalive on an open socket. # It activates after 1 second (TCP_KEEPIDLE,) of idleness, # then sends a keepalive ping once every 3 seconds (TCP_KEEPINTVL), # and closes the connection after 5 failed ping (TCP_KEEPCNT), or 15 seconds try: from socket import IPPROTO_TCP, SO_KEEPALIVE, TCP_KEEPIDLE, TCP_KEEPINTVL, TCP_KEEPCNT s.setsockopt(SOL_SOCKET, SO_KEEPALIVE, 1) s.setsockopt(IPPROTO_TCP, TCP_KEEPIDLE, 1) s.setsockopt(IPPROTO_TCP, TCP_KEEPINTVL, 3) s.setsockopt(IPPROTO_TCP, TCP_KEEPCNT, 5) except ImportError: pass # May not be available everywhere. try: # 10 seconds default timeout timeout = int(os.environ.get('PYDEVD_CONNECT_TIMEOUT', 10)) s.settimeout(timeout) s.connect((host, port)) s.settimeout(None) # no timeout after connected pydev_log.info("Connected.") return s except: pydev_log.exception("Could not connect to %s: %s", host, port) raise
def start_client(host, port)
connects to a host/port
2.54094
2.552558
0.995448
''' :param VariablesRequest request: ''' arguments = request.arguments # : :type arguments: VariablesArguments variables_reference = arguments.variablesReference fmt = arguments.format if hasattr(fmt, 'to_dict'): fmt = fmt.to_dict() variables = [] try: variable = py_db.suspended_frames_manager.get_variable(variables_reference) except KeyError: pass else: for child_var in variable.get_children_variables(fmt=fmt): variables.append(child_var.get_var_data(fmt=fmt)) body = VariablesResponseBody(variables) variables_response = pydevd_base_schema.build_response(request, kwargs={'body':body}) py_db.writer.add_command(NetCommand(CMD_RETURN, 0, variables_response, is_json=True))
def internal_get_variable_json(py_db, request)
:param VariablesRequest request:
5.569073
5.160079
1.079261
''' Changes the value of a variable ''' try: frame = dbg.find_frame(thread_id, frame_id) if frame is not None: result = pydevd_vars.change_attr_expression(frame, attr, value, dbg) else: result = None xml = "<xml>" xml += pydevd_xml.var_to_xml(result, "") xml += "</xml>" cmd = dbg.cmd_factory.make_variable_changed_message(seq, xml) dbg.writer.add_command(cmd) except Exception: cmd = dbg.cmd_factory.make_error_message(seq, "Error changing variable attr:%s expression:%s traceback:%s" % (attr, value, get_exception_traceback_str())) dbg.writer.add_command(cmd)
def internal_change_variable(dbg, seq, thread_id, frame_id, scope, attr, value)
Changes the value of a variable
3.642255
3.73935
0.974034
'''
The pydevd_vars.change_attr_expression(thread_id, frame_id, attr, value, dbg)
can only deal with changing at a frame level, so changing the contents of
something in a different scope is currently not supported.

TODO: make the resolvers structure resolve the name and change
accordingly -- for instance, the list resolver should change the value
considering the index.

:param SetVariableRequest request:
'''
# : :type arguments: SetVariableArguments
arguments = request.arguments
variables_reference = arguments.variablesReference
fmt = arguments.format
if hasattr(fmt, 'to_dict'):
    fmt = fmt.to_dict()

# : :type frame: _FrameVariable
frame_variable = py_db.suspended_frames_manager.get_variable(variables_reference)
if hasattr(frame_variable, 'frame'):
    frame = frame_variable.frame
    pydevd_vars.change_attr_expression(frame, arguments.name, arguments.value, py_db)

    for child_var in frame_variable.get_children_variables(fmt=fmt):
        if child_var.get_name() == arguments.name:
            var_data = child_var.get_var_data(fmt=fmt)
            body = SetVariableResponseBody(
                value=var_data['value'],
                type=var_data['type'],
                variablesReference=var_data.get('variablesReference'),
                namedVariables=var_data.get('namedVariables'),
                indexedVariables=var_data.get('indexedVariables'),
            )
            variables_response = pydevd_base_schema.build_response(request, kwargs={'body':body})
            py_db.writer.add_command(NetCommand(CMD_RETURN, 0, variables_response, is_json=True))
            # Return here so the failure response below is not also sent
            # after a successful change (the original 'break' fell through).
            return

# If it's gotten here we haven't been able to evaluate it properly. Let the client know.
body = SetVariableResponseBody('')
variables_response = pydevd_base_schema.build_response(
    request,
    kwargs={
        'body':body,
        'success': False,
        'message': 'Unable to change: %s.' % (arguments.name,)
    })
return NetCommand(CMD_RETURN, 0, variables_response, is_json=True)
def internal_change_variable_json(py_db, request)
The pydevd_vars.change_attr_expression(thread_id, frame_id, attr, value, dbg)
can only deal with changing at a frame level, so changing the contents of
something in a different scope is currently not supported.

TODO: make the resolvers structure resolve the name and change
accordingly -- for instance, the list resolver should change the value
considering the index.

:param SetVariableRequest request:
5.144481
2.909221
1.768336
''' Converts request into python variable ''' try: frame = dbg.find_frame(thread_id, frame_id) if frame is not None: hidden_ns = pydevconsole.get_ipython_hidden_vars() xml = "<xml>" xml += pydevd_xml.frame_vars_to_xml(frame.f_locals, hidden_ns) del frame xml += "</xml>" cmd = dbg.cmd_factory.make_get_frame_message(seq, xml) dbg.writer.add_command(cmd) else: # pydevd_vars.dump_frames(thread_id) # don't print this error: frame not found: means that the client is not synchronized (but that's ok) cmd = dbg.cmd_factory.make_error_message(seq, "Frame not found: %s from thread: %s" % (frame_id, thread_id)) dbg.writer.add_command(cmd) except: cmd = dbg.cmd_factory.make_error_message(seq, "Error resolving frame: %s from thread: %s" % (frame_id, thread_id)) dbg.writer.add_command(cmd)
def internal_get_frame(dbg, seq, thread_id, frame_id)
Converts request into python variable
3.7826
3.565037
1.061027
''' gets the valid line numbers for use with set next statement ''' try: frame = dbg.find_frame(thread_id, frame_id) if frame is not None: code = frame.f_code xml = "<xml>" if hasattr(code, 'co_lnotab'): lineno = code.co_firstlineno lnotab = code.co_lnotab for i in itertools.islice(lnotab, 1, len(lnotab), 2): if isinstance(i, int): lineno = lineno + i else: # in python 2 elements in co_lnotab are of type str lineno = lineno + ord(i) xml += "<line>%d</line>" % (lineno,) else: xml += "<line>%d</line>" % (frame.f_lineno,) del frame xml += "</xml>" cmd = dbg.cmd_factory.make_get_next_statement_targets_message(seq, xml) dbg.writer.add_command(cmd) else: cmd = dbg.cmd_factory.make_error_message(seq, "Frame not found: %s from thread: %s" % (frame_id, thread_id)) dbg.writer.add_command(cmd) except: cmd = dbg.cmd_factory.make_error_message(seq, "Error resolving frame: %s from thread: %s" % (frame_id, thread_id)) dbg.writer.add_command(cmd)
def internal_get_next_statement_targets(dbg, seq, thread_id, frame_id)
gets the valid line numbers for use with set next statement
2.608552
2.397619
1.087976
''' :param EvaluateRequest request: ''' # : :type arguments: EvaluateArguments arguments = request.arguments expression = arguments.expression frame_id = arguments.frameId context = arguments.context fmt = arguments.format if hasattr(fmt, 'to_dict'): fmt = fmt.to_dict() if IS_PY2 and isinstance(expression, unicode): try: expression = expression.encode('utf-8') except: _evaluate_response(py_db, request, '', error_message='Expression is not valid utf-8.') raise frame = py_db.find_frame(thread_id, frame_id) result = pydevd_vars.evaluate_expression(py_db, frame, expression, is_exec=False) is_error = isinstance(result, ExceptionOnEvaluate) if is_error: if context == 'hover': _evaluate_response(py_db, request, result='') return elif context == 'repl': try: pydevd_vars.evaluate_expression(py_db, frame, expression, is_exec=True) except Exception as ex: err = ''.join(traceback.format_exception_only(type(ex), ex)) _evaluate_response(py_db, request, result='', error_message=err) return # No result on exec. _evaluate_response(py_db, request, result='') return # Ok, we have the result (could be an error), let's put it into the saved variables. frame_tracker = py_db.suspended_frames_manager.get_frame_tracker(thread_id) if frame_tracker is None: # This is not really expected. _evaluate_response(py_db, request, result, error_message='Thread id: %s is not current thread id.' % (thread_id,)) return variable = frame_tracker.obtain_as_variable(expression, result) var_data = variable.get_var_data(fmt=fmt) body = pydevd_schema.EvaluateResponseBody( result=var_data['value'], variablesReference=var_data.get('variablesReference', 0), type=var_data.get('type'), presentationHint=var_data.get('presentationHint'), namedVariables=var_data.get('namedVariables'), indexedVariables=var_data.get('indexedVariables'), ) variables_response = pydevd_base_schema.build_response(request, kwargs={'body':body}) py_db.writer.add_command(NetCommand(CMD_RETURN, 0, variables_response, is_json=True))
def internal_evaluate_expression_json(py_db, request, thread_id)
:param EvaluateRequest request:
3.659287
3.571719
1.024517
''' gets the value of a variable ''' try: frame = dbg.find_frame(thread_id, frame_id) if frame is not None: result = pydevd_vars.evaluate_expression(dbg, frame, expression, is_exec) if attr_to_set_result != "": pydevd_vars.change_attr_expression(frame, attr_to_set_result, expression, dbg, result) else: result = None xml = "<xml>" xml += pydevd_xml.var_to_xml(result, expression, trim_if_too_big) xml += "</xml>" cmd = dbg.cmd_factory.make_evaluate_expression_message(seq, xml) dbg.writer.add_command(cmd) except: exc = get_exception_traceback_str() cmd = dbg.cmd_factory.make_error_message(seq, "Error evaluating expression " + exc) dbg.writer.add_command(cmd)
def internal_evaluate_expression(dbg, seq, thread_id, frame_id, expression, is_exec, trim_if_too_big, attr_to_set_result)
gets the value of a variable
2.912277
2.860282
1.018179
''' Note that if the column is >= 0, the act_tok is considered text and the actual activation token/qualifier is computed in this command. ''' try: remove_path = None try: qualifier = u'' if column >= 0: token_and_qualifier = extract_token_and_qualifier(act_tok, line, column) act_tok = token_and_qualifier[0] if act_tok: act_tok += u'.' qualifier = token_and_qualifier[1] frame = dbg.find_frame(thread_id, frame_id) if frame is not None: if IS_PY2: if not isinstance(act_tok, bytes): act_tok = act_tok.encode('utf-8') if not isinstance(qualifier, bytes): qualifier = qualifier.encode('utf-8') completions = _pydev_completer.generate_completions(frame, act_tok) # Note that qualifier and start are only actually valid for the # Debug Adapter Protocol (for the line-based protocol, the IDE # is required to filter the completions returned). cmd = dbg.cmd_factory.make_get_completions_message( seq, completions, qualifier, start=column - len(qualifier)) dbg.writer.add_command(cmd) else: cmd = dbg.cmd_factory.make_error_message(seq, "internal_get_completions: Frame not found: %s from thread: %s" % (frame_id, thread_id)) dbg.writer.add_command(cmd) finally: if remove_path is not None: sys.path.remove(remove_path) except: exc = get_exception_traceback_str() sys.stderr.write('%s\n' % (exc,)) cmd = dbg.cmd_factory.make_error_message(seq, "Error evaluating expression " + exc) dbg.writer.add_command(cmd)
def internal_get_completions(dbg, seq, thread_id, frame_id, act_tok, line=-1, column=-1)
Note that if the column is >= 0, the act_tok is considered text and the actual activation token/qualifier is computed in this command.
3.631991
3.018991
1.203048
''' Fetch the variable description stub from the debug console ''' try: frame = dbg.find_frame(thread_id, frame_id) description = pydevd_console.get_description(frame, thread_id, frame_id, expression) description = pydevd_xml.make_valid_xml_value(quote(description, '/>_= \t')) description_xml = '<xml><var name="" type="" value="%s"/></xml>' % description cmd = dbg.cmd_factory.make_get_description_message(seq, description_xml) dbg.writer.add_command(cmd) except: exc = get_exception_traceback_str() cmd = dbg.cmd_factory.make_error_message(seq, "Error in fetching description" + exc) dbg.writer.add_command(cmd)
def internal_get_description(dbg, seq, thread_id, frame_id, expression)
Fetch the variable description stub from the debug console
4.436935
4.015064
1.105072
''' :return ExceptionInfoResponse ''' thread = pydevd_find_thread_by_id(thread_id) additional_info = set_additional_thread_info(thread) topmost_frame = additional_info.get_topmost_frame(thread) frames = [] exc_type = None exc_desc = None if topmost_frame is not None: frame_id_to_lineno = {} try: trace_obj = None frame = topmost_frame while frame is not None: if frame.f_code.co_name == 'do_wait_suspend' and frame.f_code.co_filename.endswith('pydevd.py'): arg = frame.f_locals.get('arg', None) if arg is not None: exc_type, exc_desc, trace_obj = arg break frame = frame.f_back while trace_obj.tb_next is not None: trace_obj = trace_obj.tb_next info = dbg.suspended_frames_manager.get_topmost_frame_and_frame_id_to_line(thread_id) if info is not None: topmost_frame, frame_id_to_lineno = info if trace_obj is not None: for frame_id, frame, method_name, original_filename, filename_in_utf8, lineno in iter_visible_frames_info( dbg, trace_obj.tb_frame, frame_id_to_lineno): line_text = linecache.getline(original_filename, lineno) # Never filter out plugin frames! if not getattr(frame, 'IS_PLUGIN_FRAME', False): if dbg.is_files_filter_enabled and dbg.apply_files_filter(frame, original_filename, False): continue frames.append((filename_in_utf8, lineno, method_name, line_text)) finally: topmost_frame = None name = 'exception: type unknown' if exc_type is not None: try: name = exc_type.__qualname__ except: try: name = exc_type.__name__ except: try: name = str(exc_type) except: pass description = 'exception: no description' if exc_desc is not None: try: description = str(exc_desc) except: pass stack_str = ''.join(traceback.format_list(frames[-max_frames:])) # This is an extra bit of data used by Visual Studio source_path = frames[0][0] if frames else '' if thread.stop_reason == CMD_STEP_CAUGHT_EXCEPTION: break_mode = pydevd_schema.ExceptionBreakMode.ALWAYS else: break_mode = pydevd_schema.ExceptionBreakMode.UNHANDLED response = pydevd_schema.ExceptionInfoResponse( request_seq=request_seq, success=True, command='exceptionInfo', body=pydevd_schema.ExceptionInfoResponseBody( exceptionId=name, description=description, breakMode=break_mode, details=pydevd_schema.ExceptionDetails( message=description, typeName=name, stackTrace=stack_str, source=source_path ) ) ) return response
def build_exception_info_response(dbg, thread_id, request_seq, set_additional_thread_info, iter_visible_frames_info, max_frames)
:return ExceptionInfoResponse
3.106526
3.104973
1.0005
''' Fetch exception details ''' try: response = build_exception_info_response(dbg, thread_id, request.seq, set_additional_thread_info, iter_visible_frames_info, max_frames) except: exc = get_exception_traceback_str() response = pydevd_base_schema.build_response(request, kwargs={ 'success': False, 'message': exc, 'body':{} }) dbg.writer.add_command(NetCommand(CMD_RETURN, 0, response, is_json=True))
def internal_get_exception_details_json(dbg, request, thread_id, max_frames, set_additional_thread_info=None, iter_visible_frames_info=None)
Fetch exception details
5.503833
5.636117
0.976529
''' just loop and write responses ''' try: while True: try: try: cmd = self.cmdQueue.get(1, 0.1) except _queue.Empty: if self.killReceived: try: self.sock.shutdown(SHUT_WR) self.sock.close() except: pass return # break if queue is empty and killReceived else: continue except: # pydev_log.info('Finishing debug communication...(1)') # when liberating the thread here, we could have errors because we were shutting down # but the thread was still not liberated return cmd.send(self.sock) if cmd.id == CMD_EXIT: break if time is None: break # interpreter shutdown time.sleep(self.timeout) except Exception: GlobalDebuggerHolder.global_dbg.finish_debugging_session() if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 0: pydev_log_exception()
def _on_run(self)
just loop and write responses
8.817436
7.890079
1.117535
'''By default, it must be in the same thread to be executed ''' return self.thread_id == thread_id or self.thread_id.endswith('|' + thread_id)
def can_be_executed_by(self, thread_id)
By default, it must be in the same thread to be executed
7.048354
4.146218
1.699948
''' Converts request into python variable ''' try: xml = StringIO.StringIO() xml.write("<xml>") _typeName, val_dict = pydevd_vars.resolve_compound_variable_fields( dbg, self.thread_id, self.frame_id, self.scope, self.attributes) if val_dict is None: val_dict = {} # assume properly ordered if resolver returns 'OrderedDict' # check type as string to support OrderedDict backport for older Python keys = dict_keys(val_dict) if not (_typeName == "OrderedDict" or val_dict.__class__.__name__ == "OrderedDict" or IS_PY36_OR_GREATER): keys.sort(key=compare_object_attrs_key) for k in keys: val = val_dict[k] evaluate_full_value = pydevd_xml.should_evaluate_full_value(val) xml.write(pydevd_xml.var_to_xml(val, k, evaluate_full_value=evaluate_full_value)) xml.write("</xml>") cmd = dbg.cmd_factory.make_get_variable_message(self.sequence, xml.getvalue()) xml.close() dbg.writer.add_command(cmd) except Exception: cmd = dbg.cmd_factory.make_error_message( self.sequence, "Error resolving variables %s" % (get_exception_traceback_str(),)) dbg.writer.add_command(cmd)
def do_it(self, dbg)
Converts request into python variable
5.299749
5.038902
1.051767
''' Create an XML for console output, error and more (true/false) <xml> <output message=output_message></output> <error message=error_message></error> <more>true/false</more> </xml> ''' try: frame = dbg.find_frame(self.thread_id, self.frame_id) if frame is not None: console_message = pydevd_console.execute_console_command( frame, self.thread_id, self.frame_id, self.line, self.buffer_output) cmd = dbg.cmd_factory.make_send_console_message(self.sequence, console_message.to_xml()) else: from _pydevd_bundle.pydevd_console import ConsoleMessage console_message = ConsoleMessage() console_message.add_console_message( pydevd_console.CONSOLE_ERROR, "Select the valid frame in the debug view (thread: %s, frame: %s invalid)" % (self.thread_id, self.frame_id), ) cmd = dbg.cmd_factory.make_error_message(self.sequence, console_message.to_xml()) except: exc = get_exception_traceback_str() cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating expression " + exc) dbg.writer.add_command(cmd)
def do_it(self, dbg)
Create an XML for console output, error and more (true/false) <xml> <output message=output_message></output> <error message=error_message></error> <more>true/false</more> </xml>
4.104992
2.948045
1.392445