idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
62,400 | def importevla ( asdm , ms ) : from . scripting import CasapyScript bdfstem = os . listdir ( os . path . join ( asdm , 'ASDMBinary' ) ) [ 0 ] bdf = os . path . join ( asdm , 'ASDMBinary' , bdfstem ) tbuff = None with open ( bdf , 'rb' ) as f : for linenum , line in enumerate ( f ) : if linenum > 60 : raise PKError ( 'cannot find integration time info in %s' , bdf ) if not line . startswith ( b'<sdmDataSubsetHeader' ) : continue try : i1 = line . index ( b'<interval>' ) + len ( b'<interval>' ) i2 = line . index ( b'</interval>' ) if i2 <= i1 : raise ValueError ( ) except ValueError : raise PKError ( 'cannot parse integration time info in %s' , bdf ) tbuff = float ( line [ i1 : i2 ] ) * 1.5e-9 break if tbuff is None : raise PKError ( 'found no integration time info' ) print ( 'importevla: %s -> %s with tbuff=%.2f' % ( asdm , ms , tbuff ) ) script = os . path . join ( os . path . dirname ( __file__ ) , 'cscript_importevla.py' ) with CasapyScript ( script , asdm = asdm , ms = ms , tbuff = tbuff ) as cs : pass | Convert an EVLA low - level SDM dataset to Measurement Set format . |
62,401 | def listobs ( vis ) : def inner_list ( sink ) : try : ms = util . tools . ms ( ) ms . open ( vis ) ms . summary ( verbose = True ) ms . close ( ) except Exception as e : sink . post ( b'listobs failed: %s' % e , priority = b'SEVERE' ) for line in util . forkandlog ( inner_list ) : info = line . rstrip ( ) . split ( '\t' , 3 ) if len ( info ) > 3 : yield info [ 3 ] else : yield '' | Textually describe the contents of a measurement set . |
62,402 | def mjd2date ( mjd , precision = 3 ) : from astropy . time import Time dt = Time ( mjd , format = 'mjd' , scale = 'utc' ) . to_datetime ( ) fracsec = ( '%.*f' % ( precision , 1e-6 * dt . microsecond ) ) . split ( '.' ) [ 1 ] return '%04d/%02d/%02d/%02d:%02d:%02d.%s' % ( dt . year , dt . month , dt . day , dt . hour , dt . minute , dt . second , fracsec ) | Convert an MJD to a data string in the format used by CASA . |
62,403 | def plotants ( vis , figfile ) : from . scripting import CasapyScript script = os . path . join ( os . path . dirname ( __file__ ) , 'cscript_plotants.py' ) with CasapyScript ( script , vis = vis , figfile = figfile ) as cs : pass | Plot the physical layout of the antennas described in the MS . |
62,404 | def latexify ( obj , ** kwargs ) : if hasattr ( obj , '__pk_latex__' ) : return obj . __pk_latex__ ( ** kwargs ) if isinstance ( obj , text_type ) : from . unicode_to_latex import unicode_to_latex return unicode_to_latex ( obj ) if isinstance ( obj , bool ) : raise ValueError ( 'no well-defined LaTeXification of bool %r' % obj ) if isinstance ( obj , float ) : nplaces = kwargs . get ( 'nplaces' ) if nplaces is None : return '$%f$' % obj return '$%.*f$' % ( nplaces , obj ) if isinstance ( obj , int ) : return '$%d$' % obj if isinstance ( obj , binary_type ) : if all ( c in _printable_ascii for c in obj ) : return obj . decode ( 'ascii' ) raise ValueError ( 'no safe LaTeXification of binary string %r' % obj ) raise ValueError ( 'can\'t LaTeXify %r' % obj ) | Render an object in LaTeX appropriately . |
62,405 | def latexify_n2col ( x , nplaces = None , ** kwargs ) : if nplaces is not None : t = '%.*f' % ( nplaces , x ) else : t = '%f' % x if '.' not in t : return '$%s$ &' % t left , right = t . split ( '.' ) return '$%s$ & $.%s$' % ( left , right ) | Render a number into LaTeX in a 2 - column format where the columns split immediately to the left of the decimal point . This gives nice alignment of numbers in a table . |
62,406 | def latexify_u3col ( obj , ** kwargs ) : if hasattr ( obj , '__pk_latex_u3col__' ) : return obj . __pk_latex_u3col__ ( ** kwargs ) raise ValueError ( 'can\'t LaTeXify %r in 3-column uncertain format' % obj ) | Convert an object to special LaTeX for uncertainty tables . |
62,407 | def latexify_l3col ( obj , ** kwargs ) : if hasattr ( obj , '__pk_latex_l3col__' ) : return obj . __pk_latex_l3col__ ( ** kwargs ) if isinstance ( obj , bool ) : raise ValueError ( 'no well-defined l3col LaTeXification of bool %r' % obj ) if isinstance ( obj , float ) : return '&' + latexify_n2col ( obj , ** kwargs ) if isinstance ( obj , int ) : return '& $%d$ &' % obj raise ValueError ( 'can\'t LaTeXify %r in 3-column limit format' % obj ) | Convert an object to special LaTeX for limit tables . |
62,408 | def read ( path , tabwidth = 8 , ** kwargs ) : datamode = False fixedcols = { } for text in _trimmedlines ( path , ** kwargs ) : text = text . expandtabs ( tabwidth ) if datamode : h = Holder ( ) h . set ( ** fixedcols ) for name , cslice , parser in info : try : v = parser ( text [ cslice ] . strip ( ) ) except : reraise_context ( 'while parsing "%s"' , text [ cslice ] . strip ( ) ) h . set_one ( name , v ) yield h elif text [ 0 ] != '@' : padnamekind , padval = text . split ( '=' , 1 ) name , parser = _getparser ( padnamekind . strip ( ) ) fixedcols [ name ] = parser ( padval . strip ( ) ) else : n = len ( text ) assert n > 1 start = 0 info = [ ] while start < n : end = start + 1 while end < n and ( not text [ end ] . isspace ( ) ) : end += 1 if start == 0 : namekind = text [ start + 1 : end ] else : namekind = text [ start : end ] while end < n and text [ end ] . isspace ( ) : end += 1 name , parser = _getparser ( namekind ) if parser is None : skippedlast = True else : skippedlast = False info . append ( ( name , slice ( start , end ) , parser ) ) start = end datamode = True if not skippedlast : lname , lslice , lparser = info [ - 1 ] info [ - 1 ] = lname , slice ( lslice . start , None ) , lparser | Read a typed tabular text file into a stream of Holders . |
62,409 | def write ( stream , items , fieldnames , tabwidth = 8 ) : if isinstance ( fieldnames , six . string_types ) : fieldnames = fieldnames . split ( ) maxlens = [ 0 ] * len ( fieldnames ) items = list ( items ) coltypes = [ None ] * len ( fieldnames ) for i in items : for idx , fn in enumerate ( fieldnames ) : val = i . get ( fn ) if val is None : continue typetag , text , inexact = msmt . fmtinfo ( val ) maxlens [ idx ] = max ( maxlens [ idx ] , len ( text ) + 1 ) if coltypes [ idx ] is None : coltypes [ idx ] = typetag continue if coltypes [ idx ] == typetag : continue if coltypes [ idx ] [ - 1 ] == 'f' and typetag [ - 1 ] == 'u' : if coltypes [ idx ] [ : - 1 ] == typetag [ : - 1 ] : coltypes [ idx ] = coltypes [ idx ] [ : - 1 ] + 'u' continue if coltypes [ idx ] [ - 1 ] == 'u' and typetag [ - 1 ] == 'f' : if coltypes [ idx ] [ : - 1 ] == typetag [ : - 1 ] : continue raise PKError ( 'irreconcilable column types: %s and %s' , coltypes [ idx ] , typetag ) headers = list ( fieldnames ) headers [ 0 ] = '@' + headers [ 0 ] for idx , fn in enumerate ( fieldnames ) : if coltypes [ idx ] != '' : headers [ idx ] += ':' + coltypes [ idx ] maxlens [ idx ] = max ( maxlens [ idx ] , len ( headers [ idx ] ) ) widths = [ tabwidth * ( ( k + tabwidth - 1 ) // tabwidth ) for k in maxlens ] print ( '' . join ( _tabpad ( h , widths [ idx ] , tabwidth ) for ( idx , h ) in enumerate ( headers ) ) , file = stream ) def ustr ( i , f ) : v = i . get ( f ) if v is None : return '' return msmt . fmtinfo ( v ) [ 1 ] for i in items : print ( '' . join ( _tabpad ( ustr ( i , fn ) , widths [ idx ] , tabwidth ) for ( idx , fn ) in enumerate ( fieldnames ) ) , file = stream ) | Write a typed tabular text file to the specified stream . |
62,410 | def vizread ( descpath , descsection , tabpath , tabwidth = 8 , ** kwargs ) : from . inifile import read as iniread cols = [ ] for i in iniread ( descpath ) : if i . section != descsection : continue for field , desc in six . iteritems ( i . __dict__ ) : if field == 'section' : continue a = desc . split ( ) idx0 = int ( a [ 0 ] ) - 1 if len ( a ) == 1 : cols . append ( ( field , slice ( idx0 , idx0 + 1 ) , msmt . parsers [ 's' ] ) ) continue if len ( a ) == 2 : parser = msmt . parsers [ 's' ] else : parser = msmt . parsers [ a [ 2 ] ] cols . append ( ( field , slice ( idx0 , int ( a [ 1 ] ) ) , parser ) ) for text in _trimmedlines ( tabpath , ** kwargs ) : text = text . expandtabs ( tabwidth ) h = Holder ( ) for name , cslice , parser in cols : try : v = parser ( text [ cslice ] . strip ( ) ) except : reraise_context ( 'while parsing "%s"' , text [ cslice ] . strip ( ) ) h . set_one ( name , v ) yield h | Read a headerless tabular text file into a stream of Holders . |
62,411 | def _broadcast_shapes ( s1 , s2 ) : n1 = len ( s1 ) n2 = len ( s2 ) n = max ( n1 , n2 ) res = [ 1 ] * n for i in range ( n ) : if i >= n1 : c1 = 1 else : c1 = s1 [ n1 - 1 - i ] if i >= n2 : c2 = 1 else : c2 = s2 [ n2 - 1 - i ] if c1 == 1 : rc = c2 elif c2 == 1 or c1 == c2 : rc = c1 else : raise ValueError ( 'array shapes %r and %r are not compatible' % ( s1 , s2 ) ) res [ n - 1 - i ] = rc return tuple ( res ) | Given array shapes s1 and s2 compute the shape of the array that would result from broadcasting them together . |
62,412 | def set_data ( self , data , invsigma = None ) : self . data = np . array ( data , dtype = np . float , ndmin = 1 ) if invsigma is None : self . invsigma = np . ones ( self . data . shape ) else : i = np . array ( invsigma , dtype = np . float ) self . invsigma = np . broadcast_arrays ( self . data , i ) [ 1 ] if self . invsigma . shape != self . data . shape : raise ValueError ( 'data values and inverse-sigma values must have same shape' ) return self | Set the data to be modeled . |
62,413 | def print_soln ( self ) : lmax = reduce ( max , ( len ( x ) for x in self . pnames ) , len ( 'r chi sq' ) ) if self . puncerts is None : for pn , val in zip ( self . pnames , self . params ) : print ( '%s: %14g' % ( pn . rjust ( lmax ) , val ) ) else : for pn , val , err in zip ( self . pnames , self . params , self . puncerts ) : frac = abs ( 100. * err / val ) print ( '%s: %14g +/- %14g (%.2f%%)' % ( pn . rjust ( lmax ) , val , err , frac ) ) if self . rchisq is not None : print ( '%s: %14g' % ( 'r chi sq' . rjust ( lmax ) , self . rchisq ) ) elif self . chisq is not None : print ( '%s: %14g' % ( 'chi sq' . rjust ( lmax ) , self . chisq ) ) else : print ( '%s: unknown/undefined' % ( 'r chi sq' . rjust ( lmax ) ) ) return self | Print information about the model solution . |
62,414 | def show_corr ( self ) : "Show the parameter correlation matrix with `pwkit.ndshow_gtk3`." from . ndshow_gtk3 import view d = np . diag ( self . covar ) ** - 0.5 corr = self . covar * d [ np . newaxis , : ] * d [ : , np . newaxis ] view ( corr , title = 'Correlation Matrix' ) | Show the parameter correlation matrix with pwkit . ndshow_gtk3 . |
62,415 | def set_func ( self , func , pnames , args = ( ) ) : from . lmmin import Problem self . func = func self . _args = args self . pnames = list ( pnames ) self . lm_prob = Problem ( len ( self . pnames ) ) return self | Set the model function to use an efficient but tedious calling convention . |
62,416 | def set_simple_func ( self , func , args = ( ) ) : code = get_function_code ( func ) npar = code . co_argcount - len ( args ) pnames = code . co_varnames [ : npar ] def wrapper ( params , * args ) : return func ( * ( tuple ( params ) + args ) ) return self . set_func ( wrapper , pnames , args ) | Set the model function to use a simple but somewhat inefficient calling convention . |
62,417 | def make_frozen_func ( self , params ) : params = np . array ( params , dtype = np . float , ndmin = 1 ) from functools import partial return partial ( self . func , params ) | Returns a model function frozen to the specified parameter values . |
62,418 | def solve ( self , guess ) : guess = np . array ( guess , dtype = np . float , ndmin = 1 ) f = self . func args = self . _args def lmfunc ( params , vec ) : vec [ : ] = f ( params , * args ) . flatten ( ) self . lm_prob . set_residual_func ( self . data . flatten ( ) , self . invsigma . flatten ( ) , lmfunc , None ) self . lm_soln = soln = self . lm_prob . solve ( guess ) self . params = soln . params self . puncerts = soln . perror self . covar = soln . covar self . mfunc = self . make_frozen_func ( soln . params ) self . resids = soln . fvec . reshape ( self . data . shape ) / self . invsigma self . mdata = self . data - self . resids self . chisq = ( self . lm_soln . fvec ** 2 ) . sum ( ) if soln . ndof > 0 : self . rchisq = self . chisq / soln . ndof return self | Solve for the parameters using an initial guess . |
62,419 | def as_nonlinear ( self , params = None ) : if params is None : params = self . params nlm = Model ( None , self . data , self . invsigma ) nlm . set_func ( lambda p , x : npoly . polyval ( x , p ) , self . pnames , args = ( self . x , ) ) if params is not None : nlm . solve ( params ) return nlm | Return a Model equivalent to this object . The nonlinear solver is less efficient but lets you freeze parameters compute uncertainties etc . |
62,420 | def files ( self ) : if self . topic . has_file : yield self . topic . file . file_url for reply in self . replies : if reply . has_file : yield reply . file . file_url | Returns the URLs of all files attached to posts in the thread . |
62,421 | def thumbs ( self ) : if self . topic . has_file : yield self . topic . file . thumbnail_url for reply in self . replies : if reply . has_file : yield reply . file . thumbnail_url | Returns the URLs of all thumbnails in the thread . |
62,422 | def filenames ( self ) : if self . topic . has_file : yield self . topic . file . filename for reply in self . replies : if reply . has_file : yield reply . file . filename | Returns the filenames of all files attached to posts in the thread . |
62,423 | def thumbnames ( self ) : if self . topic . has_file : yield self . topic . file . thumbnail_fname for reply in self . replies : if reply . has_file : yield reply . file . thumbnail_fname | Returns the filenames of all thumbnails in the thread . |
62,424 | def update ( self , force = False ) : if self . is_404 and not force : return 0 if self . _last_modified : headers = { 'If-Modified-Since' : self . _last_modified } else : headers = None try : res = self . _board . _requests_session . get ( self . _api_url , headers = headers ) except : return 0 if res . status_code == 304 : return 0 elif res . status_code == 404 : self . is_404 = True self . _board . _thread_cache . pop ( self . id , None ) return 0 elif res . status_code == 200 : if self . is_404 : self . is_404 = False self . _board . _thread_cache [ self . id ] = self self . want_update = False self . omitted_images = 0 self . omitted_posts = 0 self . _last_modified = res . headers [ 'Last-Modified' ] posts = res . json ( ) [ 'posts' ] original_post_count = len ( self . replies ) self . topic = Post ( self , posts [ 0 ] ) if self . last_reply_id and not force : self . replies . extend ( Post ( self , p ) for p in posts if p [ 'no' ] > self . last_reply_id ) else : self . replies [ : ] = [ Post ( self , p ) for p in posts [ 1 : ] ] new_post_count = len ( self . replies ) post_count_delta = new_post_count - original_post_count if not post_count_delta : return 0 self . last_reply_id = self . replies [ - 1 ] . post_number return post_count_delta else : res . raise_for_status ( ) | Fetch new posts from the server . |
62,425 | def cas_a ( freq_mhz , year ) : snu = 10. ** ( 5.745 - 0.770 * np . log10 ( freq_mhz ) ) dnu = 0.01 * ( 0.07 - 0.30 * np . log10 ( freq_mhz ) ) loss = ( 1 - dnu ) ** ( year - 1980. ) return snu * loss | Return the flux of Cas A given a frequency and the year of observation . Based on the formula given in Baars et al . 1977 . |
62,426 | def init_cas_a ( year ) : year = float ( year ) models [ 'CasA' ] = lambda f : cas_a ( f , year ) | Insert an entry for Cas A into the table of models . Need to specify the year of the observations to account for the time variation of Cas A s emission . |
62,427 | def add_from_vla_obs ( src , Lband , Cband ) : if src in models : raise PKError ( 'already have a model for ' + src ) fL = np . log10 ( 1425 ) fC = np . log10 ( 4860 ) lL = np . log10 ( Lband ) lC = np . log10 ( Cband ) A = ( lL - lC ) / ( fL - fC ) B = lL - A * fL def fluxdens ( freq_mhz ) : return 10. ** ( A * np . log10 ( freq_mhz ) + B ) def spindex ( freq_mhz ) : return A models [ src ] = fluxdens spindexes [ src ] = spindex | Add an entry into the models table for a source based on L - band and C - band flux densities . |
62,428 | def databiv ( xy , coordouter = False , ** kwargs ) : xy = np . asarray ( xy ) if xy . ndim != 2 : raise ValueError ( '"xy" must be a 2D array' ) if coordouter : if xy . shape [ 0 ] != 2 : raise ValueError ( 'if "coordouter" is True, first axis of "xy" ' 'must have size 2' ) else : if xy . shape [ 1 ] != 2 : raise ValueError ( 'if "coordouter" is False, second axis of "xy" ' 'must have size 2' ) cov = np . cov ( xy , rowvar = coordouter , ** kwargs ) sx , sy = np . sqrt ( np . diag ( cov ) ) cxy = cov [ 0 , 1 ] return _bivcheck ( sx , sy , cxy ) | Compute the main parameters of a bivariate distribution from data . The parameters are returned in the same format as used in the rest of this module . |
62,429 | def bivrandom ( x0 , y0 , sx , sy , cxy , size = None ) : from numpy . random import multivariate_normal as mvn p0 = np . asarray ( [ x0 , y0 ] ) cov = np . asarray ( [ [ sx ** 2 , cxy ] , [ cxy , sy ** 2 ] ] ) return mvn ( p0 , cov , size ) | Compute random values distributed according to the specified bivariate distribution . |
62,430 | def bivconvolve ( sx_a , sy_a , cxy_a , sx_b , sy_b , cxy_b ) : _bivcheck ( sx_a , sy_a , cxy_a ) _bivcheck ( sx_b , sy_b , cxy_b ) sx_c = np . sqrt ( sx_a ** 2 + sx_b ** 2 ) sy_c = np . sqrt ( sy_a ** 2 + sy_b ** 2 ) cxy_c = cxy_a + cxy_b return _bivcheck ( sx_c , sy_c , cxy_c ) | Given two independent bivariate distributions compute a bivariate distribution corresponding to their convolution . |
62,431 | def ellplot ( mjr , mnr , pa ) : _ellcheck ( mjr , mnr , pa ) import omega as om th = np . linspace ( 0 , 2 * np . pi , 200 ) x , y = ellpoint ( mjr , mnr , pa , th ) return om . quickXY ( x , y , 'mjr=%f mnr=%f pa=%f' % ( mjr , mnr , pa * 180 / np . pi ) ) | Utility for debugging . |
62,432 | def abcd2 ( x0 , y0 , a , b , c , x , y ) : _abccheck ( a , b , c ) dx , dy = x - x0 , y - y0 return - 2 * ( a * dx ** 2 + b * dx * dy + c * dy ** 2 ) | Given an 2D Gaussian expressed as the ABC polynomial coefficients compute a squared distance parameter such that |
62,433 | def eigh_robust ( a , b = None , eigvals = None , eigvals_only = False , overwrite_a = False , overwrite_b = False , turbo = True , check_finite = True ) : kwargs = dict ( eigvals = eigvals , eigvals_only = eigvals_only , turbo = turbo , check_finite = check_finite , overwrite_a = overwrite_a , overwrite_b = overwrite_b ) if b is None : return linalg . eigh ( a , ** kwargs ) kwargs_b = dict ( turbo = turbo , check_finite = check_finite , overwrite_a = overwrite_b ) S , U = linalg . eigh ( b , ** kwargs_b ) S [ S <= 0 ] = np . inf Sinv = 1. / np . sqrt ( S ) W = Sinv [ : , None ] * np . dot ( U . T , np . dot ( a , U ) ) * Sinv output = linalg . eigh ( W , ** kwargs ) if eigvals_only : return output else : evals , evecs = output return evals , np . dot ( U , Sinv [ : , None ] * evecs ) | Robustly solve the Hermitian generalized eigenvalue problem |
62,434 | def _compute_projection ( self , X , W ) : X = check_array ( X ) D = np . diag ( W . sum ( 1 ) ) L = D - W evals , evecs = eigh_robust ( np . dot ( X . T , np . dot ( L , X ) ) , np . dot ( X . T , np . dot ( D , X ) ) , eigvals = ( 0 , self . n_components - 1 ) ) return evecs | Compute the LPP projection matrix |
62,435 | def find_common_dtype ( * args ) : dtypes = [ ] for arg in args : if type ( arg ) is numpy . ndarray or isspmatrix ( arg ) or isinstance ( arg , LinearOperator ) : if hasattr ( arg , 'dtype' ) : dtypes . append ( arg . dtype ) else : warnings . warn ( 'object %s does not have a dtype.' % arg . __repr__ ) return numpy . find_common_type ( dtypes , [ ] ) | Returns common dtype of numpy and scipy objects . |
62,436 | def inner ( X , Y , ip_B = None ) : if ip_B is None or isinstance ( ip_B , IdentityLinearOperator ) : return numpy . dot ( X . T . conj ( ) , Y ) ( N , m ) = X . shape ( _ , n ) = Y . shape try : B = get_linearoperator ( ( N , N ) , ip_B ) except TypeError : return ip_B ( X , Y ) if m > n : return numpy . dot ( ( B * X ) . T . conj ( ) , Y ) else : return numpy . dot ( X . T . conj ( ) , B * Y ) | Euclidean and non - Euclidean inner product . |
62,437 | def norm_squared ( x , Mx = None , inner_product = ip_euclid ) : assert ( len ( x . shape ) == 2 ) if Mx is None : rho = inner_product ( x , x ) else : assert ( len ( Mx . shape ) == 2 ) rho = inner_product ( x , Mx ) if rho . shape == ( 1 , 1 ) : if abs ( rho [ 0 , 0 ] . imag ) > abs ( rho [ 0 , 0 ] ) * 1e-10 or rho [ 0 , 0 ] . real < 0.0 : raise InnerProductError ( ( '<x,Mx> = %g. Is the inner product ' 'indefinite?' ) % rho [ 0 , 0 ] ) return numpy . linalg . norm ( rho , 2 ) | Compute the norm^2 w . r . t . to a given scalar product . |
62,438 | def get_linearoperator ( shape , A , timer = None ) : ret = None import scipy . sparse . linalg as scipylinalg if isinstance ( A , LinearOperator ) : ret = A elif A is None : ret = IdentityLinearOperator ( shape ) elif isinstance ( A , numpy . ndarray ) or isspmatrix ( A ) : ret = MatrixLinearOperator ( A ) elif isinstance ( A , numpy . matrix ) : ret = MatrixLinearOperator ( numpy . atleast_2d ( numpy . asarray ( A ) ) ) elif isinstance ( A , scipylinalg . LinearOperator ) : if not hasattr ( A , 'dtype' ) : raise ArgumentError ( 'scipy LinearOperator has no dtype.' ) ret = LinearOperator ( A . shape , dot = A . matvec , dot_adj = A . rmatvec , dtype = A . dtype ) else : raise TypeError ( 'type not understood' ) if A is not None and not isinstance ( A , IdentityLinearOperator ) and timer is not None : ret = TimedLinearOperator ( ret , timer ) if shape != ret . shape : raise LinearOperatorError ( 'shape mismatch' ) return ret | Enhances aslinearoperator if A is None . |
62,439 | def orthonormality ( V , ip_B = None ) : return norm ( numpy . eye ( V . shape [ 1 ] ) - inner ( V , V , ip_B = ip_B ) ) | Measure orthonormality of given basis . |
62,440 | def arnoldi_res ( A , V , H , ip_B = None ) : N = V . shape [ 0 ] invariant = H . shape [ 0 ] == H . shape [ 1 ] A = get_linearoperator ( ( N , N ) , A ) if invariant : res = A * V - numpy . dot ( V , H ) else : res = A * V [ : , : - 1 ] - numpy . dot ( V , H ) return norm ( res , ip_B = ip_B ) | Measure Arnoldi residual . |
62,441 | def qr ( X , ip_B = None , reorthos = 1 ) : if ip_B is None and X . shape [ 1 ] > 0 : return scipy . linalg . qr ( X , mode = 'economic' ) else : ( N , k ) = X . shape Q = X . copy ( ) R = numpy . zeros ( ( k , k ) , dtype = X . dtype ) for i in range ( k ) : for reortho in range ( reorthos + 1 ) : for j in range ( i ) : alpha = inner ( Q [ : , [ j ] ] , Q [ : , [ i ] ] , ip_B = ip_B ) [ 0 , 0 ] R [ j , i ] += alpha Q [ : , [ i ] ] -= alpha * Q [ : , [ j ] ] R [ i , i ] = norm ( Q [ : , [ i ] ] , ip_B = ip_B ) if R [ i , i ] >= 1e-15 : Q [ : , [ i ] ] /= R [ i , i ] return Q , R | QR factorization with customizable inner product . |
62,442 | def angles ( F , G , ip_B = None , compute_vectors = False ) : reverse = False if F . shape [ 1 ] < G . shape [ 1 ] : reverse = True F , G = G , F QF , _ = qr ( F , ip_B = ip_B ) QG , _ = qr ( G , ip_B = ip_B ) if G . shape [ 1 ] == 0 : theta = numpy . ones ( F . shape [ 1 ] ) * numpy . pi / 2 U = QF V = QG else : Y , s , Z = scipy . linalg . svd ( inner ( QF , QG , ip_B = ip_B ) ) Vcos = numpy . dot ( QG , Z . T . conj ( ) ) n_large = numpy . flatnonzero ( ( s ** 2 ) < 0.5 ) . shape [ 0 ] n_small = s . shape [ 0 ] - n_large theta = numpy . r_ [ numpy . arccos ( s [ n_small : ] ) , numpy . ones ( F . shape [ 1 ] - G . shape [ 1 ] ) * numpy . pi / 2 ] if compute_vectors : Ucos = numpy . dot ( QF , Y ) U = Ucos [ : , n_small : ] V = Vcos [ : , n_small : ] if n_small > 0 : RG = Vcos [ : , : n_small ] S = RG - numpy . dot ( QF , inner ( QF , RG , ip_B = ip_B ) ) _ , R = qr ( S , ip_B = ip_B ) Y , u , Z = scipy . linalg . svd ( R ) theta = numpy . r_ [ numpy . arcsin ( u [ : : - 1 ] [ : n_small ] ) , theta ] if compute_vectors : RF = Ucos [ : , : n_small ] Vsin = numpy . dot ( RG , Z . T . conj ( ) ) Usin = numpy . dot ( RF , numpy . dot ( numpy . diag ( 1 / s [ : n_small ] ) , numpy . dot ( Z . T . conj ( ) , numpy . diag ( s [ : n_small ] ) ) ) ) U = numpy . c_ [ Usin , U ] V = numpy . c_ [ Vsin , V ] if compute_vectors : if reverse : U , V = V , U return theta , U , V else : return theta | Principal angles between two subspaces . |
62,443 | def gap ( lamda , sigma , mode = 'individual' ) : if numpy . isscalar ( lamda ) : lamda = [ lamda ] lamda = numpy . array ( lamda ) if numpy . isscalar ( sigma ) : sigma = [ sigma ] sigma = numpy . array ( sigma ) if not numpy . isreal ( lamda ) . all ( ) or not numpy . isreal ( sigma ) . all ( ) : raise ArgumentError ( 'complex spectra not yet implemented' ) if mode == 'individual' : return numpy . min ( numpy . abs ( numpy . reshape ( lamda , ( len ( lamda ) , 1 ) ) - numpy . reshape ( sigma , ( 1 , len ( sigma ) ) ) ) ) elif mode == 'interval' : lamda_min , lamda_max = numpy . min ( lamda ) , numpy . max ( lamda ) sigma_lo = sigma <= lamda_min sigma_hi = sigma >= lamda_max if not numpy . all ( sigma_lo + sigma_hi ) : return None delta = numpy . Infinity if numpy . any ( sigma_lo ) : delta = lamda_min - numpy . max ( sigma [ sigma_lo ] ) if numpy . any ( sigma_hi ) : delta = numpy . min ( [ delta , numpy . min ( sigma [ sigma_hi ] ) - lamda_max ] ) return delta | Compute spectral gap . |
62,444 | def bound_perturbed_gmres ( pseudo , p , epsilon , deltas ) : if not numpy . all ( numpy . array ( deltas ) > epsilon ) : raise ArgumentError ( 'all deltas have to be greater than epsilon' ) bound = [ ] for delta in deltas : paths = pseudo . contour_paths ( delta ) vertices = paths . vertices ( ) supremum = numpy . max ( numpy . abs ( p ( vertices ) ) ) bound . append ( epsilon / ( delta - epsilon ) * paths . length ( ) / ( 2 * numpy . pi * delta ) * supremum ) return bound | Compute GMRES perturbation bound based on pseudospectrum |
62,445 | def get_residual_norms ( H , self_adjoint = False ) : H = H . copy ( ) n_ , n = H . shape y = numpy . eye ( n_ , 1 , dtype = H . dtype ) resnorms = [ 1. ] for i in range ( n_ - 1 ) : G = Givens ( H [ i : i + 2 , [ i ] ] ) if self_adjoint : H [ i : i + 2 , i : i + 3 ] = G . apply ( H [ i : i + 2 , i : i + 3 ] ) else : H [ i : i + 2 , i : ] = G . apply ( H [ i : i + 2 , i : ] ) y [ i : i + 2 ] = G . apply ( y [ i : i + 2 ] ) resnorms . append ( numpy . abs ( y [ i + 1 , 0 ] ) ) if n_ == n : resnorms . append ( 0. ) return numpy . array ( resnorms ) | Compute relative residual norms from Hessenberg matrix . |
62,446 | def apply ( self , x ) : if len ( x . shape ) != 2 : raise ArgumentError ( 'x is not a matrix of shape (N,*)' ) if self . beta == 0 : return x return x - self . beta * self . v * numpy . dot ( self . v . T . conj ( ) , x ) | Apply Householder transformation to vector x . |
62,447 | def matrix ( self ) : n = self . v . shape [ 0 ] return numpy . eye ( n , n ) - self . beta * numpy . dot ( self . v , self . v . T . conj ( ) ) | Build matrix representation of Householder transformation . |
62,448 | def _apply ( self , a , return_Ya = False ) : r if self . V . shape [ 1 ] == 0 : Pa = numpy . zeros ( a . shape ) if return_Ya : return Pa , numpy . zeros ( ( 0 , a . shape [ 1 ] ) ) return Pa c = inner ( self . W , a , ip_B = self . ip_B ) if return_Ya : Ya = c . copy ( ) if self . WR is not None : Ya = self . WR . T . conj ( ) . dot ( Ya ) if self . Q is not None and self . R is not None : c = scipy . linalg . solve_triangular ( self . R , self . Q . T . conj ( ) . dot ( c ) ) Pa = self . V . dot ( c ) if return_Ya : return Pa , Ya return Pa | r Single application of the projection . |
62,449 | def _apply_adj ( self , a ) : if self . V . shape [ 1 ] == 0 : return numpy . zeros ( a . shape ) c = inner ( self . V , a , ip_B = self . ip_B ) if self . Q is not None and self . R is not None : c = self . Q . dot ( scipy . linalg . solve_triangular ( self . R . T . conj ( ) , c , lower = True ) ) return self . W . dot ( c ) | Single application of the adjoint projection . |
62,450 | def apply ( self , a , return_Ya = False ) : r if self . V . shape [ 1 ] == 0 : Pa = numpy . zeros ( a . shape ) if return_Ya : return Pa , numpy . zeros ( ( 0 , a . shape [ 1 ] ) ) return Pa if return_Ya : x , Ya = self . _apply ( a , return_Ya = return_Ya ) else : x = self . _apply ( a ) for i in range ( self . iterations - 1 ) : z = a - x w = self . _apply ( z ) x = x + w if return_Ya : return x , Ya return x | r Apply the projection to an array . |
62,451 | def apply_complement ( self , a , return_Ya = False ) : if self . V . shape [ 1 ] == 0 : if return_Ya : return a . copy ( ) , numpy . zeros ( ( 0 , a . shape [ 1 ] ) ) return a . copy ( ) if return_Ya : x , Ya = self . _apply ( a , return_Ya = True ) else : x = self . _apply ( a ) z = a - x for i in range ( self . iterations - 1 ) : w = self . _apply ( z ) z = z - w if return_Ya : return z , Ya return z | Apply the complementary projection to an array . |
62,452 | def get ( self , key ) : if key in self and len ( self [ key ] ) > 0 : return min ( self [ key ] ) else : return 0 | Return timings for key . Returns 0 if not present . |
62,453 | def get_ops ( self , ops ) : time = 0. for op , count in ops . items ( ) : time += self . get ( op ) * count return time | Return timings for dictionary ops holding the operation names as keys and the number of applications as values . |
62,454 | def min_pos ( self ) : if self . __len__ ( ) == 0 : return ArgumentError ( 'empty set has no minimum positive value.' ) if self . contains ( 0 ) : return None positive = [ interval for interval in self . intervals if interval . left > 0 ] if len ( positive ) == 0 : return None return numpy . min ( list ( map ( lambda i : i . left , positive ) ) ) | Returns minimal positive value or None . |
62,455 | def max_neg ( self ) : if self . __len__ ( ) == 0 : return ArgumentError ( 'empty set has no maximum negative value.' ) if self . contains ( 0 ) : return None negative = [ interval for interval in self . intervals if interval . right < 0 ] if len ( negative ) == 0 : return None return numpy . max ( list ( map ( lambda i : i . right , negative ) ) ) | Returns maximum negative value or None . |
62,456 | def min_abs ( self ) : if self . __len__ ( ) == 0 : return ArgumentError ( 'empty set has no minimum absolute value.' ) if self . contains ( 0 ) : return 0 return numpy . min ( [ numpy . abs ( val ) for val in [ self . max_neg ( ) , self . min_pos ( ) ] if val is not None ] ) | Returns minimum absolute value . |
62,457 | def max_abs ( self ) : if self . __len__ ( ) == 0 : return ArgumentError ( 'empty set has no maximum absolute value.' ) return numpy . max ( numpy . abs ( [ self . max ( ) , self . min ( ) ] ) ) | Returns maximum absolute value . |
62,458 | def get_step ( self , tol ) : return 2 * numpy . log ( tol / 2. ) / numpy . log ( self . base ) | Return step at which bound falls below tolerance . |
62,459 | def minmax_candidates ( self ) : from numpy . polynomial import Polynomial as P p = P . fromroots ( self . roots ) return p . deriv ( 1 ) . roots ( ) | Get points where derivative is zero . |
62,460 | def errors ( self ) : try : self . now = datetime . datetime . now ( ) if len ( self . alarm_day ) < 2 or len ( self . alarm_day ) > 2 : print ( "error: day: usage 'DD' such us '0%s' not '%s'" % ( self . alarm_day , self . alarm_day ) ) self . RUN_ALARM = False if int ( self . alarm_day ) > calendar . monthrange ( self . now . year , self . now . month ) [ 1 ] or int ( self . alarm_day ) < 1 : print ( "error: day: out of range" ) self . RUN_ALARM = False if ( len ( self . alarm_time ) != len ( self . alarm_pattern ) or len ( self . alarm_time [ 0 ] ) < 2 or len ( self . alarm_time [ 0 ] ) > 2 or len ( self . alarm_time [ 1 ] ) < 2 or len ( self . alarm_time [ 1 ] ) > 2 ) : print ( "error: time: usage '%s'" % ":" . join ( self . alarm_pattern ) ) self . RUN_ALARM = False if int ( self . alarm_hour ) not in range ( 0 , 24 ) : print ( "error: hour: out of range" ) self . RUN_ALARM = False if int ( self . alarm_minutes ) not in range ( 0 , 60 ) : print ( "error: minutes: out of range" ) self . RUN_ALARM = False except ValueError : print ( "Usage '%s'" % ":" . join ( self . alarm_pattern ) ) self . RUN_ALARM = False if not os . path . isfile ( self . song ) : print ( "error: song: file does not exist" ) self . RUN_ALARM = False | Check for usage errors |
62,461 | def _get_best_subset ( self , ritz ) : overall_evaluations = { } def evaluate ( _subset , _evaluations ) : try : _evaluations [ _subset ] = self . subset_evaluator . evaluate ( ritz , _subset ) except utils . AssumptionError : pass current_subset = frozenset ( ) evaluate ( current_subset , overall_evaluations ) while True : remaining_subset = set ( range ( len ( ritz . values ) ) ) . difference ( current_subset ) subsets = self . subsets_generator . generate ( ritz , remaining_subset ) if len ( subsets ) == 0 : break evaluations = { } for subset in subsets : eval_subset = current_subset . union ( subset ) evaluate ( eval_subset , evaluations ) if len ( evaluations ) > 0 : current_subset = min ( evaluations , key = evaluations . get ) else : resnorms = [ numpy . sum ( ritz . resnorms [ list ( subset ) ] ) for subset in subsets ] subset = subsets [ numpy . argmin ( resnorms ) ] current_subset = current_subset . union ( subset ) overall_evaluations . update ( evaluations ) if len ( overall_evaluations ) > 0 : selection = list ( min ( overall_evaluations , key = overall_evaluations . get ) ) else : selection = [ ] if self . print_results == 'number' : print ( '# of selected deflation vectors: {0}' . format ( len ( selection ) ) ) elif self . print_results == 'values' : print ( '{0} Ritz values corresponding to selected deflation ' . format ( len ( selection ) ) + 'vectors: ' + ( ', ' . join ( [ str ( el ) for el in ritz . values [ selection ] ] ) ) ) elif self . print_results == 'timings' : import operator print ( 'Timings for all successfully evaluated choices of ' 'deflation vectors with corresponding Ritz values:' ) for subset , time in sorted ( overall_evaluations . items ( ) , key = operator . itemgetter ( 1 ) ) : print ( ' {0}s: ' . format ( time ) + ', ' . join ( [ str ( el ) for el in ritz . values [ list ( subset ) ] ] ) ) elif self . print_results is None : pass else : raise utils . ArgumentError ( 'Invalid value `{0}` for argument `print_result`. 
' . format ( self . print_results ) + 'Valid are `None`, `number`, `values` and `timings`.' ) return selection | Return candidate set with smallest goal functional . |
62,462 | def set_default_command ( self , command ) : cmd_name = command . name self . add_command ( command ) self . default_cmd_name = cmd_name | Sets a command function as the default command . |
62,463 | def get_residual ( self , z , compute_norm = False ) : r if z is None : if compute_norm : return self . MMlb , self . Mlb , self . MMlb_norm return self . MMlb , self . Mlb r = self . b - self . A * z Mlr = self . Ml * r MMlr = self . M * Mlr if compute_norm : return MMlr , Mlr , utils . norm ( Mlr , MMlr , ip_B = self . ip_B ) return MMlr , Mlr | r Compute residual . |
62,464 | def get_ip_Minv_B ( self ) : if not isinstance ( self . M , utils . IdentityLinearOperator ) : if isinstance ( self . Minv , utils . IdentityLinearOperator ) : raise utils . ArgumentError ( 'Minv has to be provided for the evaluation of the inner ' 'product that is implicitly defined by M.' ) if isinstance ( self . ip_B , utils . LinearOperator ) : return self . Minv * self . ip_B else : return lambda x , y : self . ip_B ( x , self . Minv * y ) return self . ip_B | Returns the inner product that is implicitly used with the positive definite preconditioner M . |
62,465 | def _get_xk ( self , yk ) : if yk is not None : return self . x0 + self . linear_system . Mr * yk return self . x0 | Compute approximate solution from initial guess and approximate solution of the preconditioned linear system . |
62,466 | def _finalize_iteration ( self , yk , resnorm ) : self . xk = None if self . linear_system . exact_solution is not None : self . xk = self . _get_xk ( yk ) self . errnorms . append ( utils . norm ( self . linear_system . exact_solution - self . xk , ip_B = self . linear_system . ip_B ) ) rkn = None if self . explicit_residual or resnorm / self . linear_system . MMlb_norm <= self . tol or self . iter + 1 == self . maxiter : if self . xk is None : self . xk = self . _get_xk ( yk ) _ , _ , rkn = self . linear_system . get_residual ( self . xk , compute_norm = True ) self . resnorms . append ( rkn / self . linear_system . MMlb_norm ) if self . resnorms [ - 1 ] > self . tol : if self . iter + 1 == self . maxiter : self . _finalize ( ) raise utils . ConvergenceError ( ( 'No convergence in last iteration ' '(maxiter: {0}, residual: {1}).' ) . format ( self . maxiter , self . resnorms [ - 1 ] ) , self ) elif not self . explicit_residual and resnorm / self . linear_system . MMlb_norm <= self . tol : warnings . warn ( 'updated residual is below tolerance, explicit ' 'residual is NOT! (upd={0} <= tol={1} < exp={2})' . format ( resnorm , self . tol , self . resnorms [ - 1 ] ) ) else : self . resnorms . append ( resnorm / self . linear_system . MMlb_norm ) return rkn | Compute solution error norm and residual norm if required . |
62,467 | def operations ( nsteps ) : return { 'A' : 1 + nsteps , 'M' : 2 + nsteps , 'Ml' : 2 + nsteps , 'Mr' : 1 + nsteps , 'ip_B' : 2 + nsteps + nsteps * ( nsteps + 1 ) / 2 , 'axpy' : 4 + 2 * nsteps + nsteps * ( nsteps + 1 ) / 2 } | Returns the number of operations needed for nsteps of GMRES |
62,468 | def solve ( self , linear_system , vector_factory = None , * args , ** kwargs ) : if not isinstance ( linear_system , linsys . TimedLinearSystem ) : linear_system = linsys . ConvertedTimedLinearSystem ( linear_system ) with self . timings [ 'vector_factory' ] : if vector_factory is None : vector_factory = self . _vector_factory if vector_factory == 'RitzApproxKrylov' : vector_factory = factories . RitzFactory ( subset_evaluator = evaluators . RitzApproxKrylov ( ) ) elif vector_factory == 'RitzAprioriCg' : vector_factory = factories . RitzFactory ( subset_evaluator = evaluators . RitzApriori ( Bound = utils . BoundCG ) ) elif vector_factory == 'RitzAprioriMinres' : vector_factory = factories . RitzFactory ( subset_evaluator = evaluators . RitzApriori ( Bound = utils . BoundMinres ) ) if self . last_solver is None or vector_factory is None : U = numpy . zeros ( ( linear_system . N , 0 ) ) else : U = vector_factory . get ( self . last_solver ) with self . timings [ 'solve' ] : self . last_solver = self . _DeflatedSolver ( linear_system , U = U , store_arnoldi = True , * args , ** kwargs ) return self . last_solver | Solve the given linear system with recycling . |
62,469 | def compute_hash ( func , string ) : h = func ( ) h . update ( string ) return h . hexdigest ( ) | compute hash of string using given hash function |
62,470 | def get_local_serial ( ) : return [ x for x in [ subprocess . Popen ( "system_profiler SPHardwareDataType |grep -v tray |awk '/Serial/ {print $4}'" , shell = True , stdout = subprocess . PIPE ) . communicate ( ) [ 0 ] . strip ( ) ] if x ] | Retrieves the serial number from the executing host . For example C02NT43PFY14 |
62,471 | def _estimate_eval_intervals ( ritz , indices , indices_remaining , eps_min = 0 , eps_max = 0 , eps_res = None ) : if len ( indices ) == 0 : return utils . Intervals ( [ utils . Interval ( mu - resnorm , mu + resnorm ) for mu , resnorm in zip ( ritz . values , ritz . resnorms ) ] ) if len ( ritz . values ) == len ( indices ) : raise utils . AssumptionError ( 'selection of all Ritz pairs does not allow estimation.' ) if eps_res is None : eps_res = numpy . max ( numpy . abs ( [ eps_min , eps_max ] ) ) delta_sel = numpy . linalg . norm ( ritz . resnorms [ indices ] , 2 ) delta_non_sel = numpy . linalg . norm ( ritz . resnorms [ indices_remaining ] , 2 ) delta = utils . gap ( ritz . values [ indices ] , ritz . values [ indices_remaining ] ) mu_ints = utils . Intervals ( [ utils . Interval ( mu + eps_min , mu + eps_max ) for mu in ritz . values [ indices ] ] ) mu_min = mu_ints . min_abs ( ) if delta_sel + eps_max - eps_min >= delta : raise utils . AssumptionError ( 'delta_sel + delta_non_sel + eps_max - eps_min >= delta' + '({0} >= {1}' . format ( delta_sel + delta_non_sel + eps_max - eps_min , delta ) ) if mu_min == 0 : raise utils . AssumptionError ( 'mu_min == 0 not allowed' ) eta = ( delta_sel + eps_res ) ** 2 * ( 1 / ( delta - eps_max + eps_min ) + 1 / mu_min ) left = eps_min - eta right = eps_max + eta return utils . Intervals ( [ utils . Interval ( mu + left , mu + right ) for mu in ritz . values [ indices_remaining ] ] ) | Estimate evals based on eval inclusion theorem + heuristic . |
62,472 | def correct ( self , z ) : c = self . linear_system . Ml * ( self . linear_system . b - self . linear_system . A * z ) c = utils . inner ( self . W , c , ip_B = self . ip_B ) if self . Q is not None and self . R is not None : c = scipy . linalg . solve_triangular ( self . R , self . Q . T . conj ( ) . dot ( c ) ) if self . WR is not self . VR : c = self . WR . dot ( scipy . linalg . solve_triangular ( self . VR , c ) ) return z + self . W . dot ( c ) | Correct the given approximate solution z with respect to the linear system linear_system and the deflation space defined by U . |
62,473 | def _apply_projection ( self , Av ) : PAv , UAv = self . projection . apply_complement ( Av , return_Ya = True ) self . C = numpy . c_ [ self . C , UAv ] return PAv | Apply the projection and store inner product . |
62,474 | def _get_initial_residual ( self , x0 ) : if x0 is None : Mlr = self . linear_system . Mlb else : r = self . linear_system . b - self . linear_system . A * x0 Mlr = self . linear_system . Ml * r PMlr , self . UMlr = self . projection . apply_complement ( Mlr , return_Ya = True ) MPMlr = self . linear_system . M * PMlr MPMlr_norm = utils . norm ( PMlr , MPMlr , ip_B = self . linear_system . ip_B ) return MPMlr , PMlr , MPMlr_norm | Return the projected initial residual . |
62,475 | def estimate_time ( self , nsteps , ndefl , deflweight = 1.0 ) : solver_ops = self . operations ( nsteps ) proj_ops = { 'A' : ndefl , 'M' : ndefl , 'Ml' : ndefl , 'Mr' : ndefl , 'ip_B' : ( ndefl * ( ndefl + 1 ) / 2 + ndefl ** 2 + 2 * ndefl * solver_ops [ 'Ml' ] ) , 'axpy' : ( ndefl * ( ndefl + 1 ) / 2 + ndefl * ndefl + ( 2 * ndefl + 2 ) * solver_ops [ 'Ml' ] ) } if not isinstance ( self . linear_system , linsys . TimedLinearSystem ) : raise utils . RuntimeError ( 'A `TimedLinearSystem` has to be used in order to obtain ' 'timings.' ) timings = self . linear_system . timings return ( timings . get_ops ( solver_ops ) + deflweight * timings . get_ops ( proj_ops ) ) | Estimate time needed to run nsteps iterations with deflation |
62,476 | def get_vectors ( self , indices = None ) : H_ = self . _deflated_solver . H ( n_ , n ) = H_ . shape coeffs = self . coeffs if indices is None else self . coeffs [ : , indices ] return numpy . c_ [ self . _deflated_solver . V [ : , : n ] , self . _deflated_solver . projection . U ] . dot ( coeffs ) | Compute Ritz vectors . |
62,477 | def get_explicit_residual ( self , indices = None ) : ritz_vecs = self . get_vectors ( indices ) return self . _deflated_solver . linear_system . MlAMr * ritz_vecs - ritz_vecs * self . values | Explicitly computes the Ritz residual . |
62,478 | def get_explicit_resnorms ( self , indices = None ) : res = self . get_explicit_residual ( indices ) linear_system = self . _deflated_solver . linear_system Mres = linear_system . M * res resnorms = numpy . zeros ( res . shape [ 1 ] ) for i in range ( resnorms . shape [ 0 ] ) : resnorms [ i ] = utils . norm ( res [ : , [ i ] ] , Mres [ : , [ i ] ] , ip_B = linear_system . ip_B ) return resnorms | Explicitly computes the Ritz residual norms . |
62,479 | def transform_data ( self , data ) : def type_check ( value ) : if pd . isnull ( value ) : return None elif ( isinstance ( value , pd . tslib . Timestamp ) or isinstance ( value , pd . Period ) ) : return time . mktime ( value . timetuple ( ) ) elif isinstance ( value , ( int , np . integer ) ) : return int ( value ) elif isinstance ( value , ( float , np . float_ ) ) : return float ( value ) elif isinstance ( value , str ) : return str ( value ) else : return value objectify = lambda dat : [ { "x" : type_check ( x ) , "y" : type_check ( y ) } for x , y in dat . iteritems ( ) ] self . raw_data = data if isinstance ( data , pd . Series ) : data . name = data . name or 'data' self . json_data = [ { 'name' : data . name , 'data' : objectify ( data ) } ] elif isinstance ( data , pd . DataFrame ) : self . json_data = [ { 'name' : x [ 0 ] , 'data' : objectify ( x [ 1 ] ) } for x in data . iteritems ( ) ] | Transform Pandas Timeseries into JSON format |
62,480 | def _build_graph ( self ) : if not self . colors : self . palette = self . env . get_template ( 'palette.js' ) self . template_vars . update ( { 'palette' : self . palette . render ( ) } ) self . colors = { x [ 'name' ] : 'palette.color()' for x in self . json_data } template_vars = [ ] for index , dataset in enumerate ( self . json_data ) : group = 'datagroup' + str ( index ) template_vars . append ( { 'name' : str ( dataset [ 'name' ] ) , 'color' : self . colors [ dataset [ 'name' ] ] , 'data' : 'json[{0}].data' . format ( index ) } ) variables = { 'dataset' : template_vars , 'width' : self . width , 'height' : self . height , 'render' : self . renderer , 'chart_id' : self . chart_id } if not self . y_zero : variables . update ( { 'min' : "min: 'auto'," } ) graph = self . env . get_template ( 'graph.js' ) self . template_vars . update ( { 'graph' : graph . render ( variables ) } ) | Build Rickshaw graph syntax with all data |
62,481 | def create_chart ( self , html_path = 'index.html' , data_path = 'data.json' , js_path = 'rickshaw.min.js' , css_path = 'rickshaw.min.css' , html_prefix = '' ) : self . template_vars . update ( { 'data_path' : str ( data_path ) , 'js_path' : js_path , 'css_path' : css_path , 'chart_id' : self . chart_id , 'y_axis_id' : self . y_axis_id , 'legend_id' : self . legend_id , 'slider_id' : self . slider_id } ) self . _build_graph ( ) html = self . env . get_template ( 'bcart_template.html' ) self . HTML = html . render ( self . template_vars ) with open ( os . path . join ( html_prefix , html_path ) , 'w' ) as f : f . write ( self . HTML ) with open ( os . path . join ( html_prefix , data_path ) , 'w' ) as f : json . dump ( self . json_data , f , sort_keys = True , indent = 4 , separators = ( ',' , ': ' ) ) if js_path : js = resource_string ( 'bearcart' , 'rickshaw.min.js' ) with open ( os . path . join ( html_prefix , js_path ) , 'w' ) as f : f . write ( js ) if css_path : css = resource_string ( 'bearcart' , 'rickshaw.min.css' ) with open ( os . path . join ( html_prefix , css_path ) , 'w' ) as f : f . write ( css ) | Save bearcart output to HTML and JSON . |
62,482 | def set_expire ( self , y = 2999 , mon = 12 , d = 28 , h = 23 , min_ = 59 , s = 59 ) : if type ( y ) is not int or type ( mon ) is not int or type ( d ) is not int or type ( h ) is not int or type ( min_ ) is not int or type ( s ) is not int : raise KPError ( "Date variables must be integers" ) elif y > 9999 or y < 1 or mon > 12 or mon < 1 or d > 31 or d < 1 or h > 23 or h < 0 or min_ > 59 or min_ < 0 or s > 59 or s < 0 : raise KPError ( "No legal date" ) elif ( ( mon == 1 or mon == 3 or mon == 5 or mon == 7 or mon == 8 or mon == 10 or mon == 12 ) and d > 31 ) or ( ( mon == 4 or mon == 6 or mon == 9 or mon == 11 ) and d > 30 ) or ( mon == 2 and d > 28 ) : raise KPError ( "Given day doesn't exist in given month" ) else : self . expire = datetime ( y , mon , d , h , min_ , s ) self . last_mod = datetime . now ( ) . replace ( microsecond = 0 ) return True | This method is used to change the expire date of a group |
62,483 | def create_entry ( self , title = '' , image = 1 , url = '' , username = '' , password = '' , comment = '' , y = 2999 , mon = 12 , d = 28 , h = 23 , min_ = 59 , s = 59 ) : return self . db . create_entry ( self , title , image , url , username , password , comment , y , mon , d , h , min_ , s ) | This method creates an entry in this group . |
62,484 | def set_title ( self , title = None ) : if title is None or type ( title ) is not str : raise KPError ( "Need a new title." ) else : self . title = title self . last_mod = datetime . now ( ) . replace ( microsecond = 0 ) return True | This method is used to change an entry title . |
62,485 | def set_image ( self , image = None ) : if image is None or type ( image ) is not int : raise KPError ( "Need a new image number" ) else : self . image = image self . last_mod = datetime . now ( ) . replace ( microsecond = 0 ) return True | This method is used to set the image number . |
62,486 | def set_url ( self , url = None ) : if url is None or type ( url ) is not str : raise KPError ( "Need a new image number" ) else : self . url = url self . last_mod = datetime . now ( ) . replace ( microsecond = 0 ) return True | This method is used to set the url . |
62,487 | def set_username ( self , username = None ) : if username is None or type ( username ) is not str : raise KPError ( "Need a new image number" ) else : self . username = username self . last_mod = datetime . now ( ) . replace ( microsecond = 0 ) return True | This method is used to set the username . |
62,488 | def set_password ( self , password = None ) : if password is None or type ( password ) is not str : raise KPError ( "Need a new image number" ) else : self . password = password self . last_mod = datetime . now ( ) . replace ( microsecond = 0 ) return True | This method is used to set the password . |
62,489 | def set_comment ( self , comment = None ) : if comment is None or type ( comment ) is not str : raise KPError ( "Need a new image number" ) else : self . comment = comment self . last_mod = datetime . now ( ) . replace ( microsecond = 0 ) return True | This method is used to set the comment .
62,490 | def read_buf ( self ) : with open ( self . filepath , 'rb' ) as handler : try : buf = handler . read ( ) if len ( buf ) < 124 : raise KPError ( 'Unexpected file size. It should be more or' 'equal 124 bytes but it is ' '{0}!' . format ( len ( buf ) ) ) except : raise return buf | Read database file |
62,491 | def close ( self ) : if self . filepath is not None : if path . isfile ( self . filepath + '.lock' ) : remove ( self . filepath + '.lock' ) self . filepath = None self . read_only = False self . lock ( ) return True else : raise KPError ( 'Can\'t close a not opened file' ) | This method closes the database correctly . |
62,492 | def lock ( self ) : self . password = None self . keyfile = None self . groups [ : ] = [ ] self . entries [ : ] = [ ] self . _group_order [ : ] = [ ] self . _entry_order [ : ] = [ ] self . root_group = v1Group ( ) self . _num_groups = 1 self . _num_entries = 0 return True | This method locks the database . |
62,493 | def unlock ( self , password = None , keyfile = None , buf = None ) : if ( ( password is None or password == "" ) and ( keyfile is None or keyfile == "" ) ) : raise KPError ( "A password/keyfile is needed" ) elif ( ( type ( password ) is not str and password is not None ) or ( type ( keyfile ) is not str and keyfile is not None ) ) : raise KPError ( "password/keyfile must be a string." ) if keyfile == "" : keyfile = None if password == "" : password = None self . password = password self . keyfile = keyfile return self . load ( buf ) | Unlock the database . masterkey is needed . |
62,494 | def remove_group ( self , group = None ) : if group is None : raise KPError ( "Need group to remove a group" ) elif type ( group ) is not v1Group : raise KPError ( "group must be v1Group" ) children = [ ] entries = [ ] if group in self . groups : children . extend ( group . children ) entries . extend ( group . entries ) group . parent . children . remove ( group ) self . groups . remove ( group ) else : raise KPError ( "Given group doesn't exist" ) self . _num_groups -= 1 for i in children : self . remove_group ( i ) for i in entries : self . remove_entry ( i ) return True | This method removes a group . |
62,495 | def move_group ( self , group = None , parent = None ) : if group is None or type ( group ) is not v1Group : raise KPError ( "A valid group must be given." ) elif parent is not None and type ( parent ) is not v1Group : raise KPError ( "parent must be a v1Group." ) elif group is parent : raise KPError ( "group and parent must not be the same group" ) if parent is None : parent = self . root_group if group in self . groups : self . groups . remove ( group ) group . parent . children . remove ( group ) group . parent = parent if parent . children : if parent . children [ - 1 ] is self . groups [ - 1 ] : self . groups . append ( group ) else : new_index = self . groups . index ( parent . children [ - 1 ] ) + 1 self . groups . insert ( new_index , group ) else : new_index = self . groups . index ( parent ) + 1 self . groups . insert ( new_index , group ) parent . children . append ( group ) if parent is self . root_group : group . level = 0 else : group . level = parent . level + 1 if group . children : self . _move_group_helper ( group ) group . last_mod = datetime . now ( ) . replace ( microsecond = 0 ) return True else : raise KPError ( "Didn't find given group." ) | Append group to a new parent . |
62,496 | def move_group_in_parent ( self , group = None , index = None ) : if group is None or index is None : raise KPError ( "group and index must be set" ) elif type ( group ) is not v1Group or type ( index ) is not int : raise KPError ( "group must be a v1Group-instance and index " "must be an integer." ) elif group not in self . groups : raise KPError ( "Given group doesn't exist" ) elif index < 0 or index >= len ( group . parent . children ) : raise KPError ( "index must be a valid index if group.parent.groups" ) else : group_at_index = group . parent . children [ index ] pos_in_parent = group . parent . children . index ( group ) pos_in_groups = self . groups . index ( group ) pos_in_groups2 = self . groups . index ( group_at_index ) group . parent . children [ index ] = group group . parent . children [ pos_in_parent ] = group_at_index self . groups [ pos_in_groups2 ] = group self . groups [ pos_in_groups ] = group_at_index if group . children : self . _move_group_helper ( group ) if group_at_index . children : self . _move_group_helper ( group_at_index ) group . last_mod = datetime . now ( ) . replace ( microsecond = 0 ) return True | Move group to another position in group s parent . index must be a valid index of group . parent . groups |
62,497 | def _move_group_helper ( self , group ) : for i in group . children : self . groups . remove ( i ) i . level = group . level + 1 self . groups . insert ( self . groups . index ( group ) + 1 , i ) if i . children : self . _move_group_helper ( i ) | A helper to move the children of a group .
62,498 | def create_entry ( self , group = None , title = "" , image = 1 , url = "" , username = "" , password = "" , comment = "" , y = 2999 , mon = 12 , d = 28 , h = 23 , min_ = 59 , s = 59 ) : if ( type ( title ) is not str or type ( image ) is not int or image < 0 or type ( url ) is not str or type ( username ) is not str or type ( password ) is not str or type ( comment ) is not str or type ( y ) is not int or type ( mon ) is not int or type ( d ) is not int or type ( h ) is not int or type ( min_ ) is not int or type ( s ) is not int or type ( group ) is not v1Group ) : raise KPError ( "One argument has not a valid type." ) elif group not in self . groups : raise KPError ( "Group doesn't exist." ) elif ( y > 9999 or y < 1 or mon > 12 or mon < 1 or d > 31 or d < 1 or h > 23 or h < 0 or min_ > 59 or min_ < 0 or s > 59 or s < 0 ) : raise KPError ( "No legal date" ) elif ( ( ( mon == 1 or mon == 3 or mon == 5 or mon == 7 or mon == 8 or mon == 10 or mon == 12 ) and d > 31 ) or ( ( mon == 4 or mon == 6 or mon == 9 or mon == 11 ) and d > 30 ) or ( mon == 2 and d > 28 ) ) : raise KPError ( "Given day doesn't exist in given month" ) Random . atfork ( ) uuid = Random . get_random_bytes ( 16 ) entry = v1Entry ( group . id_ , group , image , title , url , username , password , comment , datetime . now ( ) . replace ( microsecond = 0 ) , datetime . now ( ) . replace ( microsecond = 0 ) , datetime . now ( ) . replace ( microsecond = 0 ) , datetime ( y , mon , d , h , min_ , s ) , uuid ) self . entries . append ( entry ) group . entries . append ( entry ) self . _num_entries += 1 return True | This method creates a new entry . The group which should hold the entry is needed . |
62,499 | def remove_entry ( self , entry = None ) : if entry is None or type ( entry ) is not v1Entry : raise KPError ( "Need an entry." ) elif entry in self . entries : entry . group . entries . remove ( entry ) self . entries . remove ( entry ) self . _num_entries -= 1 return True else : raise KPError ( "Given entry doesn't exist." ) | This method can remove entries . The v1Entry - object entry is needed . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.