idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
6,200
def _validate_config ( config ) : if not isinstance ( config , list ) : raise TypeError ( 'Config must be a list' ) for config_dict in config : if not isinstance ( config_dict , dict ) : raise TypeError ( 'Config must be a list of dictionaries' ) label = config_dict . keys ( ) [ 0 ] cfg = config_dict [ label ] if not isinstance ( cfg , dict ) : raise TypeError ( 'Config structure is broken' ) if 'host' not in cfg : raise TypeError ( 'Config entries must have a value for host' ) if not isinstance ( cfg [ 'host' ] , str ) and not isinstance ( cfg [ 'host' ] , list ) : raise TypeError ( 'Host must be a string or a list.' ) if 'port' not in cfg : raise TypeError ( 'Config entries must have a value for port' ) if not isinstance ( cfg [ 'port' ] , int ) : raise TypeError ( 'Port must be an int' ) if 'dbpath' not in cfg : raise TypeError ( 'Config entries must have a value for dbpath' ) if not isinstance ( cfg [ 'dbpath' ] , str ) : if not isinstance ( cfg [ 'dbpath' ] , list ) : raise TypeError ( 'Dbpath must either a string or a list of ' 'strings' ) for dbpath in cfg [ 'dbpath' ] : if not isinstance ( dbpath , str ) : raise TypeError ( 'Dbpath must either a string or a list ' 'of strings' ) if ( 'read_preference' in cfg and not isinstance ( cfg [ 'read_preference' ] , str ) ) : raise TypeError ( 'Read_preference must be a string' ) if ( 'replicaSet' in cfg and not isinstance ( cfg [ 'replicaSet' ] , str ) ) : raise TypeError ( 'replicaSet must be a string' )
Validate that the provided configuration is valid .
6,201
def _parse_configs ( self , config ) : for config_dict in config : label = config_dict . keys ( ) [ 0 ] cfg = config_dict [ label ] dbpath = cfg [ 'dbpath' ] pattern = self . _parse_dbpath ( dbpath ) read_preference = cfg . get ( 'read_preference' , 'primary' ) . upper ( ) read_preference = self . _get_read_preference ( read_preference ) cluster_config = { 'params' : { 'host' : cfg [ 'host' ] , 'port' : cfg [ 'port' ] , 'read_preference' : read_preference , 'replicaSet' : cfg . get ( 'replicaSet' ) } , 'pattern' : pattern , 'label' : label } self . _clusters . append ( cluster_config )
Builds a dict with information to connect to Clusters .
6,202
def _parse_dbpath ( dbpath ) : if isinstance ( dbpath , list ) : dbpath = '|' . join ( dbpath ) if not dbpath . endswith ( '$' ) : dbpath = '(%s)$' % dbpath return dbpath
Converts the dbpath to a regexp pattern .
6,203
def _get_read_preference(read_preference):
    """Convert a read-preference name into a pymongo.ReadPreference value.

    Raises:
        ValueError: if the name is not a valid pymongo read preference.
    """
    preference = getattr(pymongo.ReadPreference, read_preference, None)
    if preference is None:
        # Bug fix: the original rebound read_preference to None before
        # formatting the message, so it always said "Invalid read
        # preference: None" instead of naming the offending value.
        raise ValueError('Invalid read preference: %s' % read_preference)
    return preference
Converts read_preference from string to pymongo . ReadPreference value .
6,204
def set_timeout(self, network_timeout):
    """Set the network timeout and drop current connections so that
    future Clients pick up the new value."""
    if network_timeout != self._network_timeout:
        self._network_timeout = network_timeout
        self._disconnect()
Set the timeout for existing and future Clients .
6,205
def _disconnect ( self ) : for cluster in self . _clusters : if 'connection' in cluster : connection = cluster . pop ( 'connection' ) connection . close ( ) for dbname in self . _mapped_databases : self . __delattr__ ( dbname ) self . _mapped_databases = [ ]
Disconnect from all MongoDB Clients .
6,206
def _get_connection ( self , cluster ) : if 'connection' not in cluster : cluster [ 'connection' ] = self . _connection_class ( socketTimeoutMS = self . _network_timeout , w = 1 , j = self . j , ** cluster [ 'params' ] ) return cluster [ 'connection' ]
Return a connection to a Cluster .
6,207
def _match_dbname ( self , dbname ) : for config in self . _clusters : if re . match ( config [ 'pattern' ] , dbname ) : return config raise Exception ( 'No such database %s.' % dbname )
Map a database name to the Cluster that holds the database .
6,208
def try_ntime(max_try, func, *args, **kwargs):
    """Call func up to max_try times, returning the first successful result.

    Re-raises the last exception when every attempt fails.

    Raises:
        ValueError: if max_try < 1.
    """
    if max_try < 1:
        # Bug fix: the original raised a bare ValueError with no message.
        raise ValueError('max_try must be at least 1')
    last_exception = None
    for _ in range(max_try):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            last_exception = e
    raise last_exception
Try execute a function n times until no exception raised or tried max_try times .
6,209
def highlightjs_javascript(jquery=None):
    """Return the HTML <script> tags loading highlight.js (and, when
    enabled by settings or the jquery argument, jQuery) plus the
    initialization snippet."""
    javascript = ''
    if jquery is None:
        jquery = get_highlightjs_setting('include_jquery', False)
    if jquery:
        jquery_url = highlightjs_jquery_url()
        if jquery_url:
            javascript += '<script src="{url}"></script>'.format(url=jquery_url)
    hljs_url = highlightjs_url()
    if hljs_url:
        javascript += '<script src="{url}"></script>'.format(url=hljs_url)
    javascript += '<script>hljs.initHighlightingOnLoad();</script>'
    return javascript
Return HTML for highlightjs JavaScript .
6,210
def repo(name: str, owner: str) -> snug.Query[dict]:
    """Look up a repository by owner and name (relative-URL query)."""
    response = yield f'/repos/{owner}/{name}'
    return json.loads(response.content)
a repository lookup by owner and name
6,211
def repo(name: str, owner: str) -> snug.Query[dict]:
    """Look up a repository by owner and name via the GitHub API."""
    response = yield snug.GET(f'https://api.github.com/repos/{owner}/{name}')
    return json.loads(response.content)
a repo lookup by owner and name
6,212
def follow(name: str) -> snug.Query[bool]:
    """Follow another user; True when the API confirms with HTTP 204."""
    response = yield snug.PUT(f'https://api.github.com/user/following/{name}')
    return response.status_code == 204
follow another user
6,213
def taskinfo(self):
    """Retrieve and normalise this task's definition from the task engine.

    Queries the engine with a 'QueryTask' request and rewrites the raw
    upper-case keys of the returned definition (and of each parameter)
    into the lower-case/camelCase names used by callers.
    """
    task_input = {
        'taskName': 'QueryTask',
        'inputParameters': {"Task_Name": self._name}
    }
    info = taskengine.execute(task_input, self._engine, cwd=self._cwd)
    task_def = info['outputParameters']['DEFINITION']
    task_def['name'] = str(task_def.pop('NAME'))
    task_def['description'] = str(task_def.pop('DESCRIPTION'))
    task_def['displayName'] = str(task_def.pop('DISPLAY_NAME'))
    if 'COMMUTE_ON_SUBSET' in task_def:
        task_def['commute_on_subset'] = task_def.pop('COMMUTE_ON_SUBSET')
    if 'COMMUTE_ON_DOWNSAMPLE' in task_def:
        task_def['commute_on_downsample'] = task_def.pop('COMMUTE_ON_DOWNSAMPLE')
    task_def['parameters'] = list(task_def.pop('PARAMETERS').values())
    for parameter in task_def['parameters']:
        parameter['name'] = str(parameter.pop('NAME'))
        parameter['description'] = str(parameter.pop('DESCRIPTION'))
        parameter['display_name'] = str(parameter.pop('DISPLAY_NAME'))
        parameter['required'] = bool(parameter.pop('REQUIRED'))
        if 'MIN' in parameter:
            parameter['min'] = parameter.pop('MIN')
        if 'MAX' in parameter:
            parameter['max'] = parameter.pop('MAX')
        # 'TYPE' either embeds dimensions, e.g. 'FLOAT[2]', or is a bare
        # name possibly suffixed with 'ARRAY'.
        if parameter['TYPE'].count('['):
            base, dims = parameter.pop('TYPE').split('[')
            parameter['type'] = str(base)
            parameter['dimensions'] = '[' + dims
        else:
            parameter['type'] = str(parameter.pop('TYPE').split('ARRAY')[0])
            if 'DIMENSIONS' in parameter:
                parameter['dimensions'] = parameter.pop('DIMENSIONS')
        if 'DIRECTION' in parameter:
            parameter['direction'] = parameter.pop('DIRECTION').lower()
        if 'DEFAULT' in parameter:
            if parameter['DEFAULT'] is not None:
                parameter['default_value'] = parameter.pop('DEFAULT')
            else:
                parameter.pop('DEFAULT')
        if 'CHOICE_LIST' in parameter:
            if parameter['CHOICE_LIST'] is not None:
                parameter['choice_list'] = parameter.pop('CHOICE_LIST')
            else:
                parameter.pop('CHOICE_LIST')
        if 'FOLD_CASE' in parameter:
            parameter['fold_case'] = parameter.pop('FOLD_CASE')
        if 'AUTO_EXTENSION' in parameter:
            parameter['auto_extension'] = parameter.pop('AUTO_EXTENSION')
        if 'IS_TEMPORARY' in parameter:
            parameter['is_temporary'] = parameter.pop('IS_TEMPORARY')
        if 'IS_DIRECTORY' in parameter:
            parameter['is_directory'] = parameter.pop('IS_DIRECTORY')
    return task_def
Retrieve the Task Information
6,214
def despeckle_simple(B, th2=2):
    """Single-chromosome despeckling.

    Returns a copy of B in which any value lying more than th2 standard
    deviations above its diagonal's median is clamped to that median.
    """
    A = np.copy(B)
    n1 = A.shape[0]
    medians, stds = {}, {}
    for u in range(n1):
        diagonal = np.diag(A, u)
        medians[u] = np.median(diagonal)
        stds[u] = np.std(diagonal)
    for nw, j in itertools.product(range(n1), range(n1)):
        threshold = medians[nw] + th2 * stds[nw]
        lp = j + nw
        kp = j - nw
        if lp < n1 and A[j, lp] > threshold:
            A[j, lp] = medians[nw]
        if kp >= 0 and A[j, kp] > threshold:
            A[j, kp] = medians[nw]
    return A
Single - chromosome despeckling
6,215
def bin_sparse(M, subsampling_factor=3):
    """Bin a sparse matrix by lumping subsampling_factor bins together.

    Leftover rows/cols that do not fill a complete bin are folded into
    the last bin. Falls back to dense binning when scipy is missing.
    """
    try:
        from scipy.sparse import coo_matrix
    except ImportError as e:
        print(str(e))
        print("I am peforming dense binning by default.")
        return bin_dense(M.todense())
    N = M.tocoo()
    n, m = N.shape
    binned_n = n // subsampling_factor
    binned_m = m // subsampling_factor
    rows = N.row // subsampling_factor
    cols = N.col // subsampling_factor
    # Fold any leftover rows/cols into the last complete bin.
    rows[rows >= binned_n] -= n % subsampling_factor
    cols[cols >= binned_m] -= m % subsampling_factor
    return coo_matrix((N.data, (rows, cols)), shape=(binned_n, binned_m))
Perform the bin_dense procedure for sparse matrices . Remaining rows and cols are lumped with the rest at the end .
6,216
def bin_matrix(M, subsampling_factor=3):
    """Dispatch binning to the sparse or dense implementation."""
    try:
        from scipy.sparse import issparse
        if not issparse(M):
            raise ImportError
        return bin_sparse(M, subsampling_factor=subsampling_factor)
    except ImportError:
        return bin_dense(M, subsampling_factor=subsampling_factor)
Bin either sparse or dense matrices .
6,217
def bin_annotation(annotation=None, subsampling_factor=3):
    """Bin genome annotations (contig info, bin positions) by keeping
    every subsampling_factor-th entry; an empty result yields [0]."""
    if annotation is None:
        annotation = np.array([])
    binned = [value for i, value in enumerate(annotation)
              if i % subsampling_factor == 0]
    if not binned:
        binned.append(0)
    return np.array(binned)
Perform binning on genome annotations such as contig information or bin positions .
6,218
def build_pyramid(M, subsampling_factor=3):
    """Iteratively bin M into a pyramid of smaller and smaller matrices.

    Returns [M, bin(M), bin(bin(M)), ...] until the smallest dimension
    reaches 1.

    Raises:
        ValueError: if subsampling_factor is not an integer >= 2.
    """
    subs = int(subsampling_factor)
    # Bug fix: the old check (subs < 1) allowed subs == 1, which never
    # shrinks the matrix and loops forever, despite the error message
    # demanding a factor greater than 1.
    if subs < 2:
        raise ValueError(
            "Subsampling factor needs to be an integer greater than 1.")
    N = [M]
    while min(N[-1].shape) > 1:
        N.append(bin_matrix(N[-1], subsampling_factor=subs))
    return N
Iterate over a given number of times on matrix M so as to compute smaller and smaller matrices with bin_dense .
6,219
def bin_exact_kb_dense(M, positions, length=10):
    """Bin a dense matrix into bins of exactly `length` kb.

    Fragments straddling two bins are split, and their contact counts
    divided according to the overlap proportion in each bin.
    """
    ul = (10 ** 3) * length
    units = positions / ul
    n = len(positions)
    # Fragment indices at which a new kb bin starts.
    idx = [i for i in range(n - 1)
           if np.ceil(units[i]) < np.ceil(units[i + 1])]
    m = len(idx) - 1
    N = np.zeros((m, m))
    remainders = [0] + [np.abs(units[i] - units[i + 1]) for i in range(m)]
    for i in range(m):
        N[i] = np.array([
            (M[idx[j]:idx[j + 1], idx[i]:idx[i + 1]].sum()
             - remainders[j] * M[i][j]
             + remainders[j + 1] * M[i + 1][j])
            for j in range(m)
        ])
    return N
Perform the kb - binning procedure with total bin lengths being exactly set to that of the specified input . Fragments overlapping two potential bins will be split and related contact counts will be divided according to overlap proportions in each bin .
6,220
def bin_kb_sparse(M, positions, length=10):
    """Perform kb-binning on a sparse matrix.

    Returns the binned coo_matrix and the positions of the retained bins.
    Falls back to the dense implementation when scipy is missing.
    """
    try:
        from scipy.sparse import coo_matrix
    except ImportError as e:
        print(str(e))
        print("I am peforming dense normalization by default.")
        return bin_kb_dense(M.todense(), positions=positions)
    r = M.tocoo()
    unit = 10 ** 3
    ul = unit * length
    units = positions / ul
    n = len(positions)
    # Bug fix: keep indices integral — np.floor yields floats, which
    # cannot be used as array indices and are rejected by coo_matrix.
    indices = np.floor(units).astype(np.int64)
    # NOTE(review): dividing the row/col *indices* by ul looks suspicious
    # (they are bin numbers, not genomic coordinates) — confirm intent.
    row = [indices[int(i)] for i in r.row / ul]
    col = [indices[int(j)] for j in r.col / ul]
    binned_indices = positions[
        [i for i in range(n - 1) if np.ceil(units[i]) < np.ceil(units[i + 1])]
    ]
    return coo_matrix((r.data, (row, col))), binned_indices
Perform the exact kb - binning procedure on a sparse matrix .
6,221
def trim_sparse(M, n_std=3, s_min=None, s_max=None):
    """Trim a sparse matrix: drop entries whose row or column sum
    ('sparsity') falls outside (s_min, s_max), defaulting to
    mean +/- n_std * std. Falls back to dense trimming without scipy.
    """
    try:
        from scipy.sparse import coo_matrix
    except ImportError as e:
        print(str(e))
        print("I am peforming dense normalization by default.")
        return trim_dense(M.todense())
    r = M.tocoo()
    sparsity = np.array(r.sum(axis=1)).flatten()
    mean = np.mean(sparsity)
    std = np.std(sparsity)
    if s_min is None:
        s_min = mean - n_std * std
    if s_max is None:
        s_max = mean + n_std * std
    f = (sparsity > s_min) * (sparsity < s_max)
    # Vectorized filtering: the original looped in Python over every
    # stored entry, which is O(nnz) interpreter work for the same result.
    keep = f[r.row] & f[r.col]
    return coo_matrix((r.data[keep], (r.row[keep], r.col[keep])))
Apply the trimming procedure to a sparse matrix .
6,222
def normalize_dense(M, norm="frag", order=1, iterations=3):
    """Apply a normalization ('SCN', 'mirnylib', 'frag', 'global' or any
    callable) to a dense matrix and return the symmetrized result."""
    s = np.array(M, np.float64)
    floatorder = np.float64(order)
    if norm == "SCN":
        # Sequential Component Normalization: alternately rescale rows
        # and columns, skipping empty ones.
        for _ in range(0, iterations):
            sumrows = s.sum(axis=1)
            maskrows = (sumrows != 0)[:, None] * (sumrows != 0)[None, :]
            sums_row = sumrows[:, None] * np.ones(sumrows.shape)[None, :]
            s[maskrows] = 1. * s[maskrows] / sums_row[maskrows]
            sumcols = s.sum(axis=0)
            maskcols = (sumcols != 0)[:, None] * (sumcols != 0)[None, :]
            sums_col = sumcols[None, :] * np.ones(sumcols.shape)[:, None]
            s[maskcols] = 1. * s[maskcols] / sums_col[maskcols]
    elif norm == "mirnylib":
        try:
            from mirnylib import numutils as ntls
            s = ntls.iterativeCorrection(s, iterations)[0]
        except ImportError as e:
            print(str(e))
            print("I can't find mirnylib.")
            print("Please install it from "
                  "https://bitbucket.org/mirnylab/mirnylib")
            print("I will use default norm as fallback.")
            return normalize_dense(M, order=order, iterations=iterations)
    elif norm == "frag":
        for _ in range(1, iterations):
            s_norm_x = np.linalg.norm(s, ord=floatorder, axis=0)
            s_norm_y = np.linalg.norm(s, ord=floatorder, axis=1)
            s_norm = np.tensordot(s_norm_x, s_norm_y, axes=0)
            s[s_norm != 0] = 1. * s[s_norm != 0] / s_norm[s_norm != 0]
    elif norm == "global":
        s_norm = np.linalg.norm(s, ord=floatorder)
        s /= 1. * s_norm
    elif callable(norm):
        s = norm(M)
    else:
        print("Unknown norm. Returning input as fallback")
    return (s + s.T) / 2
Apply one of the many normalization types to input dense matrix . Will also apply any callable norms such as a user - made or a lambda function .
6,223
def normalize_sparse(M, norm="frag", order=1, iterations=3):
    """Apply a normalization type ('SCN', 'global' or a callable) to a
    sparse matrix; falls back to dense normalization without scipy."""
    try:
        from scipy.sparse import csr_matrix
    except ImportError as e:
        print(str(e))
        print("I am peforming dense normalization by default.")
        return normalize_dense(M.todense())
    r = csr_matrix(M)
    if norm == "SCN":
        for _ in range(1, iterations):
            row_sums = np.array(r.sum(axis=1)).flatten()
            col_sums = np.array(r.sum(axis=0)).flatten()
            row_indices, col_indices = r.nonzero()
            r.data /= row_sums[row_indices] * col_sums[col_indices]
    elif norm == "global":
        try:
            from scipy.sparse import linalg
            r = linalg.norm(M, ord=order)
        except (ImportError, AttributeError) as e:
            print(str(e))
            print("I can't import linalg tools for sparse matrices.")
            print("Please upgrade your scipy version to 0.16.0.")
    elif callable(norm):
        r = norm(M)
    else:
        print("Unknown norm. Returning input as fallback")
    return r
Applies a normalization type to a sparse matrix .
6,224
def GC_wide(genome, window=1000):
    """Compute GC content over consecutive windows of a FASTA genome.

    All records of the file are concatenated before windowing.
    """
    from Bio import SeqIO
    with open(genome) as handle:
        sequence = "".join(
            str(record.seq) for record in SeqIO.parse(handle, "fasta"))
    n = len(sequence)
    GC = []
    for start in range(0, n, window):
        GC.append(GC_partial(sequence[start:min(start + window, n)]))
    return GC
Compute GC across a window of given length .
6,225
def to_dade_matrix ( M , annotations = "" , filename = None ) : n , m = M . shape A = np . zeros ( ( n + 1 , m + 1 ) ) A [ 1 : , 1 : ] = M if not annotations : annotations = np . array ( [ "" for _ in n ] , dtype = str ) A [ 0 , : ] = annotations A [ : , 0 ] = annotations . T if filename : try : np . savetxt ( filename , A , fmt = '%i' ) print ( "I saved input matrix in dade format as " + str ( filename ) ) except ValueError as e : print ( "I couldn't save input matrix." ) print ( str ( e ) ) finally : return A return A
Returns a Dade matrix from input numpy matrix . Any annotations are added as header . If filename is provided and valid said matrix is also saved as text .
6,226
def largest_connected_component(matrix):
    """Return the adjacency matrix restricted to the largest connected
    component of the graph it describes.

    Returns the input unchanged when scipy is unavailable.
    """
    try:
        # Bug fix: 'import scipy.sparse' does not guarantee the csgraph
        # submodule is loaded; import it explicitly.
        from scipy.sparse import csgraph
        n, components = csgraph.connected_components(matrix, directed=False)
        print("I found " + str(n) + " connected components.")
        component_dist = collections.Counter(components)
        print("Distribution of components: " + str(component_dist))
        most_common, _ = component_dist.most_common(1)[0]
        ilcc = (components == most_common)
        return matrix[:, ilcc][ilcc]
    except ImportError as e:
        print("I couldn't find scipy which is needed for graph routines.")
        print(str(e))
        print("Returning input matrix as fallback.")
        return matrix
Compute the adjacency matrix of the largest connected component of the graph whose input matrix is adjacent .
6,227
def to_structure(matrix, alpha=1):
    """Compute a 3D structure from a contact matrix using the
    ShRec3D-derived method of Lesne et al., 2014.

    The matrix is restricted to its largest connected component,
    converted to distances, turned into a Gram matrix and embedded via
    its three leading eigenvectors.
    """
    connected = largest_connected_component(matrix)
    distances = to_distance(connected, alpha)
    n, m = connected.shape
    bary = np.sum(np.triu(distances, 1)) / (n ** 2)
    d = np.array(np.sum(distances ** 2, 0) / n - bary)
    gram = np.array([
        (d[i] + d[j] - distances[i][j] ** 2) / 2
        for i, j in itertools.product(range(n), range(m))
    ]).reshape(n, m)
    normalized = gram / np.linalg.norm(gram, 'fro')
    try:
        symmetric = np.array((normalized + normalized.T) / 2,
                             dtype=np.longfloat)
    except AttributeError:
        symmetric = np.array((normalized + normalized.T) / 2)
    from scipy import linalg
    eigen_values, eigen_vectors = linalg.eigh(symmetric)
    if not (eigen_values >= 0).all():
        warnings.warn("Negative eigen values were found.")
    # Keep the three largest eigenvalues, in decreasing order.
    idx = eigen_values.argsort()[-3:][::-1]
    values = eigen_values[idx]
    vectors = eigen_vectors[:, idx]
    return vectors * np.sqrt(values)
Compute best matching 3D genome structure from underlying input matrix using ShRec3D - derived method from Lesne et al . 2014 .
6,228
def get_missing_bins(original, trimmed):
    """Map each bin of a trimmed matrix back to its index in the
    original matrix.

    Relies on diagonal values being pairwise distinct, which holds in
    practice.
    """
    original_diag = np.diag(original)
    trimmed_diag = np.diag(trimmed)
    limit = 2 * min(original.shape)
    indices = []
    for j in range(min(trimmed.shape)):
        offset = 0
        while original_diag[j + offset] != trimmed_diag[j] and offset < limit:
            offset += 1
        indices.append(j + offset)
    return np.array(indices)
Retrieve indices of a trimmed matrix with respect to the original matrix . Fairly fast but is only correct if diagonal values are different which is always the case in practice .
6,229
def distance_to_contact(D, alpha=1):
    """Convert a distance matrix into a contact matrix via
    x -> 1 / x**(1/alpha). Zero distances receive the largest contact
    value inferred from the non-zero entries. alpha may be a callable.
    """
    if callable(alpha):
        distance_function = alpha
    else:
        try:
            a = np.float64(alpha)

            def distance_function(x):
                return 1 / (x ** (1 / a))
        except TypeError:
            print("Alpha parameter must be callable or an array-like")
            raise
        except ZeroDivisionError:
            raise ValueError("Alpha parameter must be non-zero")
    nonzero = D != 0
    M = np.zeros(D.shape)
    M[nonzero] = distance_function(D[nonzero])
    M[~nonzero] = np.max(M[nonzero])
    return M
Compute contact matrix from input distance matrix . Distance values of zeroes are given the largest contact count otherwise inferred non - zero distance values .
6,230
def pdb_to_structure(filename):
    """Import the atom coordinates of a structure from a PDB file.

    Returns a list of numpy coordinate arrays, one per atom.
    """
    try:
        # Bug fix: 'from Bio.PDB import PDB' is not a valid Biopython
        # module path; the PDB package lives directly under Bio.
        from Bio import PDB
    except ImportError:
        print("I can't import Biopython which is needed to handle PDB files.")
        raise
    parser = PDB.PDBParser()
    structure = parser.get_structure('S', filename)
    # One pass over the atoms suffices; the original rebuilt the same
    # full-structure atom list once per chain.
    return [np.array(atom.get_coord()) for atom in structure.get_atoms()]
Import a structure object from a PDB file .
6,231
def positions_to_contigs(positions):
    """Flatten a positions array (possibly nested per contig) and derive
    contig labels from it: each zero position starts a new contig.

    If identical consecutive nonzero values are detected, the input is
    returned unchanged with a warning (it is assumed to already be a
    contigs array).
    """
    if isinstance(positions, np.ndarray):
        flattened_positions = positions.flatten()
    else:
        try:
            flattened_positions = np.array(
                [pos for contig in positions for pos in contig])
        except TypeError:
            flattened_positions = np.array(positions)
    if (np.diff(positions) == 0).any() and not (0 in set(positions)):
        warnings.warn("I detected identical consecutive nonzero values.")
        return positions
    n = len(flattened_positions)
    contigs = np.ones(n)
    counter = 0
    for i in range(1, n):
        if positions[i] == 0:
            counter += 1
            contigs[i] += counter
        else:
            contigs[i] = contigs[i - 1]
    return contigs
Flattens and converts a positions array to a contigs array if applicable .
6,232
def distance_diagonal_law(matrix, positions=None):
    """Compute a distance-law trend from the average contact count at
    each diagonal distance; when positions are given, intra- and
    inter-contig distances are handled separately."""
    n = min(matrix.shape)
    if positions is None:
        return np.array([np.average(np.diagonal(matrix, j))
                         for j in range(n)])
    contigs = positions_to_contigs(positions)

    def is_intra(i, j):
        return contigs[i] == contigs[j]

    # Bug fix: len(contigs == u) is the length of the whole boolean
    # array, not the number of bins in contig u; count matches instead.
    max_intra_distance = max((contigs == u).sum() for u in set(contigs))
    intra_contacts = []
    inter_contacts = [np.average(np.diagonal(matrix, j))
                      for j in range(max_intra_distance, n)]
    for j in range(max_intra_distance):
        D = np.diagonal(matrix, j)
        for i in range(len(D)):
            diagonal_intra = []
            if is_intra(i, j):
                diagonal_intra.append(D[i])
            # NOTE(review): averaging a possibly empty list yields nan,
            # as in the original — confirm this is intended.
            intra_contacts.append(np.average(np.array(diagonal_intra)))
    intra_contacts.extend(inter_contacts)
    return [positions, np.array(intra_contacts)]
Compute a distance law trend using the contact averages of equal distances . Specific positions can be supplied if needed .
6,233
def rippe_parameters(matrix, positions, lengths=None, init=None, circ=False):
    """Estimate polymer-model parameters (Rippe et al., 2001) from a
    contact matrix and fragment positions."""
    n, _ = matrix.shape
    if lengths is None:
        lengths = np.abs(np.diff(positions))
    measurements, bins = [], []
    for i in range(n):
        for j in range(1, i):
            mean_length = (lengths[i] + lengths[j]) / 2.
            # Genomic separation in kb between fragment midpoints.
            if positions[i] < positions[j]:
                d = (((positions[j] - positions[i] - lengths[i])
                      + mean_length) / 1000.)
            else:
                d = (((positions[i] - positions[j] - lengths[j])
                      + mean_length) / 1000.)
            bins.append(np.abs(d))
            measurements.append(matrix[i, j])
    parameters = estimate_param_rippe(measurements, bins, init=init, circ=circ)
    print(parameters)
    return parameters[0]
Estimate parameters from the model described in Rippe et al . 2001 .
6,234
def scalogram(M, circ=False):
    """Compute a scalogram for visualizing contacts at multiple scales.

    Entry (i, j) is the sum of row i's contacts within distance j of the
    diagonal; with circ=True windows wrap around the matrix edges.
    """
    if not type(M) is np.ndarray:
        M = np.array(M)
    if M.shape[0] != M.shape[1]:
        raise ValueError("Matrix is not square.")
    try:
        n = min(M.shape)
    except AttributeError:
        n = M.size
    N = np.zeros(M.shape)
    for i in range(n):
        for j in range(n):
            if i + j < n and i >= j:
                N[i, j] = M[i, i - j:i + j + 1].sum()
            elif circ and i + j < n and i < j:
                # Window wraps past the start.
                N[i, j] = M[i, i - j:].sum() + M[i, :i + j + 1].sum()
            elif circ and i >= j and i + j >= n:
                # Window wraps past the end.
                N[i, j] = M[i, i - j:].sum() + M[i, :i + j - n + 1].sum()
            elif circ and i < j and i + j >= n:
                # Window wraps on both sides.
                N[i, j] = (M[i, i - j:].sum() + M[i, :].sum()
                           + M[i, :i + j - n + 1].sum())
    return N
Computes so - called scalograms used to easily visualize contacts at different distance scales . Edge cases have been painstakingly taken care of .
6,235
def asd(M1, M2):
    """Fourier-based distance between two matrices: the Frobenius norm
    of the difference of their 2-D FFT amplitude spectra."""
    from scipy.fftpack import fft2
    amplitude1 = np.abs(fft2(M1))
    amplitude2 = np.abs(fft2(M2))
    return np.linalg.norm(amplitude2 - amplitude1)
Compute a Fourier transform based distance between two matrices .
6,236
def remove_intra(M, contigs):
    """Return a copy of M with intra-chromosomal (same-contig) contacts
    zeroed out."""
    N = np.copy(M)
    n = len(N)
    assert n == len(contigs)
    for i, j in itertools.product(range(n), repeat=2):
        if contigs[i] == contigs[j]:
            N[i, j] = 0
    return N
Remove intrachromosomal contacts
6,237
def positions_to_contigs(positions):
    """Label each bin with the index of its contig; a zero position
    marks the start of a new contig."""
    labels = np.zeros_like(positions)
    current = 0
    for i, position in enumerate(positions):
        if position == 0:
            current += 1
        labels[i] = current
    return labels
Label contigs according to relative positions
6,238
def contigs_to_positions(contigs, binning=10000):
    """Rebuild bin start positions from contig labels: positions restart
    at 0 for each contig and advance by `binning` per bin.

    Bug fixes vs. original: itertools.groubpy -> groupby; groups are
    iterators, so materialize before taking their length; np.arange
    takes the chunk length, not the chunk itself.
    """
    positions = np.zeros_like(contigs)
    index = 0
    for _, group in itertools.groupby(contigs):
        size = len(list(group))
        positions[index:index + size] = np.arange(size) * binning
        index += size
    return positions
Build positions from contig labels
6,239
def split_matrix(M, contigs):
    """Yield each contig's intra-chromosomal sub-matrix along the
    diagonal of a multiple-chromosome matrix.

    Bug fixes vs. original: itertools.groubpy -> groupby; groupby groups
    are iterators, so materialize before taking their length.
    """
    index = 0
    for _, group in itertools.groupby(contigs):
        size = len(list(group))
        yield M[index:index + size, index:index + size]
        index += size
Split multiple chromosome matrix
6,240
def find_nearest(sorted_list, x):
    """Return the element of sorted_list nearest to x (ties favor the
    lower neighbour).

    Inlines the bisect-based find_le/find_ge helpers so the function is
    self-contained.
    """
    import bisect
    if x <= sorted_list[0]:
        return sorted_list[0]
    if x >= sorted_list[-1]:
        return sorted_list[-1]
    # Rightmost value <= x and leftmost value >= x.
    lower = sorted_list[bisect.bisect_right(sorted_list, x) - 1]
    upper = sorted_list[bisect.bisect_left(sorted_list, x)]
    if (x - lower) > (upper - x):
        return upper
    return lower
Find the nearest item of x from sorted array .
6,241
def format_x_tick(axis, major_locator=None, major_formatter=None,
                  minor_locator=None, minor_formatter=None):
    """Apply the given locators/formatters to the x axis, rotate the
    tick labels vertically and enable the grid."""
    xaxis = axis.xaxis
    if major_locator:
        xaxis.set_major_locator(major_locator)
    if major_formatter:
        xaxis.set_major_formatter(major_formatter)
    if minor_locator:
        xaxis.set_minor_locator(minor_locator)
    if minor_formatter:
        xaxis.set_minor_formatter(minor_formatter)
    axis.autoscale_view()
    for labels in (xaxis.get_majorticklabels(), xaxis.get_minorticklabels()):
        plt.setp(labels, rotation=90)
    axis.grid()
Set the format of the x axis .
6,242
def set_legend(axis, lines, legend):
    """Attach a legend for the given lines (no-op when legend is falsy);
    any underlying failure is re-raised as ValueError."""
    try:
        if legend:
            axis.legend(lines, legend)
    except Exception as e:
        raise ValueError("invalid 'legend', Error: %s" % e)
Set line legend .
6,243
def get_max(array):
    """Return the maximum value in array, skipping entries that cannot
    be compared to numbers (None, strings, ...).

    Raises:
        ValueError: if the array holds no comparable numeric value.
    """
    largest = -np.inf
    for item in array:
        try:
            if item > largest:
                largest = item
        except TypeError:
            # Bug fix: the bare `except:` also swallowed real bugs and
            # KeyboardInterrupt; only incomparable items are skipped.
            pass
    if np.isinf(largest):
        raise ValueError("there's no numeric value in array!")
    return largest
Get maximum value of an array . Automatically ignore invalid data .
6,244
def get_min(array):
    """Return the minimum value in array, skipping entries that cannot
    be compared to numbers (None, strings, ...).

    Raises:
        ValueError: if the array holds no comparable numeric value.
    """
    smallest = np.inf
    for item in array:
        try:
            if item < smallest:
                smallest = item
        except TypeError:
            # Bug fix: the bare `except:` also swallowed real bugs and
            # KeyboardInterrupt; only incomparable items are skipped.
            pass
    if np.isinf(smallest):
        raise ValueError("there's no numeric value in array!")
    return smallest
Get minimum value of an array . Automatically ignore invalid data .
6,245
def get_yAxis_limit(y, lower=0.05, upper=0.2):
    """Compute padded (y_min, y_max) limits that leave room for the
    legend and the plot itself."""
    smallest = get_min(y)
    largest = get_max(y)
    gap = largest - smallest
    if gap < 0.000001:
        # Degenerate (flat) data: pad relative to the magnitude instead.
        return (smallest - lower * abs(smallest),
                largest + upper * abs(largest))
    return smallest - lower * gap, largest + upper * gap
Find optimal y_min and y_max that guarantee enough space for legend and plot .
6,246
def create_figure(width=20, height=10):
    """Create a matplotlib figure holding a single subplot axis and
    return both."""
    figure = plt.figure(figsize=(width, height))
    return figure, figure.add_subplot(1, 1, 1)
Create a figure instance .
6,247
def preprocess_x_y(x, y):
    """Normalize x/y plotting input to sequence-of-sequences form.

    Already-nested input is returned unchanged; flat sequences are each
    wrapped in a one-element tuple.

    Raises:
        ValueError: when x is not an iterable supporting indexing.
    """
    def sliceable(obj):
        return hasattr(obj, "__iter__") and hasattr(obj, "__getitem__")

    if not sliceable(x):
        raise ValueError("invalid input!")
    if sliceable(x[0]):
        return x, y
    return (x,), (y,)
Preprocess x y input data . Returns list of list style .
6,248
def execute(input_params, engine, cwd=None):
    """Run the external task engine with JSON input and return its JSON
    output with key order preserved.

    Raises:
        TaskEngineNotFoundError: engine not configured or missing on disk.
        TaskEngineExecutionError: the engine exited with a non-zero code.
    """
    try:
        taskengine_exe = config.get('engine')
    except NoConfigOptionError:
        raise TaskEngineNotFoundError(
            "Task Engine config option not set." +
            "\nPlease verify the 'engine' configuration setting.")
    if not os.path.exists(taskengine_exe):
        raise TaskEngineNotFoundError(
            "Task Engine executable not found." +
            "\nPlease verify the 'engine' configuration setting.")
    engine_args = None
    try:
        engine_args = config.get('engine-args')
    except NoConfigOptionError:
        pass
    environment = None
    config_environment = config.get_environment()
    if config_environment:
        environment = os.environ.copy()
        environment.update(config_environment)
    args = [taskengine_exe, engine]
    if engine_args:
        args.append(engine_args)
    startupinfo = None
    if sys.platform.startswith('win'):
        # Prevent a console window from flashing up on Windows.
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    input_json = json.dumps(input_params)
    process = Popen(args, stdout=PIPE, stdin=PIPE, stderr=PIPE,
                    cwd=cwd, env=environment, startupinfo=startupinfo)
    stdout, stderr = process.communicate(input=input_json.encode('utf-8'))
    if process.returncode != 0:
        # Bug fix: stderr is bytes, so the old comparison `stderr != ''`
        # was always true and the exit-code branch was unreachable.
        if stderr:
            raise TaskEngineExecutionError(stderr.decode('utf-8'))
        raise TaskEngineExecutionError(
            'Task Engine exited with code: ' + str(process.returncode))
    return json.loads(stdout.decode('utf-8'), object_pairs_hook=OrderedDict)
Execute a task with the provided input parameters
6,249
def run(self, wrappers=["", ""]):
    """Render the piece to LilyPond source and invoke lilypond on it.

    wrappers[0]/wrappers[1] are prepended/appended around the generated
    source. NOTE(review): the mutable default is kept for interface
    compatibility; it is never mutated here.
    """
    lilystring = self.piece_obj.toLily()
    # Bug fix: use a context manager so the file is closed even if
    # rendering or writing raises.
    with open(self.lyfile, 'w') as opened_file:
        opened_file.write(
            wrappers[0] + "\\version \"2.18.2\" \n" + lilystring + wrappers[1])
    os.system(self.lily_script + " --loglevel=WARNING --output=" +
              self.folder + " " + self.lyfile)
run the lilypond script on the hierarchy class
6,250
def extract_fasta(partition_file, fasta_file, output_dir,
                  chunk_size=DEFAULT_CHUNK_SIZE, max_cores=DEFAULT_MAX_CORES):
    """Extract bin sequences: write one FASTA file per core, pulling
    each chunk's sequence window out of the reference genome.

    partition_file maps chunk names ('<contig>_<k>') to core ids; chunk
    id k indexes a window of chunk_size bp within its contig.
    """
    genome = {record.id: record.seq
              for record in SeqIO.parse(fasta_file, "fasta")}
    data_chunks = list(zip(*np.genfromtxt(partition_file, usecols=(0, 1),
                                          dtype=None)))
    chunk_names = np.array(data_chunks[0], dtype=object)
    cores = np.array(data_chunks[1])
    for core in set(cores):
        if core > max_cores:
            continue
        chunks_to_keep = chunk_names[cores == core]
        core_file = os.path.join(output_dir, "core_{}.fa".format(core))
        with open(core_file, "w") as core_handle:
            for name in chunks_to_keep:
                fields = name.split("_")
                header_name = "_".join(fields[:-1])
                chunk = int(fields[-1])
                pos_start = chunk * chunk_size
                pos_end = min((chunk + 1) * chunk_size,
                              len(genome[header_name]))
                sequence = str(genome[header_name][pos_start:pos_end])
                core_handle.write(">{}\n".format(name))
                core_handle.write("{}\n".format(sequence))
Extract sequences from bins
6,251
def merge_fasta(fasta_file, output_dir):
    """Merge consecutive chunk records of a FASTA bin into whole records.

    Record ids look like "<name>_<index>"; runs of consecutive indices
    for the same base name are concatenated and written to
    ``<basename>_merged.fa`` in ``output_dir`` under an id of the form
    "<first chunk id>_<last index>".
    """
    def chunk_lexicographic_order(chunk):
        # Sort key: (name fields, numeric chunk index).
        chunk_fields = chunk.split("_")
        chunk_name = chunk_fields[:-1]
        chunk_id = chunk_fields[-1]
        return (chunk_name, int(chunk_id))

    def are_consecutive(chunk1, chunk2):
        # True when chunk1 directly follows chunk2 within the same name.
        # NOTE(review): appears unused in this function's body.
        if None in {chunk1, chunk2}:
            return False
        else:
            ord1 = chunk_lexicographic_order(chunk1)
            ord2 = chunk_lexicographic_order(chunk2)
            return (ord1[0] == ord2[0]) and (ord1[1] == ord2[1] + 1)

    def consecutiveness(key_chunk_pair):
        # groupby key: consecutive chunks share (name, index - position),
        # so each run of consecutive indices forms one group.
        key, chunk = key_chunk_pair
        chunk_name, chunk_id = chunk_lexicographic_order(chunk)
        return (chunk_name, chunk_id - key)

    genome = {record.id: record.seq for record in SeqIO.parse(fasta_file, "fasta")}
    sorted_ids = sorted(genome, key=chunk_lexicographic_order)
    new_genome = dict()
    for _, g in itertools.groupby(enumerate(sorted_ids), consecutiveness):
        chunk_range = map(operator.itemgetter(1), g)
        first_chunk = next(chunk_range)
        my_sequence = genome[first_chunk]
        my_chunk = None
        # Concatenate the remaining chunks of this consecutive run.
        while "Reading chunk range":
            try:
                my_chunk = next(chunk_range)
                my_sequence += genome[my_chunk]
            except StopIteration:
                break
        try:
            last_chunk_id = my_chunk.split("_")[-1]
        except AttributeError:
            # Single-chunk run: my_chunk stayed None.
            last_chunk_id = ""
        if last_chunk_id:
            new_chunk_id = "{}_{}".format(first_chunk, last_chunk_id)
        else:
            new_chunk_id = first_chunk
        new_genome[new_chunk_id] = my_sequence
    base_name = ".".join(os.path.basename(fasta_file).split(".")[:-1])
    output_name = "{}_merged.fa".format(base_name)
    merged_core_file = os.path.join(output_dir, output_name)
    with open(merged_core_file, "w") as output_handle:
        for my_id in sorted(new_genome, key=chunk_lexicographic_order):
            output_handle.write(">{}\n".format(my_id))
            output_handle.write("{}\n".format(new_genome[my_id]))
Merge chunks into complete FASTA bins
6,252
def monitor():
    """Run the interactive monitoring console forever.

    Schedules :func:`console` on the current event loop and blocks by
    running the loop until it is stopped externally.
    """
    log = logging.getLogger(__name__)
    event_loop = asyncio.get_event_loop()
    asyncio.ensure_future(console(event_loop, log))
    event_loop.run_forever()
Wrapper to call console with a loop .
6,253
def make_object(cls, data):
    """Create an API object of class ``cls`` backed by ``data``.

    Subclasses of ``Object`` are instantiated without calling their
    constructor and get ``data`` attached as ``_data``; any other ``cls``
    simply yields ``data`` unchanged.
    """
    if not issubclass(cls, Object):
        return data
    instance = object.__new__(cls)
    instance._data = data
    return instance
Creates an API object of class cls setting its _data to data . Subclasses of Object are required to use this to build a new empty instance without using their constructor .
6,254
def String(length=None, **kwargs):
    """A string-valued property with an optional maximum ``length``."""
    return Property(
        length=length,
        types=stringy_types,
        convert=to_string,
        **kwargs
    )
A string valued property with max . length .
6,255
def Datetime(null=True, **kwargs):
    """A datetime-valued property.

    Values are converted to the local timezone and parsed from strings
    with ``dateutil``.
    """
    return Property(
        types=datetime.datetime,
        convert=util.local_timezone,
        load=dateutil.parser.parse,
        null=null,
        **kwargs
    )
A datetime property .
6,256
def InstanceOf(cls, **kwargs):
    """A property whose value is an instance of ``cls``, loaded via
    ``cls.load``."""
    return Property(load=cls.load, types=cls, **kwargs)
A property that is an instance of cls .
6,257
def ListOf(cls, **kwargs):
    """A property holding a list whose elements are loaded as ``cls``."""
    def load_items(value):
        # Deserialise every element of the raw list through cls.load.
        return [cls.load(item) for item in value]
    return Property(types=list, load=load_items, default=list, **kwargs)
A property that is a list of cls .
6,258
def add_dimension(self, name, data=None):
    """Add a named dimension to this entity.

    Registers ``name``, builds a value object (an empty ``__dimtype__``
    when ``data`` is None, otherwise one wrapped via ``make_object``),
    stores it under ``name`` in ``_data`` and exposes it as an attribute.

    :returns: the value object created for the dimension.
    """
    self.dimensions.add(name)
    if data is None:
        valobj = self.__dimtype__()
    else:
        valobj = make_object(self.__dimtype__, data)
    self._data[name] = valobj
    setattr(self, name, valobj)
    return valobj
Add a named dimension to this entity .
6,259
def print_block(self, section_key, f=sys.stdout, file_format="mwtab"):
    """Print an mwTab section into a file or stdout.

    :param section_key: name of the section to print.
    :param f: writable file object (defaults to stdout).
    :param file_format: "mwtab" for the tab-separated text layout,
        "json" to dump the section as indented JSON.
    """
    if file_format == "mwtab":
        for key, value in self[section_key].items():
            # Within the top header block only VERSION/CREATED_ON print.
            if section_key == "METABOLOMICS WORKBENCH" and key not in ("VERSION", "CREATED_ON"):
                continue
            # Column-width padding so values align per the mwTab layout;
            # widths 20/33/30 match the spec's fixed key columns.
            if key in ("VERSION", "CREATED_ON"):
                cw = 20 - len(key)
            elif key in ("SUBJECT_SAMPLE_FACTORS", ):
                cw = 33 - len(key)
            else:
                cw = 30 - len(key)
            if "\n" in value:
                # Multi-line values repeat the key on every line.
                for line in value.split("\n"):
                    print("{}{}{}\t{}".format(self.prefixes.get(section_key, ""), key, cw * " ", line), file=f)
            elif key == "SUBJECT_SAMPLE_FACTORS":
                # One row per factor dict, values tab-joined.
                for factor in value:
                    print("{}{}\t{}".format(key, cw * " ", "\t".join(factor.values())), file=f)
            elif key.endswith(":UNITS"):
                print("{}\t{}".format(key, value), file=f)
            elif key.endswith("_RESULTS_FILE"):
                # Dict values carry an extra key:value annotation pair.
                if isinstance(value, dict):
                    print("{}{} \t{}\t{}:{}".format(self.prefixes.get(section_key, ""), *[i for pair in value.items() for i in pair]), file=f)
                else:
                    print("{}{}{}\t{}".format(self.prefixes.get(section_key, ""), key, cw * " ", value), file=f)
            elif key.endswith("_START"):
                # Delimited sub-block: *_START ... *_END.
                start_key = key
                end_key = "{}{}".format(start_key[:-5], "END")
                print(start_key, file=f)
                for data_key in value:
                    if data_key in ("Samples", "Factors"):
                        print("{}\t{}".format(data_key, "\t".join(self[section_key][key][data_key])), file=f)
                    elif data_key in ("Fields", ):
                        print("{}".format("\t".join(self[section_key][key][data_key])), file=f)
                    elif data_key == "DATA":
                        for data in self[section_key][key][data_key]:
                            print("\t".join(data.values()), file=f)
                print(end_key, file=f)
            else:
                print("{}{}{}\t{}".format(self.prefixes.get(section_key, ""), key, cw * " ", value), file=f)
    elif file_format == "json":
        print(json.dumps(self[section_key], sort_keys=False, indent=4), file=f)
Print mwtab section into a file or stdout .
6,260
def _is_mwtab ( string ) : if isinstance ( string , str ) : lines = string . split ( "\n" ) elif isinstance ( string , bytes ) : lines = string . decode ( "utf-8" ) . split ( "\n" ) else : raise TypeError ( "Expecting <class 'str'> or <class 'bytes'>, but {} was passed" . format ( type ( string ) ) ) lines = [ line for line in lines if line ] header = lines [ 0 ] if header . startswith ( "#METABOLOMICS WORKBENCH" ) : return "\n" . join ( lines ) return False
Test if input string is in mwtab format .
6,261
def getTraceIdsBySpanName(self, service_name, span_name, end_ts, limit, order):
    """Fetch trace ids by service and span name.

    Returns up to ``limit`` entries recorded before ``end_ts``; Thrift
    request/response round trip.
    """
    self.send_getTraceIdsBySpanName(service_name, span_name, end_ts,
                                    limit, order)
    result = self.recv_getTraceIdsBySpanName()
    return result
Fetch trace ids by service and span name . Gets limit number of entries from before the end_ts .
6,262
def getTraceIdsByServiceName(self, service_name, end_ts, limit, order):
    """Fetch trace ids by service name.

    Returns up to ``limit`` entries recorded before ``end_ts``; Thrift
    request/response round trip.
    """
    self.send_getTraceIdsByServiceName(service_name, end_ts, limit, order)
    result = self.recv_getTraceIdsByServiceName()
    return result
Fetch trace ids by service name . Gets limit number of entries from before the end_ts .
6,263
def getTraceIdsByAnnotation(self, service_name, annotation, value, end_ts,
                            limit, order):
    """Fetch trace ids carrying a particular annotation.

    Returns up to ``limit`` entries recorded before ``end_ts``; Thrift
    request/response round trip.
    """
    self.send_getTraceIdsByAnnotation(service_name, annotation, value,
                                      end_ts, limit, order)
    result = self.recv_getTraceIdsByAnnotation()
    return result
Fetch trace ids with a particular annotation . Gets limit number of entries from before the end_ts .
6,264
def getTracesByIds(self, trace_ids, adjust):
    """Get the full traces associated with the given trace ids."""
    self.send_getTracesByIds(trace_ids, adjust)
    result = self.recv_getTracesByIds()
    return result
Get the full traces associated with the given trace ids .
6,265
def getTraceSummariesByIds(self, trace_ids, adjust):
    """Fetch trace summaries for the given trace ids."""
    self.send_getTraceSummariesByIds(trace_ids, adjust)
    result = self.recv_getTraceSummariesByIds()
    return result
Fetch trace summaries for the given trace ids .
6,266
def getTraceCombosByIds(self, trace_ids, adjust):
    """Fetch combined trace data (summaries and timelines together)
    for the given trace ids."""
    self.send_getTraceCombosByIds(trace_ids, adjust)
    result = self.recv_getTraceCombosByIds()
    return result
Not content with just one of traces summaries or timelines? Want it all? This is the method for you .
6,267
def setTraceTimeToLive(self, trace_id, ttl_seconds):
    """Change the TTL of a trace so an interesting one can be kept
    around for further investigation.

    Thrift round trip; the receive call is made for error propagation
    and returns nothing.
    """
    self.send_setTraceTimeToLive(trace_id, ttl_seconds)
    self.recv_setTraceTimeToLive()
Change the TTL of a trace . If we find an interesting trace we want to keep around for further investigation .
6,268
def discover_datasource_columns(datastore_str, datasource_id):
    """Find the datasource identified by ``datasource_id`` in the
    datastore and return its columns.

    Raster datasources have no columns, so an empty list is returned
    for them.
    """
    datasource = DataStore(datastore_str).get_datasource(datasource_id)
    if datasource.type == "RASTER":
        return []
    return datasource.list_columns()
Loop through the datastore's datasources to find the datasource identified by datasource_id; return the matching datasource's columns.
6,269
def _get_column_type(self, column):
    """Classify an OGR column: 'numeric' for integer/real fields,
    'string' for everything else."""
    numeric_types = (ogr.OFTInteger, ogr.OFTReal)
    return 'numeric' if column.GetType() in numeric_types else 'string'
Return numeric if the column is of type integer or real otherwise return string .
6,270
def _get_default_mapfile_excerpt(self):
    """Build a dict representation of a MapFile LAYER block.

    Creates a stub layer, attaches a class carrying the default style
    and serialises the result.
    """
    layerobj = self._get_layer_stub()
    classobj = mapscript.classObj()
    # NOTE(review): the class is inserted into the layer before the
    # style is added to it — presumably insertClass keeps a live
    # reference rather than copying; confirm against mapscript docs
    # before reordering.
    layerobj.insertClass(classobj)
    styleobj = self._get_default_style()
    classobj.insertStyle(styleobj)
    return mapserializer.layerobj_to_dict(layerobj, None)
Given an OGR string an OGR connection and an OGR layer create and return a representation of a MapFile LAYER block .
6,271
def _get_layer_stub(self):
    """Build a minimal mapscript layerObj (no styling) for this layer.

    Configures name, status, projection, optional gml_featureid
    metadata, the data/connection settings for "directory" (shapefile)
    or "postgis" datastores, and the geometry type.

    :raises RuntimeError: for unsupported datastore connection types.
    """
    layerobj = mapscript.layerObj()
    layerobj.name = self.name
    layerobj.status = mapscript.MS_ON
    projection = self.ogr_layer.GetSpatialRef()
    featureIdColumn = self._get_featureId_column()
    if featureIdColumn is not None and featureIdColumn != '':
        # Expose the feature id column for GML/WFS output.
        layerobj.metadata.set('gml_featureid', featureIdColumn)
    if projection is not None:
        layerobj.setProjection(projection.ExportToProj4())
    if self.datastore.connection_type == "directory":
        datastr = os.path.normpath(self.datastore.datastore_str + "/" + self.name)
        # Prefer an explicit shapefile path when one exists (either case).
        if os.path.exists(datastr + '.shp'):
            datastr = datastr + '.shp'
        elif os.path.exists(datastr + '.SHP'):
            datastr = datastr + '.SHP'
        layerobj.data = datastr
    elif self.datastore.connection_type == "postgis":
        layerobj.connectiontype = mapscript.MS_POSTGIS
        # Drops the first three characters of datastore_str — assumes a
        # 3-char prefix (e.g. "pg:") on postgis strings; TODO confirm.
        layerobj.connection = self.datastore.datastore_str[3:].strip()
        if featureIdColumn is not None and featureIdColumn != '':
            layerobj.data = "%s from %s using unique %s" % (
                self.ogr_layer.GetGeometryColumn(),
                self.name,
                featureIdColumn)
        else:
            layerobj.data = "%s from %s" % (
                self.ogr_layer.GetGeometryColumn(),
                self.name)
    else:
        raise RuntimeError("unsupported connection type")
    # Geometry type: anything that is not POINT/POLYGON is drawn as lines.
    if self.type == 'POINT':
        layerobj.type = mapscript.MS_LAYER_POINT
    elif self.type == 'POLYGON':
        layerobj.type = mapscript.MS_LAYER_POLYGON
    else:
        layerobj.type = mapscript.MS_LAYER_LINE
    return layerobj
builds a minimal mapscript layerobj with no styling
6,272
def reelect_app(self, request, app):
    """Try to reconnect to the same application on a different host
    taken from its dist-info endpoint list.

    Endpoints are consumed one per attempt (the ``finally`` drops the
    first endpoint whether the attempt succeeded or not); when the list
    is exhausted the request falls back to the common pool via
    ``self.proxy.reelect_app``. Tornado generator-coroutine: the chosen
    app is delivered with ``gen.Return``.
    """
    app.disconnect()
    endpoints_size = len(app.locator.endpoints)
    # One extra iteration so the empty-endpoints fallback branch is
    # reachable after the last endpoint has been dropped.
    for _ in xrange(0, endpoints_size + 1):
        if len(app.locator.endpoints) == 0:
            request.logger.info("giving up on connecting to dist-info hosts, falling back to common pool processing")
            app = yield self.proxy.reelect_app(request, app)
            raise gen.Return(app)
        try:
            # Connect to the locator first, then resolve the app via it.
            locator = Locator(endpoints=app.locator.endpoints)
            request.logger.info("connecting to locator %s", locator.endpoints[0])
            yield gen.with_timeout(self.service_connect_timeout, locator.connect())
            request.logger.debug("connected to locator %s for %s", locator.endpoints[0], app.name)
            app = Service(app.name, locator=locator, timeout=RESOLVE_TIMEOUT)
            yield gen.with_timeout(self.service_connect_timeout, app.connect())
            request.logger.debug("connected to application %s via %s", app.name, app.endpoints)
        except gen.TimeoutError:
            request.logger.warning("timed out while connecting to application")
            continue
        except ServiceError as err:
            request.logger.warning("got error while resolving app - %s", err)
            # Locator "service not available": try the next endpoint;
            # any other service error is fatal for this request.
            if err.category in LOCATORCATEGORY and err.code == ESERVICENOTAVAILABLE:
                continue
            else:
                raise err
        finally:
            # Drop the endpoint we just tried, successful or not.
            app.locator.endpoints = app.locator.endpoints[1:]
        raise gen.Return(app)
    raise PluginApplicationError(42, 42, "could not connect to application")
Tries to connect to the same app on a different host from dist-info.
6,273
def RecordHelloWorld(handler, t):
    """Demonstration of recording a message.

    Starts a recording pointed at this service's receiver URL, speaks a
    greeting, stops recording and writes the rendered JSON response.
    """
    record_url = "%s/receive_recording.py" % THIS_URL
    t.startRecording(record_url)
    t.say("Hello, World.")
    t.stopRecording()
    rendered = t.RenderJson()
    logging.info("RecordHelloWorld json: %s" % rendered)
    handler.response.out.write(rendered)
Demonstration of recording a message .
6,274
def RedirectDemo(handler, t):
    """Demonstration of redirecting a call to another number."""
    t.redirect(SIP_PHONE)
    rendered = t.RenderJson()
    logging.info("RedirectDemo json: %s" % rendered)
    handler.response.out.write(rendered)
Demonstration of redirecting to another number .
6,275
def TransferDemo(handler, t):
    """Demonstration of transferring a call to another number."""
    t.say("One moment please.")
    t.transfer(MY_PHONE)
    t.say("Hi. I am a robot")
    rendered = t.RenderJson()
    logging.info("TransferDemo json: %s" % rendered)
    handler.response.out.write(rendered)
Demonstration of transferring a call to another number.
6,276
def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, status_codes=None,
          logger=None):
    """Decorator that retries the wrapped callable with backoff.

    :param ExceptionToCheck: exception (or tuple of exceptions) that
        triggers a retry.
    :param tries: total number of attempts (floored to an int; >= 0).
    :param delay: initial sleep between attempts in seconds (> 0).
    :param backoff: multiplier applied to the delay after each retry
        (1 gives a fixed delay, > 1 an exponential one).
    :param status_codes: when a ``DataFailureException`` is caught, only
        retry if its status is in this collection (empty = retry on any).
        (Was a mutable default ``[]``.)
    :param logger: optional logger; warns before each retry.
    :raises ValueError: for invalid tries/delay/backoff values.
    """
    if backoff is None or backoff <= 0:
        raise ValueError("backoff must be a number greater than 0")

    tries = math.floor(tries)
    if tries < 0:
        raise ValueError("tries must be a number 0 or greater")

    if delay is None or delay <= 0:
        raise ValueError("delay must be a number greater than 0")

    if status_codes is None:
        status_codes = []

    def deco_retry(f):
        @functools.wraps(f)  # preserve the wrapped callable's metadata
        def f_retry(*args, **kwargs):
            mtries, mdelay = tries, delay
            while mtries > 1:
                try:
                    return f(*args, **kwargs)
                except ExceptionToCheck as err:
                    # DataFailureExceptions with an unlisted status are
                    # not retried when a status filter was provided.
                    if (type(err) is DataFailureException and
                            len(status_codes) and
                            err.status not in status_codes):
                        raise
                    if logger:
                        logger.warning('%s: %s, Retrying in %s seconds.' % (
                            f.__name__, err, mdelay))
                    time.sleep(mdelay)
                    mtries -= 1
                    mdelay *= backoff
            # Final attempt: any exception propagates to the caller.
            return f(*args, **kwargs)
        return f_retry
    return deco_retry
Decorator function for retrying the decorated function using an exponential or fixed backoff .
6,277
def _custom_response_edit(self, method, url, headers, body, response):
    """Hook allowing a mock-backed service to post-process a response.

    Only active when the implementation is a mock: sleeps for the
    configured MOCKDATA_DELAY, then delegates to ``_edit_mock_response``.
    """
    if not self.get_implementation().is_mock():
        return
    time.sleep(self.get_setting("MOCKDATA_DELAY", 0.0))
    self._edit_mock_response(method, url, headers, body, response)
This method allows a service to edit a response .
6,278
def postURL(self, url, headers=None, body=None):
    """Request ``url`` using the HTTP method POST.

    :param headers: optional mapping of request headers. (Was a shared
        mutable default ``{}``.)
    :param body: optional request body.
    """
    if headers is None:
        headers = {}
    return self._load_resource("POST", url, headers, body)
Request a URL using the HTTP method POST .
6,279
def putURL(self, url, headers, body=None):
    """Request ``url`` using the HTTP method PUT."""
    method = "PUT"
    return self._load_resource(method, url, headers, body)
Request a URL using the HTTP method PUT .
6,280
def patchURL(self, url, headers, body):
    """Request ``url`` using the HTTP method PATCH."""
    method = "PATCH"
    return self._load_resource(method, url, headers, body)
Request a URL using the HTTP method PATCH .
6,281
def setup_dir(f):
    """Decorate ``f`` to run inside the directory where setup.py resides.

    The directory is resolved once, at decoration time, from this
    module's own location.
    """
    # Renamed from 'setup_dir' — the old local shadowed the function name.
    base_dir = os.path.dirname(os.path.abspath(__file__))

    @functools.wraps(f)  # keep the wrapped function's name/docstring
    def wrapped(*args, **kwargs):
        with chdir(base_dir):
            return f(*args, **kwargs)
    return wrapped
Decorate f to run inside the directory where setup . py resides .
6,282
def feedback_form(context):
    """Template tag that renders a feedback form.

    Pulls the current path and the authenticated user (if any) from the
    request found in ``context`` and returns the template variables.
    """
    user = None
    url = None
    request = context.get('request')
    if request:
        url = request.path
        if request.user.is_authenticated():
            user = request.user
    return {
        'form': FeedbackForm(url=url, user=user),
        'background_color': FEEDBACK_FORM_COLOR,
        'text_color': FEEDBACK_FORM_TEXTCOLOR,
        'text': FEEDBACK_FORM_TEXT,
    }
Template tag to render a feedback form .
6,283
def select(self, *itms):
    """Append a SELECT clause for the current table.

    With no arguments every column ('*') is selected. Returns ``self``
    so calls can be chained.
    """
    columns = ', '.join(itms) if itms else '*'
    self.terms.append("select %s from %s" % (columns, self.table))
    return self
Joins the items to be selected and inserts the current table name
6,284
def _in ( self , * lst ) : self . terms . append ( 'in (%s)' % ', ' . join ( [ '"%s"' % x for x in lst ] ) ) return self
Build out the in clause . Using _in due to shadowing for in
6,285
def compile(self):
    """Assemble the collected terms into the final URL-quoted query.

    Joins the accumulated clause strings with single spaces, percent-
    encodes the result into ``self.compiled_str`` and returns ``self``
    for chaining.
    """
    # str.join replaces the previous quadratic string-concatenation loop.
    self.compiled_str = urllib.parse.quote(" ".join(self.terms))
    return self
Take all of the parts components and build the complete query to be passed to Yahoo YQL
6,286
def read_files(*sources, **kwds):
    """Yield ``mwtab.MWTabFile`` instances parsed from ``sources``.

    Sources are expanded to filenames and open handles; files that fail
    to parse are reported (when VERBOSE) and skipped rather than
    aborting the generator.

    :param sources: inputs understood by the filename/handle generators.
    :param kwds: pass ``validate=True`` to run schema validation on
        every file.
    """
    validate = kwds.get('validate')
    for fh, source in _generate_handles(_generate_filenames(sources)):
        try:
            mwtabfile = mwtab.MWTabFile(source)
            mwtabfile.read(fh)
            if validate:
                validator.validate_file(
                    mwtabfile=mwtabfile,
                    section_schema_mapping=mwschema.section_schema_mapping,
                    validate_samples=True,
                    validate_factors=True)
            yield mwtabfile
            if VERBOSE:
                print("Processed file: {}".format(os.path.abspath(source)))
        except Exception as e:
            # Best-effort: report and continue with the remaining files.
            if VERBOSE:
                print("Error processing file: ", os.path.abspath(source),
                      "\nReason:", e)
Construct a generator that yields file instances .
6,287
def is_url(path):
    """Return True when ``path`` parses as a URL with a scheme, a
    network location and a path component."""
    try:
        parts = urlparse(path)
    except ValueError:
        return False
    return all((parts.scheme, parts.netloc, parts.path))
Test if path represents a valid URL .
6,288
def AuthMiddleware(app):
    """Add authentication and authorization middleware to ``app``.

    Configures SQL-backed auth with a cookie rememberer and a form
    plugin redirecting between /signin, /login and /logout, using the
    model's User/Group/Permission classes and session.
    """
    basic_redirect_form = BasicRedirectFormPlugin(
        login_form_url="/signin",
        login_handler_path="/login",
        post_login_url="/",
        logout_handler_path="/logout",
        post_logout_url="/signin",
        rememberer_name="cookie")
    return setup_sql_auth(
        app,
        user_class=model.User,
        group_class=model.Group,
        permission_class=model.Permission,
        dbsession=model.meta.Session,
        form_plugin=basic_redirect_form,
        cookie_secret=config['cookie_secret'],
        # Map the auth library's expected attribute names onto the
        # names used by this project's model classes.
        translations={
            'user_name': 'login',
            'users': 'users',
            'group_name': 'name',
            'groups': 'groups',
            'permission_name': 'name',
            'permissions': 'permissions',
            'validate_password': 'validate_password'},
    )
Add authentication and authorization middleware to the app .
6,289
def _get_full_path ( self , path , environ ) : if path . startswith ( '/' ) : path = environ . get ( 'SCRIPT_NAME' , '' ) + path return path
Return the full path to path by prepending the SCRIPT_NAME . If path is a URL do nothing .
6,290
def _replace_qs ( self , url , qs ) : url_parts = list ( urlparse ( url ) ) url_parts [ 4 ] = qs return urlunparse ( url_parts )
Replace the query string of url with qs and return the new URL .
6,291
def write(self):
    """Persist the current settings to the config file, then reload
    them via ``storage.refresh``."""
    settings = self.as_dict()
    with open(storage.config_file, 'w') as cfg:
        yaml.dump(settings, cfg, default_flow_style=False)
    storage.refresh()
write the current settings to the config file
6,292
def process_bind_param(self, value, dialect):
    """Serialise a Python object to JSON text for storage.

    ``None`` passes through unchanged so SQL NULL is preserved.
    """
    if value is None:
        return None
    return simplejson.dumps(value)
convert value from python object to json
6,293
def process_result_value(self, value, dialect):
    """Deserialise stored JSON text back into a Python object.

    ``None`` (SQL NULL) passes through unchanged.
    """
    if value is None:
        return None
    return simplejson.loads(value)
convert value from json to a python object
6,294
def getBriefModuleInfoFromFile(fileName):
    """Build the brief module info by parsing the file at ``fileName``."""
    info = BriefModuleInfo()
    _cdmpyparser.getBriefModuleInfoFromFile(info, fileName)
    info.flush()
    return info
Builds the brief module info from file
6,295
def getBriefModuleInfoFromMemory(content):
    """Build the brief module info by parsing in-memory ``content``."""
    info = BriefModuleInfo()
    _cdmpyparser.getBriefModuleInfoFromMemory(info, content)
    info.flush()
    return info
Builds the brief module info from memory
6,296
def getDisplayName(self):
    """Provide a name for display purposes, honouring the alias."""
    display = self.name
    if self.alias != "":
        display = display + " as " + self.alias
    return display
Provides a name for display purpose respecting the alias
6,297
def flush(self):
    """Flush the collected information: close all open scopes and
    record the trailing import, if any."""
    self.__flushLevel(0)
    pending = self.__lastImport
    if pending is not None:
        self.imports.append(pending)
Flushes the collected information
6,298
def __flushLevel(self, level):
    """Merge objects collected on the stack down to ``level``.

    Pops entries from ``objectsStack`` deeper than ``level``; the
    bottom-most object is appended to the module-level ``classes`` /
    ``functions`` lists, while nested objects are attached to their
    parent's ``classes`` / ``functions``.
    """
    objectsCount = len(self.objectsStack)
    while objectsCount > level:
        lastIndex = objectsCount - 1
        if lastIndex == 0:
            # Top-level object: record it on the module, clear the stack.
            if self.objectsStack[0].__class__.__name__ == "Class":
                self.classes.append(self.objectsStack[0])
            else:
                self.functions.append(self.objectsStack[0])
            self.objectsStack = []
            break
        # Nested object: attach it to its parent one level up.
        if self.objectsStack[lastIndex].__class__.__name__ == "Class":
            self.objectsStack[lastIndex - 1].classes.append(self.objectsStack[lastIndex])
        else:
            self.objectsStack[lastIndex - 1].functions.append(self.objectsStack[lastIndex])
        del self.objectsStack[lastIndex]
        objectsCount -= 1
Merge the found objects to the required level
6,299
def _onEncoding(self, encString, line, pos, absPosition):
    """Parser callback: memorises the module encoding declaration."""
    self.encoding = Encoding(encString, line, pos, absPosition)
Memorizes module encoding