| idx (int64, 0-63k) | question (string, lengths 53-5.28k) | target (string, lengths 5-805) |
|---|---|---|
2,300
|
def add_text_to_image ( fname , txt , opFilename ) : ft = ImageFont . load ( "T://user//dev//src//python//_AS_LIB//timR24.pil" ) print ( "Adding text " , txt , " to " , fname , " pixels wide to file " , opFilename ) im = Image . open ( fname ) draw = ImageDraw . Draw ( im ) draw . text ( ( 0 , 0 ) , txt , fill = ( 0 , 0 , 0 ) , font = ft ) del draw im . save ( opFilename )
|
convert an image by adding text
|
2,301
|
def add_crosshair_to_image ( fname , opFilename ) : im = Image . open ( fname ) draw = ImageDraw . Draw ( im ) draw . line ( ( 0 , 0 ) + im . size , fill = ( 255 , 255 , 255 ) ) draw . line ( ( 0 , im . size [ 1 ] , im . size [ 0 ] , 0 ) , fill = ( 255 , 255 , 255 ) ) del draw im . save ( opFilename )
|
convert an image by adding a cross hair
|
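The two rows above draw on an image with Pillow's `ImageDraw`. A minimal standalone sketch of the crosshair idea, assuming Pillow is installed; the blank canvas and output file name are hypothetical stand-ins for the dataset's `fname`/`opFilename` arguments:

```python
from PIL import Image, ImageDraw

# Create a small blank canvas instead of loading a file (hypothetical input).
im = Image.new("RGB", (200, 100), color=(128, 128, 128))
draw = ImageDraw.Draw(im)

# Diagonal from top-left to bottom-right: (0, 0) + im.size -> (0, 0, w, h).
draw.line((0, 0) + im.size, fill=(255, 255, 255))
# Diagonal from bottom-left to top-right, completing the crosshair.
draw.line((0, im.size[1], im.size[0], 0), fill=(255, 255, 255))

im.save("crosshair_demo.png")
```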
2,302
|
def filter_contour ( imageFile , opFile ) : im = Image . open ( imageFile ) im1 = im . filter ( ImageFilter . CONTOUR ) im1 . save ( opFile )
|
convert an image by applying a contour
|
2,303
|
def get_img_hash ( image , hash_size = 8 ) : image = image . resize ( ( hash_size + 1 , hash_size ) , Image . ANTIALIAS , ) pixels = list ( image . getdata ( ) ) difference = [ ] for row in range ( hash_size ) : for col in range ( hash_size ) : pixel_left = image . getpixel ( ( col , row ) ) pixel_right = image . getpixel ( ( col + 1 , row ) ) difference . append ( pixel_left > pixel_right ) decimal_value = 0 hex_string = [ ] for index , value in enumerate ( difference ) : if value : decimal_value += 2 ** ( index % 8 ) if ( index % 8 ) == 7 : hex_string . append ( hex ( decimal_value ) [ 2 : ] . rjust ( 2 , '0' ) ) decimal_value = 0 return '' . join ( hex_string )
|
Grayscale and shrink the image in one step
|
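The snippet in row 2,303 computes a perceptual difference hash (dHash): shrink to (hash_size + 1) x hash_size, compare each pixel with its right-hand neighbour, and pack the resulting bits into hex; the target text describes only the resize step. A hedged standalone sketch, assuming a grayscale conversion (the row resizes without converting) and a slightly different bit-packing order:

```python
from PIL import Image

def dhash_hex(image, hash_size=8):
    # Grayscale and shrink to (hash_size + 1) x hash_size, then compare
    # each pixel with its right-hand neighbour.
    small = image.convert("L").resize((hash_size + 1, hash_size), Image.LANCZOS)
    bits = []
    for row in range(hash_size):
        for col in range(hash_size):
            bits.append(small.getpixel((col, row)) > small.getpixel((col + 1, row)))
    # Pack bits into a hex string (most significant bit first).
    value = 0
    for bit in bits:
        value = (value << 1) | int(bit)
    return format(value, "0{}x".format(hash_size * hash_size // 4))

print(dhash_hex(Image.new("L", (64, 64), color=128)))  # constant image -> all zeros
```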
2,304
|
def load_image ( fname ) : with open ( fname , "rb" ) as f : i = Image . open ( fname ) return i
|
read an image from file - PIL doesn't close nicely
|
2,305
|
def dump_img ( fname ) : img = Image . open ( fname ) width , _ = img . size txt = '' pixels = list ( img . getdata ( ) ) for col in range ( width ) : txt += str ( pixels [ col : col + width ] ) return txt
|
output the image as text
|
2,306
|
def NormInt ( df , sampleA , sampleB ) : c1 = df [ sampleA ] c2 = df [ sampleB ] return np . log10 ( np . sqrt ( c1 * c2 ) )
|
Normalizes intensities of a gene in two samples
|
2,307
|
def is_prime ( number ) : if number < 2 : return False if number % 2 == 0 : return number == 2 limit = int ( math . sqrt ( number ) ) for divisor in range ( 3 , limit + 1 , 2 ) : if number % divisor == 0 : return False return True
|
Test whether the given number is prime.
|
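Row 2,307 is trial division by odd numbers up to the square root. A short self-contained check, restating the same algorithm so it runs standalone and comparing it against a brute-force definition on small inputs:

```python
import math

def is_prime(number):
    if number < 2:
        return False
    if number % 2 == 0:
        return number == 2
    limit = int(math.sqrt(number))
    for divisor in range(3, limit + 1, 2):
        if number % divisor == 0:
            return False
    return True

# Sanity check against the brute-force definition for small numbers.
brute = lambda n: n >= 2 and all(n % d for d in range(2, n))
assert all(is_prime(n) == brute(n) for n in range(200))
print([n for n in range(30) if is_prime(n)])  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
```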
2,308
|
def qmed_all_methods ( self ) : result = { } for method in self . methods : try : result [ method ] = getattr ( self , '_qmed_from_' + method ) ( ) except : result [ method ] = None return result
|
Returns a dict of QMED estimates using all available methods.
|
2,309
|
def _qmed_from_amax_records ( self ) : valid_flows = valid_flows_array ( self . catchment ) n = len ( valid_flows ) if n < 2 : raise InsufficientDataError ( "Insufficient annual maximum flow records available for catchment {}." . format ( self . catchment . id ) ) return np . median ( valid_flows )
|
Return QMED estimate based on annual maximum flow records .
|
2,310
|
def _pot_month_counts ( self , pot_dataset ) : periods = pot_dataset . continuous_periods ( ) result = [ set ( ) for x in range ( 12 ) ] for period in periods : year = period . start_date . year month = period . start_date . month while True : result [ month - 1 ] . add ( year ) if year == period . end_date . year and month == period . end_date . month : break month += 1 if month == 13 : month = 1 year += 1 return result
|
Return a list of 12 sets. Each set contains the years included in the POT record period.
|
2,311
|
def _qmed_from_area ( self ) : try : return 1.172 * self . catchment . descriptors . dtm_area ** self . _area_exponent ( ) except ( TypeError , KeyError ) : raise InsufficientDataError ( "Catchment `descriptors` attribute must be set first." )
|
Return QMED estimate based on catchment area .
|
2,312
|
def _qmed_from_descriptors_1999 ( self , as_rural = False ) : try : qmed_rural = 1.172 * self . catchment . descriptors . dtm_area ** self . _area_exponent ( ) * ( self . catchment . descriptors . saar / 1000.0 ) ** 1.560 * self . catchment . descriptors . farl ** 2.642 * ( self . catchment . descriptors . sprhost / 100.0 ) ** 1.211 * 0.0198 ** self . _residual_soil ( ) if as_rural : return qmed_rural else : return qmed_rural * self . urban_adj_factor ( ) except ( TypeError , KeyError ) : raise InsufficientDataError ( "Catchment `descriptors` attribute must be set first." )
|
Return QMED estimation based on FEH catchment descriptors 1999 methodology .
|
2,313
|
def _qmed_from_descriptors_2008 ( self , as_rural = False , donor_catchments = None ) : try : lnqmed_rural = 2.1170 + 0.8510 * log ( self . catchment . descriptors . dtm_area ) - 1.8734 * 1000 / self . catchment . descriptors . saar + 3.4451 * log ( self . catchment . descriptors . farl ) - 3.0800 * self . catchment . descriptors . bfihost ** 2.0 qmed_rural = exp ( lnqmed_rural ) self . results_log [ 'qmed_descr_rural' ] = qmed_rural if donor_catchments is None : donor_catchments = self . find_donor_catchments ( ) if donor_catchments : weights = self . _vec_alpha ( donor_catchments ) errors = self . _vec_lnqmed_residuals ( donor_catchments ) correction = np . dot ( weights , errors ) lnqmed_rural += correction qmed_rural = exp ( lnqmed_rural ) self . results_log [ 'donors' ] = donor_catchments for i , donor in enumerate ( self . results_log [ 'donors' ] ) : donor . weight = weights [ i ] donor . factor = exp ( errors [ i ] ) self . results_log [ 'donor_adj_factor' ] = exp ( correction ) self . results_log [ 'qmed_adj_rural' ] = qmed_rural if as_rural : return qmed_rural else : urban_adj_factor = self . urban_adj_factor ( ) self . results_log [ 'qmed_descr_urban' ] = self . results_log [ 'qmed_descr_rural' ] * urban_adj_factor return qmed_rural * urban_adj_factor except ( TypeError , KeyError ) : raise InsufficientDataError ( "Catchment `descriptors` attribute must be set first." )
|
Return QMED estimation based on FEH catchment descriptors 2008 methodology .
|
2,314
|
def _pruaf ( self ) : return 1 + 0.47 * self . catchment . descriptors . urbext ( self . year ) * self . catchment . descriptors . bfihost / ( 1 - self . catchment . descriptors . bfihost )
|
Return percentage runoff urban adjustment factor .
|
2,315
|
def _dist_corr ( dist , phi1 , phi2 , phi3 ) : return phi1 * exp ( - phi2 * dist ) + ( 1 - phi1 ) * exp ( - phi3 * dist )
|
Generic distance-decaying correlation function
|
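Row 2,315 is a mixture of two exponential decays, rho(d) = phi1 * exp(-phi2 * d) + (1 - phi1) * exp(-phi3 * d). A standalone sketch with purely illustrative parameter values (not taken from the FEH methodology):

```python
from math import exp

def dist_corr(dist, phi1, phi2, phi3):
    # Weighted sum of a fast-decaying and a slow-decaying exponential term.
    return phi1 * exp(-phi2 * dist) + (1 - phi1) * exp(-phi3 * dist)

# Illustrative values only: correlation is 1 at zero distance and decays towards 0.
for d in (0, 10, 50, 200):
    print(d, round(dist_corr(d, phi1=0.4, phi2=0.03, phi3=0.003), 3))
```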
2,316
|
def _vec_b ( self , donor_catchments ) : p = len ( donor_catchments ) b = 0.1175 * np . ones ( p ) for i in range ( p ) : b [ i ] *= self . _model_error_corr ( self . catchment , donor_catchments [ i ] ) return b
|
Return vector b of model error covariances to estimate weights
|
2,317
|
def _beta ( catchment ) : lnbeta = - 1.1221 - 0.0816 * log ( catchment . descriptors . dtm_area ) - 0.4580 * log ( catchment . descriptors . saar / 1000 ) + 0.1065 * log ( catchment . descriptors . bfihost ) return exp ( lnbeta )
|
Return beta, the GLO scale parameter divided by the location parameter, estimated using a simple regression model
|
2,318
|
def _matrix_sigma_eta ( self , donor_catchments ) : p = len ( donor_catchments ) sigma = 0.1175 * np . ones ( ( p , p ) ) for i in range ( p ) : for j in range ( p ) : if i != j : sigma [ i , j ] *= self . _model_error_corr ( donor_catchments [ i ] , donor_catchments [ j ] ) return sigma
|
Return model error covariance matrix Sigma eta
|
2,319
|
def _matrix_sigma_eps ( self , donor_catchments ) : p = len ( donor_catchments ) sigma = np . empty ( ( p , p ) ) for i in range ( p ) : beta_i = self . _beta ( donor_catchments [ i ] ) n_i = donor_catchments [ i ] . amax_records_end ( ) - donor_catchments [ i ] . amax_records_start ( ) + 1 for j in range ( p ) : beta_j = self . _beta ( donor_catchments [ j ] ) n_j = donor_catchments [ j ] . amax_records_end ( ) - donor_catchments [ j ] . amax_records_start ( ) + 1 rho_ij = self . _lnqmed_corr ( donor_catchments [ i ] , donor_catchments [ j ] ) n_ij = min ( donor_catchments [ i ] . amax_records_end ( ) , donor_catchments [ j ] . amax_records_end ( ) ) - max ( donor_catchments [ i ] . amax_records_start ( ) , donor_catchments [ j ] . amax_records_start ( ) ) + 1 sigma [ i , j ] = 4 * beta_i * beta_j * n_ij / n_i / n_j * rho_ij return sigma
|
Return sampling error covariance matrix Sigma eps
|
2,320
|
def _vec_alpha ( self , donor_catchments ) : return np . dot ( linalg . inv ( self . _matrix_omega ( donor_catchments ) ) , self . _vec_b ( donor_catchments ) )
|
Return vector alpha, the weights for donor model errors
|
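The donor weights alpha in row 2,320 solve the linear system Omega * alpha = b. Numerically this is usually expressed with `numpy.linalg.solve` rather than forming the inverse explicitly; a minimal sketch with a made-up 2x2 system standing in for two donor catchments:

```python
import numpy as np

# Hypothetical covariance matrix Omega and vector b for two donor catchments.
omega = np.array([[0.12, 0.03],
                  [0.03, 0.15]])
b = np.array([0.08, 0.05])

# Mathematically equivalent to np.dot(np.linalg.inv(omega), b), but better conditioned.
alpha = np.linalg.solve(omega, b)
print(alpha)
```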
2,321
|
def find_donor_catchments ( self , limit = 6 , dist_limit = 500 ) : if self . gauged_catchments : return self . gauged_catchments . nearest_qmed_catchments ( self . catchment , limit , dist_limit ) else : return [ ]
|
Return suitable donor catchments to improve a QMED estimate based on catchment descriptors alone.
|
2,322
|
def _var_and_skew ( self , catchments , as_rural = False ) : if not hasattr ( catchments , '__getitem__' ) : l_cv , l_skew = self . _l_cv_and_skew ( self . catchment ) self . results_log [ 'donors' ] = [ ] else : n = len ( catchments ) l_cvs = np . empty ( n ) l_skews = np . empty ( n ) l_cv_weights = np . empty ( n ) l_skew_weights = np . empty ( n ) for index , donor in enumerate ( catchments ) : l_cvs [ index ] , l_skews [ index ] = self . _l_cv_and_skew ( donor ) l_cv_weights [ index ] = self . _l_cv_weight ( donor ) l_skew_weights [ index ] = self . _l_skew_weight ( donor ) l_cv_weights /= sum ( l_cv_weights ) if self . _similarity_distance ( self . catchment , catchments [ 0 ] ) == 0 : l_cv_weights *= self . _l_cv_weight_factor ( ) l_cv_weights [ 0 ] += 1 - sum ( l_cv_weights ) l_cv_rural = sum ( l_cv_weights * l_cvs ) l_skew_weights /= sum ( l_skew_weights ) l_skew_rural = sum ( l_skew_weights * l_skews ) self . results_log [ 'l_cv_rural' ] = l_cv_rural self . results_log [ 'l_skew_rural' ] = l_skew_rural if as_rural : l_cv = l_cv_rural l_skew = l_skew_rural else : l_cv = l_cv_rural * 0.5547 ** self . catchment . descriptors . urbext ( self . year ) l_skew = ( l_skew_rural + 1 ) * 1.1545 ** self . catchment . descriptors . urbext ( self . year ) - 1 self . results_log [ 'donors' ] = catchments total_record_length = 0 for index , donor in enumerate ( self . results_log [ 'donors' ] ) : donor . l_cv = l_cvs [ index ] donor . l_cv_weight = l_cv_weights [ index ] donor . l_skew = l_skews [ index ] donor . l_skew_weight = l_skew_weights [ index ] total_record_length += donor . record_length self . results_log [ 'donors_record_length' ] = total_record_length self . results_log [ 'l_cv' ] = l_cv self . results_log [ 'l_skew' ] = l_skew return l_cv , l_skew
|
Calculate L-CV and L-SKEW from a single catchment or a pooled group of catchments.
|
2,323
|
def _l_cv_and_skew ( self , catchment ) : z = self . _dimensionless_flows ( catchment ) l1 , l2 , t3 = lm . lmom_ratios ( z , nmom = 3 ) return l2 / l1 , t3
|
Calculate L-CV and L-SKEW for a gauged catchment. Uses lmoments3 library.
|
2,324
|
def _l_cv_weight ( self , donor_catchment ) : try : dist = donor_catchment . similarity_dist except AttributeError : dist = self . _similarity_distance ( self . catchment , donor_catchment ) b = 0.0047 * sqrt ( dist ) + 0.0023 / 2 c = 0.02609 / ( donor_catchment . record_length - 1 ) return 1 / ( b + c )
|
Return L-CV weighting for a donor catchment.
|
2,325
|
def _l_cv_weight_factor ( self ) : b = 0.0047 * sqrt ( 0 ) + 0.0023 / 2 c = 0.02609 / ( self . catchment . record_length - 1 ) return c / ( b + c )
|
Return multiplier for L-CV weightings in case of enhanced single site analysis.
|
2,326
|
def _l_skew_weight ( self , donor_catchment ) : try : dist = donor_catchment . similarity_dist except AttributeError : dist = self . _similarity_distance ( self . catchment , donor_catchment ) b = 0.0219 * ( 1 - exp ( - dist / 0.2360 ) ) c = 0.2743 / ( donor_catchment . record_length - 2 ) return 1 / ( b + c )
|
Return L-SKEW weighting for donor catchment.
|
2,327
|
def _growth_curve_single_site ( self , distr = 'glo' ) : if self . catchment . amax_records : self . donor_catchments = [ ] return GrowthCurve ( distr , * self . _var_and_skew ( self . catchment ) ) else : raise InsufficientDataError ( "Catchment's `amax_records` must be set for a single site analysis." )
|
Return flood growth curve function based on amax_records from the subject catchment only .
|
2,328
|
def _growth_curve_pooling_group ( self , distr = 'glo' , as_rural = False ) : if not self . donor_catchments : self . find_donor_catchments ( ) gc = GrowthCurve ( distr , * self . _var_and_skew ( self . donor_catchments ) ) self . results_log [ 'distr_name' ] = distr . upper ( ) self . results_log [ 'distr_params' ] = gc . params return gc
|
Return flood growth curve function based on amax_records from a pooling group .
|
2,329
|
def process ( self , document ) : content = json . dumps ( document ) versions = { } versions . update ( { 'Spline' : Version ( VERSION ) } ) versions . update ( self . get_version ( "Bash" , self . BASH_VERSION ) ) if content . find ( '"docker(container)":' ) >= 0 or content . find ( '"docker(image)":' ) >= 0 : versions . update ( VersionsCheck . get_version ( "Docker" , self . DOCKER_VERSION ) ) if content . find ( '"packer":' ) >= 0 : versions . update ( VersionsCheck . get_version ( "Packer" , self . PACKER_VERSION ) ) if content . find ( '"ansible(simple)":' ) >= 0 : versions . update ( VersionsCheck . get_version ( 'Ansible' , self . ANSIBLE_VERSION ) ) return versions
|
Logging versions of required tools .
|
2,330
|
def get_version ( tool_name , tool_command ) : result = { } for line in Bash ( ShellConfig ( script = tool_command , internal = True ) ) . process ( ) : if line . find ( "command not found" ) >= 0 : VersionsCheck . LOGGER . error ( "Required tool '%s' not found (stopping pipeline)!" , tool_name ) sys . exit ( 1 ) else : version = list ( re . findall ( r'(\d+(\.\d+)+)+' , line ) ) [ 0 ] [ 0 ] result = { tool_name : Version ( str ( version ) ) } break return result
|
Get name and version of a tool defined by given command .
|
2,331
|
def process ( self , versions ) : for tool_name in sorted ( versions . keys ( ) ) : version = versions [ tool_name ] self . _log ( "Using tool '%s', %s" % ( tool_name , version ) )
|
Log versions sorted ascending by tool name.
|
2,332
|
def register_event ( self , * names ) : for name in names : if name in self . __events : continue self . __events [ name ] = Event ( name )
|
Registers new events after instance creation
|
2,333
|
def emit ( self , name , * args , ** kwargs ) : e = self . __property_events . get ( name ) if e is None : e = self . __events [ name ] return e ( * args , ** kwargs )
|
Dispatches an event to any subscribed listeners
|
2,334
|
def get_dispatcher_event ( self , name ) : e = self . __property_events . get ( name ) if e is None : e = self . __events [ name ] return e
|
Retrieves an Event object by name
|
2,335
|
def emission_lock ( self , name ) : e = self . __property_events . get ( name ) if e is None : e = self . __events [ name ] return e . emission_lock
|
Holds emission of events and dispatches the last event on release
|
2,336
|
def TEST ( fname ) : m = MapObject ( fname , os . path . join ( os . getcwd ( ) , 'img_prog_results' ) ) m . add_layer ( ImagePathFollow ( 'border' ) ) m . add_layer ( ImagePathFollow ( 'river' ) ) m . add_layer ( ImagePathFollow ( 'road' ) ) m . add_layer ( ImageArea ( 'sea' , col = 'Blue' , density = 'light' ) ) m . add_layer ( ImageArea ( 'desert' , col = 'Yellow' , density = 'med' ) ) m . add_layer ( ImageArea ( 'forest' , col = 'Drak Green' , density = 'light' ) ) m . add_layer ( ImageArea ( 'fields' , col = 'Green' , density = 'light' ) ) m . add_layer ( ImageObject ( 'mountains' ) ) m . add_layer ( ImageObject ( 'trees' ) ) m . add_layer ( ImageObject ( 'towns' ) )
|
Test function to step through all functions in order to try and identify all features on a map. This test function should be placed in a main section later.
|
2,337
|
def describe_contents ( self ) : print ( '======================================================================' ) print ( self ) print ( 'Table = ' , str ( len ( self . header ) ) + ' cols x ' + str ( len ( self . arr ) ) + ' rows' ) print ( 'HEADER = ' , self . get_header ( ) ) print ( 'arr = ' , self . arr [ 0 : 2 ] )
|
describes various contents of data table
|
2,338
|
def get_distinct_values_from_cols ( self , l_col_list ) : uniq_vals = [ ] for l_col_name in l_col_list : uniq_vals . append ( set ( self . get_col_data_by_name ( l_col_name ) ) ) if len ( l_col_list ) == 0 : return [ ] elif len ( l_col_list ) == 1 : return sorted ( [ v for v in uniq_vals ] ) elif len ( l_col_list ) == 2 : res = [ ] res = [ ( a , b ) for a in uniq_vals [ 0 ] for b in uniq_vals [ 1 ] ] return res else : print ( "TODO " ) return - 44
|
returns the list of distinct combinations in a dataset based on the columns in the list. Note that this is currently implemented as MAX permutations of the combo so it is not guaranteed to have values in each case.
|
2,339
|
def select_where ( self , where_col_list , where_value_list , col_name = '' ) : res = [ ] col_ids = [ ] for col_id , col in enumerate ( self . header ) : if col in where_col_list : col_ids . append ( [ col_id , col ] ) for row_num , row in enumerate ( self . arr ) : keep_this_row = True for ndx , where_col in enumerate ( col_ids ) : if row [ where_col [ 0 ] ] != where_value_list [ ndx ] : keep_this_row = False if keep_this_row is True : if col_name == '' : res . append ( [ row_num , row ] ) else : l_dat = self . get_col_by_name ( col_name ) if l_dat is not None : res . append ( row [ l_dat ] ) return res
|
selects rows from the array where col_list == val_list
|
2,340
|
def update_where ( self , col , value , where_col_list , where_value_list ) : if type ( col ) is str : col_ndx = self . get_col_by_name ( col ) else : col_ndx = col new_arr = self . select_where ( where_col_list , where_value_list ) for r in new_arr : self . arr [ r [ 0 ] ] [ col_ndx ] = value
|
updates the array to set cell = value where col_list == val_list
|
2,341
|
def percentile ( self , lst_data , percent , key = lambda x : x ) : new_list = sorted ( lst_data ) k = ( len ( new_list ) - 1 ) * percent f = math . floor ( k ) c = math . ceil ( k ) if f == c : return key ( new_list [ int ( k ) ] ) d0 = float ( key ( new_list [ int ( f ) ] ) ) * ( c - k ) d1 = float ( key ( new_list [ int ( c ) ] ) ) * ( k - f ) return d0 + d1
|
calculates the given percentile of the items in the list
|
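Row 2,341 linearly interpolates between the two nearest order statistics (the same convention as numpy's default percentile). A small sketch duplicating the logic so it runs standalone; `percent` is a fraction in [0, 1]:

```python
import math

def percentile(lst_data, percent, key=lambda x: x):
    # percent is a fraction, e.g. 0.25 for the first quartile.
    new_list = sorted(lst_data)
    k = (len(new_list) - 1) * percent
    f, c = math.floor(k), math.ceil(k)
    if f == c:
        return key(new_list[int(k)])
    d0 = float(key(new_list[int(f)])) * (c - k)  # weight of the lower neighbour
    d1 = float(key(new_list[int(c)])) * (k - f)  # weight of the upper neighbour
    return d0 + d1

data = [15, 20, 35, 40, 50]
print(percentile(data, 0.4))  # 29.0, interpolated between 20 and 35
print(percentile(data, 0.5))  # 35
```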
2,342
|
def save ( self , filename , content ) : with open ( filename , "w" ) as f : if hasattr ( content , '__iter__' ) : f . write ( '\n' . join ( [ row for row in content ] ) ) else : print ( 'WRINGI CONTWETESWREWR' ) f . write ( str ( content ) )
|
default is to save a file from list of lines
|
2,343
|
def save_csv ( self , filename , write_header_separately = True ) : txt = '' with open ( filename , "w" ) as f : if write_header_separately : f . write ( ',' . join ( [ c for c in self . header ] ) + '\n' ) for row in self . arr : txt = ',' . join ( [ self . force_to_string ( col ) for col in row ] ) f . write ( txt + '\n' ) f . write ( '\n' )
|
save the default array as a CSV file
|
2,344
|
def drop ( self , fname ) : if self . dataset_type == 'file' : import os try : os . remove ( fname ) except Exception as ex : print ( 'cant drop file "' + fname + '" : ' + str ( ex ) )
|
drop the table view or delete the file
|
2,345
|
def get_col_data_by_name ( self , col_name , WHERE_Clause = '' ) : col_key = self . get_col_by_name ( col_name ) if col_key is None : print ( 'get_col_data_by_name: col_name = ' , col_name , ' NOT FOUND' ) return [ ] res = [ ] for row in self . arr : res . append ( row [ col_key ] ) return res
|
returns the values of col_name according to where
|
2,346
|
def format_rst ( self ) : res = '' num_cols = len ( self . header ) col_width = 25 for _ in range ( num_cols ) : res += '' . join ( [ '=' for _ in range ( col_width - 1 ) ] ) + ' ' res += '\n' for c in self . header : res += c . ljust ( col_width ) res += '\n' for _ in range ( num_cols ) : res += '' . join ( [ '=' for _ in range ( col_width - 1 ) ] ) + ' ' res += '\n' for row in self . arr : for c in row : res += self . force_to_string ( c ) . ljust ( col_width ) res += '\n' for _ in range ( num_cols ) : res += '' . join ( [ '=' for _ in range ( col_width - 1 ) ] ) + ' ' res += '\n' return res
|
return table in RST format
|
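Row 2,346 emits a reStructuredText "simple table": a rule of `=` runs, the padded header, another rule, the padded data rows, and a closing rule. A tiny sketch of the same layout with hypothetical header and rows:

```python
header = ["name", "value"]
rows = [["alpha", "1"], ["beta", "22"]]
col_width = 10

# One run of '=' per column, padded to the column width.
rule = " ".join("=" * (col_width - 1) for _ in header)
lines = [rule, "".join(c.ljust(col_width) for c in header), rule]
lines += ["".join(str(c).ljust(col_width) for c in row) for row in rows]
lines.append(rule)
print("\n".join(lines))
# ========= =========
# name      value
# ========= =========
# alpha     1
# beta      22
# ========= =========
```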
2,347
|
def getHomoloGene ( taxfile = "build_inputs/taxid_taxname" , genefile = "homologene.data" , proteinsfile = "build_inputs/all_proteins.data" , proteinsclusterfile = "build_inputs/proteins_for_clustering.data" , baseURL = "http://ftp.ncbi.nih.gov/pub/HomoloGene/current/" ) : def getDf ( inputfile ) : if os . path . isfile ( inputfile ) : df = pd . read_table ( inputfile , header = None ) else : df = urllib2 . urlopen ( baseURL + inputfile ) df = df . read ( ) . split ( "\n" ) df = [ s for s in df if len ( s ) > 0 ] df = [ s . split ( "\t" ) for s in df ] df = pd . DataFrame ( df ) return df taxdf = getDf ( taxfile ) taxdf . set_index ( [ 0 ] , inplace = True ) taxdi = taxdf . to_dict ( ) . get ( 1 ) genedf = getDf ( genefile ) genecols = [ "HID" , "Taxonomy ID" , "Gene ID" , "Gene Symbol" , "Protein gi" , "Protein accession" ] genedf . columns = genecols genedf [ "organism" ] = genedf [ "Taxonomy ID" ] . apply ( lambda x : taxdi . get ( x ) ) proteinsdf = getDf ( proteinsfile ) proteinscols = [ "taxid" , "entrez GeneID" , "gene symbol" , "gene description" , "protein accession.ver" , "mrna accession.ver" , "length of protein listed in column 5" , "-11) contains data about gene location on the genome" , "starting position of gene in 0-based coordinate" , "end position of the gene in 0-based coordinate" , "strand" , "nucleotide gi of genomic sequence where this gene is annotated" ] proteinsdf . columns = proteinscols proteinsdf [ "organism" ] = proteinsdf [ "taxid" ] . apply ( lambda x : taxdi . get ( x ) ) protclusdf = getDf ( proteinsclusterfile ) protclustercols = [ "taxid" , "entrez GeneID" , "gene symbol" , "gene description" , "protein accession.ver" , "mrna accession.ver" , "length of protein listed in column 5" , "-11) contains data about gene location on the genome" , "starting position of gene in 0-based coordinate" , "end position of the gene in 0-based coordinate" , "strand" , "nucleotide gi of genomic sequence where this gene is annotated" ] protclusdf . columns = proteinscols protclusdf [ "organism" ] = protclusdf [ "taxid" ] . apply ( lambda x : taxdi . get ( x ) ) return genedf , protclusdf , proteinsdf
|
Returns NCBI's HomoloGene tables.
|
2,348
|
def getFasta ( opened_file , sequence_name ) : lines = opened_file . readlines ( ) seq = str ( "" ) for i in range ( 0 , len ( lines ) ) : line = lines [ i ] if line [ 0 ] == ">" : fChr = line . split ( " " ) [ 0 ] . split ( "\n" ) [ 0 ] fChr = fChr [ 1 : ] if fChr == sequence_name : s = i code = [ 'N' , 'A' , 'C' , 'T' , 'G' ] firstbase = lines [ s + 1 ] [ 0 ] while firstbase in code : s = s + 1 seq = seq + lines [ s ] firstbase = lines [ s + 1 ] [ 0 ] if len ( seq ) == 0 : seq = None else : seq = seq . split ( "\n" ) seq = "" . join ( seq ) return seq
|
Retrieves a sequence from an opened multifasta file
|
2,349
|
def writeFasta ( sequence , sequence_name , output_file ) : i = 0 f = open ( output_file , 'w' ) f . write ( ">" + str ( sequence_name ) + "\n" ) while i <= len ( sequence ) : f . write ( sequence [ i : i + 60 ] + "\n" ) i = i + 60 f . close ( )
|
Writes a fasta sequence into a file .
|
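Row 2,349 wraps the sequence at 60 characters per line, the conventional FASTA line width. A standalone sketch of the same idea using a temporary file and a made-up sequence name:

```python
import os
import tempfile

def write_fasta(sequence, sequence_name, output_file):
    with open(output_file, "w") as f:
        f.write(">" + str(sequence_name) + "\n")
        # Emit the sequence in 60-character chunks, one per line.
        for i in range(0, len(sequence), 60):
            f.write(sequence[i:i + 60] + "\n")

path = os.path.join(tempfile.gettempdir(), "demo.fasta")
write_fasta("ACGT" * 40, "chr_demo", path)  # 160 bases -> 3 sequence lines
with open(path) as f:
    print(f.read())
```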
2,350
|
def rewriteFasta ( sequence , sequence_name , fasta_in , fasta_out ) : f = open ( fasta_in , 'r+' ) f2 = open ( fasta_out , 'w' ) lines = f . readlines ( ) i = 0 while i < len ( lines ) : line = lines [ i ] if line [ 0 ] == ">" : f2 . write ( line ) fChr = line . split ( " " ) [ 0 ] fChr = fChr [ 1 : ] if fChr == sequence_name : code = [ 'N' , 'A' , 'C' , 'T' , 'G' ] firstbase = lines [ i + 1 ] [ 0 ] while firstbase in code : i = i + 1 firstbase = lines [ i ] [ 0 ] s = 0 while s <= len ( sequence ) : f2 . write ( sequence [ s : s + 60 ] + "\n" ) s = s + 60 else : i = i + 1 else : f2 . write ( line ) i = i + 1 f2 . close f . close
|
Rewrites a specific sequence in a multifasta file while keeping the sequence header .
|
2,351
|
def _get_tool_str ( self , tool ) : res = tool [ 'file' ] try : res += '.' + tool [ 'function' ] except Exception as ex : print ( 'Warning - no function defined for tool ' + str ( tool ) ) res += '\n' return res
|
get a string representation of the tool
|
2,352
|
def get_tool_by_name ( self , nme ) : for t in self . lstTools : if 'name' in t : if t [ 'name' ] == nme : return t if 'file' in t : if t [ 'file' ] == nme : return t return None
|
get the tool object by name or file
|
2,353
|
def save ( self , fname = '' ) : if fname != '' : with open ( fname , 'w' ) as f : for t in self . lstTools : self . verify ( t ) f . write ( self . tool_as_string ( t ) )
|
Save the list of tools to AIKIF core and optionally to local file fname
|
2,354
|
def verify ( self , tool ) : if os . path . isfile ( tool [ 'file' ] ) : print ( 'Toolbox: program exists = TOK :: ' + tool [ 'file' ] ) return True else : print ( 'Toolbox: program exists = FAIL :: ' + tool [ 'file' ] ) return False
|
check that the tool exists
|
2,355
|
def run ( self , tool , args , new_import_path = '' ) : if new_import_path != '' : sys . path . append ( new_import_path ) print ( 'main called ' + tool [ 'file' ] + '->' + tool [ 'function' ] + ' with ' , args , ' = ' , tool [ 'return' ] ) mod = __import__ ( os . path . basename ( tool [ 'file' ] ) . split ( '.' ) [ 0 ] ) func = getattr ( mod , tool [ 'function' ] ) tool [ 'return' ] = func ( args ) return tool [ 'return' ]
|
import the tool and call the function passing the args .
|
2,356
|
def main ( ** kwargs ) : options = ApplicationOptions ( ** kwargs ) Event . configure ( is_logging_enabled = options . event_logging ) application = Application ( options ) application . run ( options . definition )
|
The Pipeline tool .
|
2,357
|
def setup_logging ( self ) : is_custom_logging = len ( self . options . logging_config ) > 0 is_custom_logging = is_custom_logging and os . path . isfile ( self . options . logging_config ) is_custom_logging = is_custom_logging and not self . options . dry_run if is_custom_logging : Logger . configure_by_file ( self . options . logging_config ) else : logging_format = "%(asctime)-15s - %(name)s - %(message)s" if self . options . dry_run : logging_format = "%(name)s - %(message)s" Logger . configure_default ( logging_format , self . logging_level )
|
Setup of application logging .
|
2,358
|
def validate_document ( self , definition ) : initial_document = { } try : initial_document = Loader . load ( definition ) except RuntimeError as exception : self . logger . error ( str ( exception ) ) sys . exit ( 1 ) document = Validator ( ) . validate ( initial_document ) if document is None : self . logger . info ( "Schema validation for '%s' has failed" , definition ) sys . exit ( 1 ) self . logger . info ( "Schema validation for '%s' succeeded" , definition ) return document
|
Validate given pipeline document .
|
2,359
|
def run_matrix ( self , matrix_definition , document ) : matrix = Matrix ( matrix_definition , 'matrix(parallel)' in document ) process_data = MatrixProcessData ( ) process_data . options = self . options process_data . pipeline = document [ 'pipeline' ] process_data . model = { } if 'model' not in document else document [ 'model' ] process_data . hooks = Hooks ( document ) return matrix . process ( process_data )
|
Running pipeline via a matrix .
|
2,360
|
def shutdown ( self , collector , success ) : self . event . delegate ( success ) if collector is not None : collector . queue . put ( None ) collector . join ( ) if not success : sys . exit ( 1 )
|
Shutdown of the application .
|
2,361
|
def provide_temporary_scripts_path ( self ) : if len ( self . options . temporary_scripts_path ) > 0 : if os . path . isfile ( self . options . temporary_scripts_path ) : self . logger . error ( "Error: configured script path seems to be a file!" ) sys . exit ( 1 ) if not os . path . isdir ( self . options . temporary_scripts_path ) : os . makedirs ( self . options . temporary_scripts_path )
|
When configured, try to ensure that the path exists.
|
2,362
|
def create_and_run_collector ( document , options ) : collector = None if not options . report == 'off' : collector = Collector ( ) collector . store . configure ( document ) Event . configure ( collector_queue = collector . queue ) collector . start ( ) return collector
|
Create and run collector process for report data .
|
2,363
|
def docker_environment ( env ) : return ' ' . join ( [ "-e \"%s=%s\"" % ( key , value . replace ( "$" , "\\$" ) . replace ( "\"" , "\\\"" ) . replace ( "`" , "\\`" ) ) for key , value in env . items ( ) ] )
|
Transform dictionary of environment variables into Docker -e parameters.
|
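Row 2,363 turns an environment dict into a string of Docker `-e` flags, escaping `$`, `"` and backticks so the values survive shell interpolation. A minimal sketch with hypothetical variable names (insertion-ordered dicts assumed, Python 3.7+):

```python
def docker_environment(env):
    # Escape $, " and ` so the values survive a shell command line.
    return ' '.join(
        '-e "%s=%s"' % (key,
                        value.replace("$", "\\$")
                             .replace('"', '\\"')
                             .replace("`", "\\`"))
        for key, value in env.items())

print(docker_environment({"GREETING": "hello", "PRICE": "$5", "QUOTE": 'say "hi"'}))
# -e "GREETING=hello" -e "PRICE=\$5" -e "QUOTE=say \"hi\""
```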
2,364
|
def _retrieve_download_url ( ) : try : with urlopen ( config [ 'nrfa' ] [ 'oh_json_url' ] , timeout = 10 ) as f : remote_config = json . loads ( f . read ( ) . decode ( 'utf-8' ) ) if remote_config [ 'nrfa_url' ] . startswith ( '.' ) : remote_config [ 'nrfa_url' ] = 'file:' + pathname2url ( os . path . abspath ( remote_config [ 'nrfa_url' ] ) ) _update_nrfa_metadata ( remote_config ) return remote_config [ 'nrfa_url' ] except URLError : return config [ 'nrfa' ] [ 'url' ]
|
Retrieves download location for FEH data zip file from hosted json configuration file .
|
2,365
|
def update_available ( after_days = 1 ) : never_downloaded = not bool ( config . get ( 'nrfa' , 'downloaded_on' , fallback = None ) or None ) if never_downloaded : config . set_datetime ( 'nrfa' , 'update_checked_on' , datetime . utcnow ( ) ) config . save ( ) return True last_checked_on = config . get_datetime ( 'nrfa' , 'update_checked_on' , fallback = None ) or datetime . fromtimestamp ( 0 ) if datetime . utcnow ( ) < last_checked_on + timedelta ( days = after_days ) : return False current_version = LooseVersion ( config . get ( 'nrfa' , 'version' , fallback = '0' ) or '0' ) try : with urlopen ( config [ 'nrfa' ] [ 'oh_json_url' ] , timeout = 10 ) as f : remote_version = LooseVersion ( json . loads ( f . read ( ) . decode ( 'utf-8' ) ) [ 'nrfa_version' ] ) config . set_datetime ( 'nrfa' , 'update_checked_on' , datetime . utcnow ( ) ) config . save ( ) return remote_version > current_version except URLError : return None
|
Check whether updated NRFA data is available .
|
2,366
|
def download_data ( ) : with urlopen ( _retrieve_download_url ( ) ) as f : with open ( os . path . join ( CACHE_FOLDER , CACHE_ZIP ) , "wb" ) as local_file : local_file . write ( f . read ( ) )
|
Downloads the complete station dataset, including catchment descriptors and amax records, and saves it into a cache folder.
|
2,367
|
def _update_nrfa_metadata ( remote_config ) : config [ 'nrfa' ] [ 'oh_json_url' ] = remote_config [ 'nrfa_oh_json_url' ] config [ 'nrfa' ] [ 'version' ] = remote_config [ 'nrfa_version' ] config [ 'nrfa' ] [ 'url' ] = remote_config [ 'nrfa_url' ] config . set_datetime ( 'nrfa' , 'published_on' , datetime . utcfromtimestamp ( remote_config [ 'nrfa_published_on' ] ) ) config . set_datetime ( 'nrfa' , 'downloaded_on' , datetime . utcnow ( ) ) config . set_datetime ( 'nrfa' , 'update_checked_on' , datetime . utcnow ( ) ) config . save ( )
|
Save NRFA metadata to local config file using retrieved config data
|
2,368
|
def nrfa_metadata ( ) : result = { 'url' : config . get ( 'nrfa' , 'url' , fallback = None ) or None , 'version' : config . get ( 'nrfa' , 'version' , fallback = None ) or None , 'published_on' : config . get_datetime ( 'nrfa' , 'published_on' , fallback = None ) or None , 'downloaded_on' : config . get_datetime ( 'nrfa' , 'downloaded_on' , fallback = None ) or None } return result
|
Return metadata on the NRFA data .
|
2,369
|
def unzip_data ( ) : with ZipFile ( os . path . join ( CACHE_FOLDER , CACHE_ZIP ) , 'r' ) as zf : zf . extractall ( path = CACHE_FOLDER )
|
Extract all files from downloaded FEH data zip file .
|
2,370
|
def get_xml_stats ( fname ) : f = mod_file . TextFile ( fname ) res = { } res [ 'shortname' ] = f . name res [ 'folder' ] = f . path res [ 'filesize' ] = str ( f . size ) + ' bytes' res [ 'num_lines' ] = str ( f . lines ) + ' lines' res [ 'date_modified' ] = f . GetDateAsString ( f . date_modified ) return res
|
return a dictionary of statistics about an XML file, including size in bytes, number of lines, number of elements, and count by element
|
2,371
|
def make_random_xml_file ( fname , num_elements = 200 , depth = 3 ) : with open ( fname , 'w' ) as f : f . write ( '<?xml version="1.0" ?>\n<random>\n' ) for dep_num , _ in enumerate ( range ( 1 , depth ) ) : f . write ( ' <depth>\n <content>\n' ) for num , _ in enumerate ( range ( 1 , num_elements ) ) : f . write ( ' <stuff>data line ' + str ( num ) + '</stuff>\n' ) f . write ( ' </content>\n </depth>\n' ) f . write ( '</random>\n' )
|
makes a random xml file mainly for testing the xml_split
|
2,372
|
def organismsKEGG ( ) : organisms = urlopen ( "http://rest.kegg.jp/list/organism" ) . read ( ) organisms = organisms . split ( "\n" ) organisms = [ s . split ( "\t" ) for s in organisms ] organisms = pd . DataFrame ( organisms ) return organisms
|
Lists all organisms present in the KEGG database .
|
2,373
|
def databasesKEGG ( organism , ens_ids ) : all_genes = urlopen ( "http://rest.kegg.jp/list/" + organism ) . read ( ) all_genes = all_genes . split ( "\n" ) dbs = [ ] while len ( dbs ) == 0 : for g in all_genes : if len ( dbs ) == 0 : kid = g . split ( "\t" ) [ 0 ] gene = urlopen ( "http://rest.kegg.jp/get/" + kid ) . read ( ) DBLINKS = gene . split ( "\n" ) DBLINKS = [ s for s in DBLINKS if ":" in s ] for d in DBLINKS : test = d . split ( " " ) test = test [ len ( test ) - 1 ] if test in ens_ids : DBLINK = [ s for s in DBLINKS if test in s ] DBLINK = DBLINK [ 0 ] . split ( ":" ) DBLINK = DBLINK [ len ( DBLINK ) - 2 ] dbs . append ( DBLINK ) else : break ens_db = dbs [ 0 ] . split ( " " ) ens_db = ens_db [ len ( ens_db ) - 1 ] test_db = urlopen ( "http://rest.genome.jp/link/" + ens_db + "/" + organism ) . read ( ) test_db = test_db . split ( "\n" ) if len ( test_db ) == 1 : print ( "For " + organism + " the following db was found: " + ens_db ) print ( "This database does not seem to be valid KEGG-linked database identifier" ) print ( "For \n'hsa' use 'ensembl-hsa'\n'mmu' use 'ensembl-mmu'\n'cel' use 'EnsemblGenomes-Gn'\n'dme' use 'FlyBase'" ) sys . stdout . flush ( ) ens_db = None else : print ( "For " + organism + " the following db was found: " + ens_db ) sys . stdout . flush ( ) return ens_db
|
Finds KEGG database identifiers for a respective organism given example ensembl ids .
|
2,374
|
def ensembl_to_kegg ( organism , kegg_db ) : print ( "KEGG API: http://rest.genome.jp/link/" + kegg_db + "/" + organism ) sys . stdout . flush ( ) kegg_ens = urlopen ( "http://rest.genome.jp/link/" + kegg_db + "/" + organism ) . read ( ) kegg_ens = kegg_ens . split ( "\n" ) final = [ ] for i in kegg_ens : final . append ( i . split ( "\t" ) ) df = pd . DataFrame ( final [ 0 : len ( final ) - 1 ] ) [ [ 0 , 1 ] ] ens_id = pd . DataFrame ( df [ 1 ] . str . split ( ":" ) . tolist ( ) ) [ 1 ] df = pd . concat ( [ df , ens_id ] , axis = 1 ) df . columns = [ 'KEGGid' , 'ensDB' , 'ENSid' ] df = df [ [ 'KEGGid' , 'ENSid' ] ] return df
|
Looks up KEGG mappings of KEGG ids to ensembl ids
|
2,375
|
def ecs_idsKEGG ( organism ) : kegg_ec = urlopen ( "http://rest.kegg.jp/link/" + organism + "/enzyme" ) . read ( ) kegg_ec = kegg_ec . split ( "\n" ) final = [ ] for k in kegg_ec : final . append ( k . split ( "\t" ) ) df = pd . DataFrame ( final [ 0 : len ( final ) - 1 ] ) [ [ 0 , 1 ] ] df . columns = [ 'ec' , 'KEGGid' ] return df
|
Uses KEGG to retrieve all ids and respective ecs for a given KEGG organism
|
2,376
|
def idsKEGG ( organism ) : ORG = urlopen ( "http://rest.kegg.jp/list/" + organism ) . read ( ) ORG = ORG . split ( "\n" ) final = [ ] for k in ORG : final . append ( k . split ( "\t" ) ) df = pd . DataFrame ( final [ 0 : len ( final ) - 1 ] ) [ [ 0 , 1 ] ] df . columns = [ 'KEGGid' , 'description' ] field = pd . DataFrame ( df [ 'description' ] . str . split ( ';' , 1 ) . tolist ( ) ) [ 0 ] field = pd . DataFrame ( field ) df = pd . concat ( [ df [ [ 'KEGGid' ] ] , field ] , axis = 1 ) df . columns = [ 'KEGGid' , 'gene_name' ] df = df [ [ 'gene_name' , 'KEGGid' ] ] return df
|
Uses KEGG to retrieve all ids for a given KEGG organism
|
2,377
|
def biomaRtTOkegg ( df ) : df = df . dropna ( ) ECcols = df . columns . tolist ( ) df . reset_index ( inplace = True , drop = True ) field = pd . DataFrame ( df [ 'kegg_enzyme' ] . str . split ( '+' , 1 ) . tolist ( ) ) [ 1 ] field = pd . DataFrame ( field ) df = pd . concat ( [ df [ [ 'ensembl_gene_id' ] ] , field ] , axis = 1 ) df . columns = ECcols df . drop_duplicates ( inplace = True ) df . reset_index ( inplace = True , drop = True ) plus = df [ 'kegg_enzyme' ] . tolist ( ) plus = [ s for s in plus if "+" in s ] noPlus = df [ ~ df [ 'kegg_enzyme' ] . isin ( plus ) ] plus = df [ df [ 'kegg_enzyme' ] . isin ( plus ) ] noPlus . reset_index ( inplace = True , drop = True ) plus . reset_index ( inplace = True , drop = True ) for p in range ( 0 , len ( plus ) ) : enz = plus . ix [ p ] [ 'kegg_enzyme' ] enz = enz . split ( "+" ) enz = pd . DataFrame ( enz ) enz . colums = [ 'kegg_enzyme' ] enz [ 'ensembl_gene_id' ] = plus . ix [ p ] [ 'kegg_enzyme' ] noPlus = pd . concat ( [ noPlus , enz ] ) noPlus = noPlus . drop_duplicates ( ) noPlus = noPlus [ [ 'ensembl_gene_id' , 'kegg_enzyme' ] ] noPlus [ 'fake' ] = 'ec:' noPlus [ 'kegg_enzyme' ] = noPlus [ 'fake' ] + noPlus [ 'kegg_enzyme' ] noPlus = noPlus [ [ 'ensembl_gene_id' , 'kegg_enzyme' ] ] return noPlus
|
Transforms a pandas dataframe with the columns ensembl_gene_id, kegg_enzyme to a dataframe ready for use in ...
|
2,378
|
def expKEGG ( organism , names_KEGGids ) : kegg_paths = urlopen ( "http://rest.kegg.jp/list/pathway/" + organism ) . read ( ) kegg_paths = kegg_paths . split ( "\n" ) final = [ ] for k in kegg_paths : final . append ( k . split ( "\t" ) ) df = pd . DataFrame ( final [ 0 : len ( final ) - 1 ] ) [ [ 0 , 1 ] ] df . columns = [ 'pathID' , 'pathName' ] print ( "Collecting genes for pathways" ) sys . stdout . flush ( ) df_pg = pd . DataFrame ( ) for i in df [ 'pathID' ] . tolist ( ) : print ( i ) sys . stdout . flush ( ) path_genes = urlopen ( "http://rest.kegg.jp/link/genes/" + i ) . read ( ) path_genes = path_genes . split ( "\n" ) final = [ ] for k in path_genes : final . append ( k . split ( "\t" ) ) if len ( final [ 0 ] ) > 1 : df_tmp = pd . DataFrame ( final [ 0 : len ( final ) - 1 ] ) [ [ 0 , 1 ] ] df_tmp . columns = [ 'pathID' , 'KEGGid' ] df_pg = pd . concat ( [ df_pg , df_tmp ] ) df = pd . merge ( df , df_pg , on = [ "pathID" ] , how = "outer" ) df = df [ df [ 'KEGGid' ] . isin ( names_KEGGids [ 'KEGGid' ] . tolist ( ) ) ] df = pd . merge ( df , names_KEGGids , how = 'left' , on = [ 'KEGGid' ] ) df_fA = pd . DataFrame ( columns = [ 'KEGGid' ] ) paths = [ ] for k in df [ [ 'pathID' ] ] . drop_duplicates ( ) [ 'pathID' ] . tolist ( ) : df_tmp = df [ df [ 'pathID' ] == k ] pathName = df_tmp [ 'pathName' ] . tolist ( ) [ 0 ] pathName = " : " . join ( [ k , pathName ] ) keggIDs_in_path = df_tmp [ [ 'KEGGid' ] ] . drop_duplicates ( ) [ 'KEGGid' ] . tolist ( ) a = { pathName : keggIDs_in_path } a = pd . DataFrame ( a , index = range ( len ( keggIDs_in_path ) ) ) a [ 'KEGGid' ] = a [ pathName ] . copy ( ) df_fA = pd . merge ( df_fA , a , how = 'outer' , on = [ 'KEGGid' ] ) paths . append ( pathName ) return df_fA , paths
|
Gets all KEGG pathways for an organism
|
2,379
|
def RdatabasesBM ( host = rbiomart_host ) : biomaRt = importr ( "biomaRt" ) print ( biomaRt . listMarts ( host = host ) )
|
Lists BioMart databases through a RPY2 connection .
|
2,380
|
def RdatasetsBM ( database , host = rbiomart_host ) : biomaRt = importr ( "biomaRt" ) ensemblMart = biomaRt . useMart ( database , host = host ) print ( biomaRt . listDatasets ( ensemblMart ) )
|
Lists BioMart datasets through a RPY2 connection .
|
2,381
|
def RfiltersBM ( dataset , database , host = rbiomart_host ) : biomaRt = importr ( "biomaRt" ) ensemblMart = biomaRt . useMart ( database , host = host ) ensembl = biomaRt . useDataset ( dataset , mart = ensemblMart ) print ( biomaRt . listFilters ( ensembl ) )
|
Lists BioMart filters through a RPY2 connection .
|
2,382
|
def RattributesBM ( dataset , database , host = rbiomart_host ) : biomaRt = importr ( "biomaRt" ) ensemblMart = biomaRt . useMart ( database , host = rbiomart_host ) ensembl = biomaRt . useDataset ( dataset , mart = ensemblMart ) print ( biomaRt . listAttributes ( ensembl ) )
|
Lists BioMart attributes through a RPY2 connection .
|
2,383
|
def get_list_of_applications ( ) : apps = mod_prg . Programs ( 'Applications' , 'C:\\apps' ) fl = mod_fl . FileList ( [ 'C:\\apps' ] , [ '*.exe' ] , [ "\\bk\\" ] ) for f in fl . get_list ( ) : apps . add ( f , 'autogenerated list' ) apps . list ( ) apps . save ( )
|
Get list of applications
|
2,384
|
def add_field ( self , name , label , field_type , * args , ** kwargs ) : if name in self . _dyn_fields : raise AttributeError ( 'Field already added to the form.' ) else : self . _dyn_fields [ name ] = { 'label' : label , 'type' : field_type , 'args' : args , 'kwargs' : kwargs }
|
Add the field to the internal configuration dictionary .
|
2,385
|
def add_validator ( self , name , validator , * args , ** kwargs ) : if name in self . _dyn_fields : if 'validators' in self . _dyn_fields [ name ] : self . _dyn_fields [ name ] [ 'validators' ] . append ( validator ) self . _dyn_fields [ name ] [ validator . __name__ ] = { } if args : self . _dyn_fields [ name ] [ validator . __name__ ] [ 'args' ] = args if kwargs : self . _dyn_fields [ name ] [ validator . __name__ ] [ 'kwargs' ] = kwargs else : self . _dyn_fields [ name ] [ 'validators' ] = [ ] self . add_validator ( name , validator , * args , ** kwargs ) else : raise AttributeError ( 'Field "{0}" does not exist. ' 'Did you forget to add it?' . format ( name ) )
|
Add the validator to the internal configuration dictionary .
|
2,386
|
def process ( self , form , post ) : if not isinstance ( form , FormMeta ) : raise TypeError ( 'Given form is not a valid WTForm.' ) re_field_name = re . compile ( r'\%([a-zA-Z0-9_]*)\%' ) class F ( form ) : pass for field , data in post . iteritems ( ) : if field in F ( ) : continue else : if field in self . _dyn_fields : field_cname = field current_set_number = None elif ( field . split ( '_' ) [ - 1 ] . isdigit ( ) and field [ : - ( len ( field . split ( '_' ) [ - 1 ] ) ) - 1 ] in self . _dyn_fields . keys ( ) ) : field_cname = field [ : - ( len ( field . split ( '_' ) [ - 1 ] ) ) - 1 ] current_set_number = str ( field . split ( '_' ) [ - 1 ] ) else : continue validators = [ ] if 'validators' in self . _dyn_fields [ field_cname ] : for validator in self . _dyn_fields [ field_cname ] [ 'validators' ] : args = [ ] kwargs = { } if 'args' in self . _dyn_fields [ field_cname ] [ validator . __name__ ] : if not current_set_number : args = self . _dyn_fields [ field_cname ] [ validator . __name__ ] [ 'args' ] else : for arg in self . _dyn_fields [ field_cname ] [ validator . __name__ ] [ 'args' ] : try : arg = re_field_name . sub ( r'\1' + '_' + current_set_number , arg ) except : pass args . append ( arg ) if 'kwargs' in self . _dyn_fields [ field_cname ] [ validator . __name__ ] : if not current_set_number : kwargs = self . _dyn_fields [ field_cname ] [ validator . __name__ ] [ 'kwargs' ] else : for key , arg in self . iteritems ( self . _dyn_fields [ field_cname ] [ validator . __name__ ] [ 'kwargs' ] ) : try : arg = re_field_name . sub ( r'\1' + '_' + current_set_number , arg ) except : pass kwargs [ key ] = arg validators . append ( validator ( * args , ** kwargs ) ) field_type = self . _dyn_fields [ field_cname ] [ 'type' ] field_label = self . _dyn_fields [ field_cname ] [ 'label' ] field_args = self . _dyn_fields [ field_cname ] [ 'args' ] field_kwargs = self . _dyn_fields [ field_cname ] [ 'kwargs' ] setattr ( F , field , field_type ( field_label , validators = validators , * field_args , ** field_kwargs ) ) if self . flask_wtf : form = F ( ) else : form = F ( post ) return form
|
Process the given WTForm Form object .
|
2,387
|
def GetBEDnarrowPeakgz ( URL_or_PATH_TO_file ) : if os . path . isfile ( URL_or_PATH_TO_file ) : response = open ( URL_or_PATH_TO_file , "r" ) compressedFile = StringIO . StringIO ( response . read ( ) ) else : response = urllib2 . urlopen ( URL_or_PATH_TO_file ) compressedFile = StringIO . StringIO ( response . read ( ) ) decompressedFile = gzip . GzipFile ( fileobj = compressedFile ) out = decompressedFile . read ( ) . split ( "\n" ) out = [ s . split ( "\t" ) for s in out ] out = pd . DataFrame ( out ) out . columns = [ "chrom" , "chromStart" , "chromEnd" , "name" , "score" , "strand" , "signalValue" , "-log10(pValue)" , "-log10(qvalue)" , "peak" ] out [ "name" ] = out . index . tolist ( ) out [ "name" ] = "Peak_" + out [ "name" ] . astype ( str ) out = out [ : - 1 ] return out
|
Reads a gz compressed BED narrow peak file from a web address or local file
|
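Row 2,387 targets Python 2 (`urllib2`, `StringIO`). Under Python 3 the same gzipped ENCODE narrowPeak layout (10 tab-separated columns, no header) can be read directly with pandas; this is a hedged alternative sketch, not the snippet's own API, and the file path is hypothetical:

```python
import pandas as pd

# narrowPeak is 10 tab-separated columns with no header row.
NARROWPEAK_COLUMNS = ["chrom", "chromStart", "chromEnd", "name", "score",
                      "strand", "signalValue", "-log10(pValue)",
                      "-log10(qvalue)", "peak"]

def read_narrowpeak(path_or_url):
    # pandas handles local paths, URLs and .gz compression transparently.
    df = pd.read_csv(path_or_url, sep="\t", header=None,
                     names=NARROWPEAK_COLUMNS, compression="infer")
    df["name"] = "Peak_" + df.index.astype(str)
    return df

# Usage (hypothetical file): read_narrowpeak("peaks.narrowPeak.gz")
```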
2,388
|
def dfTObedtool ( df ) : df = df . astype ( str ) df = df . drop_duplicates ( ) df = df . values . tolist ( ) df = [ "\t" . join ( s ) for s in df ] df = "\n" . join ( df ) df = BedTool ( df , from_string = True ) return df
|
Transforms a pandas dataframe into a bedtool
|
2,389
|
def configure ( ** kwargs ) : for key in kwargs : if key == 'is_logging_enabled' : Event . is_logging_enabled = kwargs [ key ] elif key == 'collector_queue' : Event . collector_queue = kwargs [ key ] else : Logger . get_logger ( __name__ ) . error ( "Unknown key %s in configure or bad type %s" , key , type ( kwargs [ key ] ) )
|
Global configuration for event handling .
|
2,390
|
def failed ( self , ** kwargs ) : self . finished = datetime . now ( ) self . status = 'failed' self . information . update ( kwargs ) self . logger . info ( "Failed - took %f seconds." , self . duration ( ) ) self . update_report_collector ( int ( time . mktime ( self . finished . timetuple ( ) ) ) )
|
Finish event as failed with optional additional information .
|
2,391
|
def update_report_collector ( self , timestamp ) : report_enabled = 'report' in self . information and self . information [ 'report' ] == 'html' report_enabled = report_enabled and 'stage' in self . information report_enabled = report_enabled and Event . collector_queue is not None if report_enabled : Event . collector_queue . put ( CollectorUpdate ( matrix = self . information [ 'matrix' ] if 'matrix' in self . information else 'default' , stage = self . information [ 'stage' ] , status = self . status , timestamp = timestamp , information = self . information ) )
|
Updating report collector for pipeline details .
|
2,392
|
def count_lines_in_file ( src_file ) : tot = 0 res = '' try : with open ( src_file , 'r' ) as f : for line in f : tot += 1 res = str ( tot ) + ' recs read' except : res = 'ERROR -couldnt open file' return res
|
test function .
|
2,393
|
def load_txt_to_sql ( tbl_name , src_file_and_path , src_file , op_folder ) : if op_folder == '' : pth = '' else : pth = op_folder + os . sep fname_create_script = pth + 'CREATE_' + tbl_name + '.SQL' fname_backout_file = pth + 'BACKOUT_' + tbl_name + '.SQL' fname_control_file = pth + tbl_name + '.CTL' cols = read_csv_cols_to_table_cols ( src_file ) create_script_staging_table ( fname_create_script , tbl_name , cols ) create_file ( fname_backout_file , 'DROP TABLE ' + tbl_name + ' CASCADE CONSTRAINTS;\n' ) create_CTL ( fname_control_file , tbl_name , cols , 'TRUNCATE' )
|
creates a SQL loader script to load a text file into a database and then executes it . Note that src_file is
|
2,394
|
async def anext ( * args ) : if not args : raise TypeError ( 'anext() expected at least 1 arguments, got 0' ) if len ( args ) > 2 : raise TypeError ( 'anext() expected at most 2 arguments, got {}' . format ( len ( args ) ) ) iterable , default , has_default = args [ 0 ] , None , False if len ( args ) == 2 : iterable , default = args has_default = True try : return await iterable . __anext__ ( ) except StopAsyncIteration as exc : if has_default : return default raise StopAsyncIteration ( ) from exc
|
Return the next item from an async iterator .
|
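A small usage sketch for the `anext` helper in row 2,394, assuming the helper above (or Python 3.10's built-in `anext`) is in scope; the async generator is a made-up driver:

```python
import asyncio

async def countdown(n):
    # A tiny async generator to drive the helper.
    for i in range(n, 0, -1):
        yield i

async def main():
    it = countdown(2)
    print(await anext(it))           # 2
    print(await anext(it))           # 1
    print(await anext(it, "done"))   # default returned once exhausted

asyncio.run(main())
```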
2,395
|
def repeat ( obj , times = None ) : if times is None : return AsyncIterWrapper ( sync_itertools . repeat ( obj ) ) return AsyncIterWrapper ( sync_itertools . repeat ( obj , times ) )
|
Make an iterator that returns object over and over again .
|
2,396
|
def _async_callable ( func ) : if isinstance ( func , types . CoroutineType ) : return func @ functools . wraps ( func ) async def _async_def_wrapper ( * args , ** kwargs ) : return func ( * args , ** kwargs ) return _async_def_wrapper
|
Ensure the callable is an async def .
|
2,397
|
def tee ( iterable , n = 2 ) : tees = tuple ( AsyncTeeIterable ( iterable ) for _ in range ( n ) ) for tee in tees : tee . _siblings = tees return tees
|
Return n independent iterators from a single iterable .
|
2,398
|
def _on_change ( self , obj , old , value , ** kwargs ) : kwargs [ 'property' ] = self obj . emit ( self . name , obj , value , old = old , ** kwargs )
|
Called internally to emit changes from the instance object
|
2,399
|
def parse_str ( self , s ) : self . object = self . parsed_class ( ) in_section = None for line in s . split ( '\n' ) : if line . lower ( ) . startswith ( '[end]' ) : in_section = None elif line . startswith ( '[' ) : in_section = line . strip ( ) . strip ( '[]' ) . lower ( ) . replace ( ' ' , '_' ) elif in_section : try : getattr ( self , '_section_' + in_section ) ( line . strip ( ) ) except AttributeError : pass return self . object
|
Parse string and return relevant object
|
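Row 2,399 walks an INI-like text: `[Section Name]` switches dispatch to a method named `_section_section_name`, `[end]` closes the section, and sections without a handler are silently skipped. A self-contained sketch with a hypothetical parsed class and a single section handler:

```python
class Record:
    def __init__(self):
        self.notes = []

class RecordParser:
    parsed_class = Record

    def parse_str(self, s):
        self.object = self.parsed_class()
        in_section = None
        for line in s.split('\n'):
            if line.lower().startswith('[end]'):
                in_section = None
            elif line.startswith('['):
                # "[My Notes]" -> handler name "_section_my_notes"
                in_section = line.strip().strip('[]').lower().replace(' ', '_')
            elif in_section:
                try:
                    getattr(self, '_section_' + in_section)(line.strip())
                except AttributeError:
                    pass  # unknown section: ignore its lines
        return self.object

    def _section_my_notes(self, line):
        self.object.notes.append(line)

text = "[My Notes]\nfirst line\nsecond line\n[end]\n[Unknown]\nignored\n[end]"
print(RecordParser().parse_str(text).notes)  # ['first line', 'second line']
```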