idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
47,300 | def dim_reduce_data ( data , d ) : genes , cells = data . shape distances = np . zeros ( ( cells , cells ) ) for i in range ( cells ) : for j in range ( cells ) : distances [ i , j ] = poisson_dist ( data [ : , i ] , data [ : , j ] ) proximity = distances ** 2 J = np . eye ( cells ) - 1. / cells B = - 0.5 * np . dot ( J , np . dot ( proximity , J ) ) e_val , e_vec = np . linalg . eigh ( B ) lam = np . diag ( e_val [ - d : ] ) [ : : - 1 ] E = e_vec [ : , - d : ] [ : : - 1 ] X = np . dot ( E , lam ** 0.5 ) return X | Does a MDS on the data directly not on the means . |
47,301 | def case ( self , case ) : LOG . debug ( "Getting case {0} from database" . format ( case . get ( 'case_id' ) ) ) case_id = case [ 'case_id' ] return self . db . case . find_one ( { 'case_id' : case_id } ) | Get a case from the database |
47,302 | def nr_cases ( self , snv_cases = None , sv_cases = None ) : query = { } if snv_cases : query = { 'vcf_path' : { '$exists' : True } } if sv_cases : query = { 'vcf_sv_path' : { '$exists' : True } } if snv_cases and sv_cases : query = None return self . db . case . count_documents ( query ) | Return the number of cases in the database |
47,303 | def add_case ( self , case , update = False ) : existing_case = self . case ( case ) if existing_case and not update : raise CaseError ( "Case {} already exists" . format ( case [ 'case_id' ] ) ) if existing_case : self . db . case . find_one_and_replace ( { 'case_id' : case [ 'case_id' ] } , case , ) else : self . db . case . insert_one ( case ) return case | Add a case to the case collection |
47,304 | def delete_case ( self , case ) : mongo_case = self . case ( case ) if not mongo_case : raise CaseError ( "Tried to delete case {0} but could not find case" . format ( case . get ( 'case_id' ) ) ) LOG . info ( "Removing case {0} from database" . format ( mongo_case . get ( 'case_id' ) ) ) self . db . case . delete_one ( { '_id' : mongo_case [ '_id' ] } ) return | Delete case from the database |
47,305 | def build_profile_variant ( variant ) : chrom = variant . CHROM if chrom . startswith ( ( 'chr' , 'CHR' , 'Chr' ) ) : chrom = chrom [ 3 : ] pos = int ( variant . POS ) variant_id = get_variant_id ( variant ) ref = variant . REF alt = variant . ALT [ 0 ] maf = get_maf ( variant ) profile_variant = ProfileVariant ( variant_id = variant_id , chrom = chrom , pos = pos , ref = ref , alt = alt , maf = maf , id_column = variant . ID ) return profile_variant | Returns a ProfileVariant object |
47,306 | def add_headers ( vcf_obj , nr_cases = None , sv = False ) : vcf_obj . add_info_to_header ( { 'ID' : "Obs" , 'Number' : '1' , 'Type' : 'Integer' , 'Description' : "The number of observations for the variant" } ) if not sv : vcf_obj . add_info_to_header ( { 'ID' : "Hom" , 'Number' : '1' , 'Type' : 'Integer' , 'Description' : "The number of observed homozygotes" } ) vcf_obj . add_info_to_header ( { 'ID' : "Hem" , 'Number' : '1' , 'Type' : 'Integer' , 'Description' : "The number of observed hemizygotes" } ) if nr_cases : case_header = "##NrCases={}" . format ( nr_cases ) vcf_obj . add_to_header ( case_header ) return | Add loqus specific information to a VCF header |
47,307 | def get_file_handle ( file_path ) : LOG . debug ( "Check if file end is correct" ) if not os . path . exists ( file_path ) : raise IOError ( "No such file:{0}" . format ( file_path ) ) if not os . path . splitext ( file_path ) [ - 1 ] in VALID_ENDINGS : raise IOError ( "Not a valid vcf file name: {}" . format ( file_path ) ) vcf_obj = VCF ( file_path ) return vcf_obj | Return cyvcf2 VCF object |
47,308 | def check_vcf ( vcf_path , expected_type = 'snv' ) : LOG . info ( "Check if vcf is on correct format..." ) vcf = VCF ( vcf_path ) individuals = vcf . samples variant_type = None previous_pos = None previous_chrom = None posititon_variants = set ( ) nr_variants = 0 for nr_variants , variant in enumerate ( vcf , 1 ) : current_type = 'sv' if variant . var_type == 'sv' else 'snv' if not variant_type : variant_type = current_type if variant_type != current_type : raise VcfError ( "Vcf includes a mix of snvs and svs" ) current_chrom = variant . CHROM current_pos = variant . POS variant_id = "{0}_{1}" . format ( current_chrom , current_pos ) if variant_type == 'snv' : variant_id = get_variant_id ( variant ) if not previous_chrom : previous_chrom = current_chrom previous_pos = current_pos posititon_variants = set ( [ variant_id ] ) continue if current_chrom != previous_chrom : previous_chrom = current_chrom previous_pos = current_pos posititon_variants = set ( [ variant_id ] ) continue if variant_type == 'snv' : if current_pos == previous_pos : if variant_id in posititon_variants : raise VcfError ( "Variant {0} occurs several times" " in vcf" . format ( variant_id ) ) else : posititon_variants . add ( variant_id ) else : if not current_pos >= previous_pos : raise VcfError ( "Vcf if not sorted in a correct way" ) previous_pos = current_pos posititon_variants = set ( [ variant_id ] ) if variant_type != expected_type : raise VcfError ( "VCF file does not only include {0}s, please check vcf {1}" . format ( expected_type . upper ( ) , vcf_path ) ) LOG . info ( "Vcf file %s looks fine" , vcf_path ) LOG . info ( "Nr of variants in vcf: {0}" . format ( nr_variants ) ) LOG . info ( "Type of variants in vcf: {0}" . format ( variant_type ) ) vcf_info = { 'nr_variants' : nr_variants , 'variant_type' : variant_type , 'individuals' : individuals , } return vcf_info | Check if there are any problems with the vcf file |
47,309 | def is_dn ( s ) : if s == '' : return True rm = DN_REGEX . match ( s ) return rm is not None and rm . group ( 0 ) == s | Return True if s is a LDAP DN . |
47,310 | def _fold_line ( self , line ) : if len ( line ) <= self . _cols : self . _output_file . write ( line ) self . _output_file . write ( self . _line_sep ) else : pos = self . _cols self . _output_file . write ( line [ 0 : self . _cols ] ) self . _output_file . write ( self . _line_sep ) while pos < len ( line ) : self . _output_file . write ( b' ' ) end = min ( len ( line ) , pos + self . _cols - 1 ) self . _output_file . write ( line [ pos : end ] ) self . _output_file . write ( self . _line_sep ) pos = end | Write string line as one or more folded lines . |
47,311 | def _needs_base64_encoding ( self , attr_type , attr_value ) : return attr_type . lower ( ) in self . _base64_attrs or isinstance ( attr_value , bytes ) or UNSAFE_STRING_RE . search ( attr_value ) is not None | Return True if attr_value has to be base - 64 encoded . |
47,312 | def _unparse_changetype ( self , mod_len ) : if mod_len == 2 : changetype = 'add' elif mod_len == 3 : changetype = 'modify' else : raise ValueError ( "modlist item of wrong length" ) self . _unparse_attr ( 'changetype' , changetype ) | Detect and write the changetype . |
47,313 | def unparse ( self , dn , record ) : self . _unparse_attr ( 'dn' , dn ) if isinstance ( record , dict ) : self . _unparse_entry_record ( record ) elif isinstance ( record , list ) : self . _unparse_change_record ( record ) else : raise ValueError ( "Argument record must be dictionary or list" ) self . _output_file . write ( self . _line_sep ) self . records_written += 1 | Write an entry or change record to the output file . |
47,314 | def _strip_line_sep ( self , s ) : if s [ - 2 : ] == b'\r\n' : return s [ : - 2 ] elif s [ - 1 : ] == b'\n' : return s [ : - 1 ] else : return s | Strip trailing line separators from s but no other whitespaces . |
47,315 | def _iter_unfolded_lines ( self ) : line = self . _input_file . readline ( ) while line : self . line_counter += 1 self . byte_counter += len ( line ) line = self . _strip_line_sep ( line ) nextline = self . _input_file . readline ( ) while nextline and nextline [ : 1 ] == b' ' : line += self . _strip_line_sep ( nextline ) [ 1 : ] nextline = self . _input_file . readline ( ) if not line . startswith ( b'#' ) : yield line line = nextline | Iter input unfoled lines . Skip comments . |
47,316 | def _iter_blocks ( self ) : lines = [ ] for line in self . _iter_unfolded_lines ( ) : if line : lines . append ( line ) elif lines : self . records_read += 1 yield lines lines = [ ] if lines : self . records_read += 1 yield lines | Iter input lines in blocks separated by blank lines . |
47,317 | def _check_dn ( self , dn , attr_value ) : if dn is not None : self . _error ( 'Two lines starting with dn: in one record.' ) if not is_dn ( attr_value ) : self . _error ( 'No valid string-representation of ' 'distinguished name %s.' % attr_value ) | Check dn attribute for issues . |
47,318 | def _check_changetype ( self , dn , changetype , attr_value ) : if dn is None : self . _error ( 'Read changetype: before getting valid dn: line.' ) if changetype is not None : self . _error ( 'Two lines starting with changetype: in one record.' ) if attr_value not in CHANGE_TYPES : self . _error ( 'changetype value %s is invalid.' % attr_value ) | Check changetype attribute for issues . |
47,319 | def _parse_entry_record ( self , lines ) : dn = None entry = OrderedDict ( ) for line in lines : attr_type , attr_value = self . _parse_attr ( line ) if attr_type == 'dn' : self . _check_dn ( dn , attr_value ) dn = attr_value elif attr_type == 'version' and dn is None : pass else : if dn is None : self . _error ( 'First line of record does not start ' 'with "dn:": %s' % attr_type ) if attr_value is not None and attr_type . lower ( ) not in self . _ignored_attr_types : if attr_type in entry : entry [ attr_type ] . append ( attr_value ) else : entry [ attr_type ] = [ attr_value ] return dn , entry | Parse a single entry record from a list of lines . |
47,320 | def zip_estimate_state ( data , clusters , init_means = None , init_weights = None , max_iters = 10 , tol = 1e-4 , disp = True , inner_max_iters = 400 , normalize = True ) : genes , cells = data . shape if init_means is None : means , assignments = kmeans_pp ( data , clusters ) else : means = init_means . copy ( ) clusters = means . shape [ 1 ] w_init = np . random . random ( cells * clusters ) if init_weights is not None : if len ( init_weights . shape ) == 1 : init_weights = initialize_from_assignments ( init_weights , clusters ) w_init = init_weights . reshape ( cells * clusters ) m_init = means . reshape ( genes * clusters ) L , Z = zip_fit_params_mle ( data ) ll = np . inf for i in range ( max_iters ) : if disp : print ( 'iter: {0}' . format ( i ) ) w_bounds = [ ( 0 , 1.0 ) for x in w_init ] m_bounds = [ ( 0 , None ) for x in m_init ] w_objective = _create_w_objective ( means , data , Z ) w_res = minimize ( w_objective , w_init , method = 'L-BFGS-B' , jac = True , bounds = w_bounds , options = { 'disp' : disp , 'maxiter' : inner_max_iters } ) w_diff = np . sqrt ( np . sum ( ( w_res . x - w_init ) ** 2 ) ) / w_init . size w_new = w_res . x . reshape ( ( clusters , cells ) ) w_init = w_res . x m_objective = _create_m_objective ( w_new , data , Z ) m_res = minimize ( m_objective , m_init , method = 'L-BFGS-B' , jac = True , bounds = m_bounds , options = { 'disp' : disp , 'maxiter' : inner_max_iters } ) ll = m_res . fun m_diff = np . sqrt ( np . sum ( ( m_res . x - m_init ) ** 2 ) ) / m_init . size m_new = m_res . x . reshape ( ( genes , clusters ) ) m_init = m_res . x means = m_new if w_diff < tol and m_diff < tol : break if normalize : w_new = w_new / w_new . sum ( 0 ) return m_new , w_new , ll | Uses a Zero - inflated Poisson Mixture model to estimate cell states and cell state mixing weights . |
47,321 | def kmeans_pp ( data , k , centers = None ) : genes , cells = data . shape if sparse . issparse ( data ) and not sparse . isspmatrix_csc ( data ) : data = sparse . csc_matrix ( data ) num_known_centers = 0 if centers is None : centers = np . zeros ( ( genes , k ) ) else : num_known_centers = centers . shape [ 1 ] centers = np . concatenate ( ( centers , np . zeros ( ( genes , k - num_known_centers ) ) ) , 1 ) distances = np . zeros ( ( cells , k ) ) distances [ : ] = np . inf if num_known_centers == 0 : init = np . random . randint ( 0 , cells ) if sparse . issparse ( data ) : centers [ : , 0 ] = data [ : , init ] . toarray ( ) . flatten ( ) else : centers [ : , 0 ] = data [ : , init ] num_known_centers += 1 available_cells = list ( range ( cells ) ) for c in range ( num_known_centers , k ) : c2 = c - 1 if sparse . issparse ( data ) : lls = poisson_ll ( data , centers [ : , c2 : c2 + 1 ] ) . flatten ( ) distances [ : , c2 ] = 1 + lls . max ( ) - lls distances [ : , c2 ] /= distances [ : , c2 ] . max ( ) else : for cell in range ( cells ) : distances [ cell , c2 ] = poisson_dist ( data [ : , cell ] , centers [ : , c2 ] ) min_distances = np . min ( distances , 1 ) min_distances = min_distances ** 2 min_distances = min_distances [ available_cells ] min_dist = np . random . choice ( available_cells , p = min_distances / min_distances . sum ( ) ) available_cells . pop ( available_cells . index ( min_dist ) ) if sparse . issparse ( data ) : centers [ : , c ] = data [ : , min_dist ] . toarray ( ) . flatten ( ) else : centers [ : , c ] = data [ : , min_dist ] lls = poisson_ll ( data , centers ) new_assignments = np . argmax ( lls , 1 ) centers [ centers == 0.0 ] = eps return centers , new_assignments | Generates kmeans ++ initial centers . |
47,322 | def poisson_cluster ( data , k , init = None , max_iters = 100 ) : genes , cells = data . shape if sparse . issparse ( data ) and not sparse . isspmatrix_csc ( data ) : data = sparse . csc_matrix ( data ) init , assignments = kmeans_pp ( data , k , centers = init ) centers = np . copy ( init ) assignments = np . zeros ( cells ) for it in range ( max_iters ) : lls = poisson_ll ( data , centers ) new_assignments = np . argmax ( lls , 1 ) if np . equal ( assignments , new_assignments ) . all ( ) : return new_assignments , centers for c in range ( k ) : if sparse . issparse ( data ) : if data [ : , new_assignments == c ] . shape [ 0 ] == 0 : new_c , _ = kmeans_pp ( data , k , centers [ : , : c ] ) centers [ : , c ] = new_c [ : , c ] else : centers [ : , c ] = np . asarray ( data [ : , new_assignments == c ] . mean ( 1 ) ) . flatten ( ) else : if len ( data [ : , new_assignments == c ] ) == 0 : new_c , _ = kmeans_pp ( data , k , centers [ : , : c ] ) centers [ : , c ] = new_c [ : , c ] else : centers [ : , c ] = np . mean ( data [ : , new_assignments == c ] , 1 ) assignments = new_assignments return assignments , centers | Performs Poisson hard EM on the given data . |
47,323 | def cases ( ctx , case_id , to_json ) : adapter = ctx . obj [ 'adapter' ] cases = [ ] if case_id : case_obj = adapter . case ( { 'case_id' : case_id } ) if not case_obj : LOG . info ( "Case {0} does not exist in database" . format ( case_id ) ) return case_obj [ '_id' ] = str ( case_obj [ '_id' ] ) cases . append ( case_obj ) else : cases = adapter . cases ( ) if cases . count ( ) == 0 : LOG . info ( "No cases found in database" ) ctx . abort ( ) if to_json : click . echo ( json . dumps ( cases ) ) return click . echo ( "#case_id\tvcf_path" ) for case_obj in cases : click . echo ( "{0}\t{1}" . format ( case_obj . get ( 'case_id' ) , case_obj . get ( 'vcf_path' ) ) ) | Display cases in the database . |
47,324 | def variants ( ctx , variant_id , chromosome , end_chromosome , start , end , variant_type , sv_type ) : if sv_type : variant_type = 'sv' adapter = ctx . obj [ 'adapter' ] if ( start or end ) : if not ( chromosome and start and end ) : LOG . warning ( "Regions must be specified with chromosome, start and end" ) return if variant_id : variant = adapter . get_variant ( { '_id' : variant_id } ) if variant : click . echo ( variant ) else : LOG . info ( "Variant {0} does not exist in database" . format ( variant_id ) ) return if variant_type == 'snv' : result = adapter . get_variants ( chromosome = chromosome , start = start , end = end ) else : LOG . info ( "Search for svs" ) result = adapter . get_sv_variants ( chromosome = chromosome , end_chromosome = end_chromosome , sv_type = sv_type , pos = start , end = end ) i = 0 for variant in result : i += 1 pp ( variant ) LOG . info ( "Number of variants found in database: %s" , i ) | Display variants in the database . |
47,325 | def index ( ctx , view ) : adapter = ctx . obj [ 'adapter' ] if view : click . echo ( adapter . indexes ( ) ) return adapter . ensure_indexes ( ) | Index the database . |
47,326 | def ddot ( L , R , left = None , out = None ) : r L = asarray ( L , float ) R = asarray ( R , float ) if left is None : ok = min ( L . ndim , R . ndim ) == 1 and max ( L . ndim , R . ndim ) == 2 if not ok : msg = "Wrong array layout. One array should have" msg += " ndim=1 and the other one ndim=2." raise ValueError ( msg ) left = L . ndim == 1 if left : if out is None : out = copy ( R ) L = L . reshape ( list ( L . shape ) + [ 1 ] * ( R . ndim - 1 ) ) return multiply ( L , R , out = out ) else : if out is None : out = copy ( L ) return multiply ( L , R , out = out ) | r Dot product of a matrix and a diagonal one . |
47,327 | def cdot ( L , out = None ) : r L = asarray ( L , float ) layout_error = "Wrong matrix layout." if L . ndim != 2 : raise ValueError ( layout_error ) if L . shape [ 0 ] != L . shape [ 1 ] : raise ValueError ( layout_error ) if out is None : out = empty ( ( L . shape [ 0 ] , L . shape [ 1 ] ) , float ) return einsum ( "ij,kj->ik" , L , L , out = out ) | r Product of a Cholesky matrix with itself transposed . |
47,328 | def nanrankdata ( a , axis = - 1 , inplace = False ) : from scipy . stats import rankdata if hasattr ( a , "dtype" ) and issubdtype ( a . dtype , integer ) : raise ValueError ( "Integer type is not supported." ) if isinstance ( a , ( tuple , list ) ) : if inplace : raise ValueError ( "Can't use `inplace=True` for {}." . format ( type ( a ) ) ) a = asarray ( a , float ) orig_shape = a . shape if a . ndim == 1 : a = a . reshape ( orig_shape + ( 1 , ) ) if not inplace : a = a . copy ( ) def rank1d ( x ) : idx = ~ isnan ( x ) x [ idx ] = rankdata ( x [ idx ] ) return x a = a . swapaxes ( 1 , axis ) a = apply_along_axis ( rank1d , 0 , a ) a = a . swapaxes ( 1 , axis ) return a . reshape ( orig_shape ) | Rank data for arrays contaning NaN values . |
47,329 | def plogdet ( K ) : r egvals = eigvalsh ( K ) return npsum ( log ( egvals [ egvals > epsilon ] ) ) | r Log of the pseudo - determinant . |
47,330 | def economic_qs ( K , epsilon = sqrt ( finfo ( float ) . eps ) ) : r ( S , Q ) = eigh ( K ) nok = abs ( max ( Q [ 0 ] . min ( ) , Q [ 0 ] . max ( ) , key = abs ) ) < epsilon nok = nok and abs ( max ( K . min ( ) , K . max ( ) , key = abs ) ) >= epsilon if nok : from scipy . linalg import eigh as sp_eigh ( S , Q ) = sp_eigh ( K ) ok = S >= epsilon nok = logical_not ( ok ) S0 = S [ ok ] Q0 = Q [ : , ok ] Q1 = Q [ : , nok ] return ( ( Q0 , Q1 ) , S0 ) | r Economic eigen decomposition for symmetric matrices . |
47,331 | def cartesian ( shape ) : r n = len ( shape ) idx = [ slice ( 0 , s ) for s in shape ] g = rollaxis ( mgrid [ idx ] , 0 , n + 1 ) return g . reshape ( ( prod ( shape ) , n ) ) | r Cartesian indexing . |
47,332 | def unique ( ar ) : r import dask . array as da if isinstance ( ar , da . core . Array ) : return da . unique ( ar ) return _unique ( ar ) | r Find the unique elements of an array . |
47,333 | def lu_slogdet ( LU ) : r LU = ( asarray ( LU [ 0 ] , float ) , asarray ( LU [ 1 ] , float ) ) adet = _sum ( log ( _abs ( LU [ 0 ] . diagonal ( ) ) ) ) s = prod ( sign ( LU [ 0 ] . diagonal ( ) ) ) nrows_exchange = LU [ 1 ] . size - _sum ( LU [ 1 ] == arange ( LU [ 1 ] . size , dtype = "int32" ) ) odd = nrows_exchange % 2 == 1 if odd : s *= - 1.0 return ( s , adet ) | r Natural logarithm of a LU decomposition . |
47,334 | def lu_solve ( LU , b ) : r from scipy . linalg import lu_solve as sp_lu_solve LU = ( asarray ( LU [ 0 ] , float ) , asarray ( LU [ 1 ] , float ) ) b = asarray ( b , float ) return sp_lu_solve ( LU , b , check_finite = False ) | r Solve for LU decomposition . |
47,335 | def lstsq ( A , b ) : r A = asarray ( A , float ) b = asarray ( b , float ) if A . ndim == 1 : A = A [ : , newaxis ] if A . shape [ 1 ] == 1 : return dot ( A . T , b ) / squeeze ( dot ( A . T , A ) ) rcond = finfo ( double ) . eps * max ( * A . shape ) return npy_lstsq ( A , b , rcond = rcond ) [ 0 ] | r Return the least - squares solution to a linear matrix equation . |
47,336 | def economic_svd ( G , epsilon = sqrt ( finfo ( float ) . eps ) ) : r from scipy . linalg import svd G = asarray ( G , float ) ( U , S , V ) = svd ( G , full_matrices = False , check_finite = False ) ok = S >= epsilon S = S [ ok ] U = U [ : , ok ] V = V [ ok , : ] return ( U , S , V ) | r Economic Singular Value Decomposition . |
47,337 | def hsolve ( A , y ) : r n = _norm ( A [ 0 , 0 ] , A [ 1 , 0 ] ) u0 = A [ 0 , 0 ] - n u1 = A [ 1 , 0 ] nu = _norm ( u0 , u1 ) with errstate ( invalid = "ignore" , divide = "ignore" ) : v0 = nan_to_num ( u0 / nu ) v1 = nan_to_num ( u1 / nu ) B00 = 1 - 2 * v0 * v0 B01 = 0 - 2 * v0 * v1 B11 = 1 - 2 * v1 * v1 D00 = B00 * A [ 0 , 0 ] + B01 * A [ 1 , 0 ] D01 = B00 * A [ 0 , 1 ] + B01 * A [ 1 , 1 ] D11 = B01 * A [ 0 , 1 ] + B11 * A [ 1 , 1 ] b0 = y [ 0 ] - 2 * y [ 0 ] * v0 * v0 - 2 * y [ 1 ] * v0 * v1 b1 = y [ 1 ] - 2 * y [ 0 ] * v1 * v0 - 2 * y [ 1 ] * v1 * v1 n = _norm ( D00 , D01 ) u0 = D00 - n u1 = D01 nu = _norm ( u0 , u1 ) with errstate ( invalid = "ignore" , divide = "ignore" ) : v0 = nan_to_num ( u0 / nu ) v1 = nan_to_num ( u1 / nu ) E00 = 1 - 2 * v0 * v0 E01 = 0 - 2 * v0 * v1 E11 = 1 - 2 * v1 * v1 F00 = E00 * D00 + E01 * D01 F01 = E01 * D11 F11 = E11 * D11 F11 = ( npy_abs ( F11 ) > epsilon . small ) * F11 with errstate ( divide = "ignore" , invalid = "ignore" ) : Fi00 = nan_to_num ( F00 / F00 / F00 ) Fi11 = nan_to_num ( F11 / F11 / F11 ) Fi10 = nan_to_num ( - ( F01 / F00 ) * Fi11 ) c0 = Fi00 * b0 c1 = Fi10 * b0 + Fi11 * b1 x0 = E00 * c0 + E01 * c1 x1 = E01 * c0 + E11 * c1 return array ( [ x0 , x1 ] ) | r Solver for the linear equations of two variables and equations only . |
47,338 | def rsolve ( A , b , epsilon = _epsilon ) : r A = asarray ( A , float ) b = asarray ( b , float ) if A . shape [ 0 ] == 0 : return zeros ( ( A . shape [ 1 ] , ) ) if A . shape [ 1 ] == 0 : return zeros ( ( 0 , ) ) try : x = lstsq ( A , b , rcond = epsilon ) r = sum ( x [ 3 ] > epsilon ) if r == 0 : return zeros ( A . shape [ 1 ] ) return x [ 0 ] except ( ValueError , LinAlgError ) as e : warnings . warn ( str ( e ) , RuntimeWarning ) return solve ( A , b ) | r Robust solve for the linear equations . |
47,339 | def kron_dot ( A , B , C , out = None ) : r from numpy import dot , zeros , asarray A = asarray ( A ) B = asarray ( B ) C = asarray ( C ) if out is None : out = zeros ( ( B . shape [ 0 ] , A . shape [ 0 ] ) ) dot ( B , dot ( C , A . T ) , out = out ) return out | r Kronecker product followed by dot product . |
47,340 | def check_semidefinite_positiveness ( A ) : B = empty_like ( A ) B [ : ] = A B [ diag_indices_from ( B ) ] += sqrt ( finfo ( float ) . eps ) try : cholesky ( B ) except LinAlgError : return False return True | Check if A is a semi - definite positive matrix . |
47,341 | def check_symmetry ( A ) : A = asanyarray ( A ) if A . ndim != 2 : raise ValueError ( "Checks symmetry only for bi-dimensional arrays." ) if A . shape [ 0 ] != A . shape [ 1 ] : return False return abs ( A - A . T ) . max ( ) < sqrt ( finfo ( float ) . eps ) | Check if A is a symmetric matrix . |
47,342 | def cho_solve ( L , b ) : r from scipy . linalg import cho_solve as sp_cho_solve L = asarray ( L , float ) b = asarray ( b , float ) if L . size == 0 : if b . size != 0 : raise ValueError ( "Dimension mismatch between L and b." ) return empty ( b . shape ) return sp_cho_solve ( ( L , True ) , b , check_finite = False ) | r Solve for Cholesky decomposition . |
47,343 | def file_or_resource ( fname = None ) : if fname is not None : filename = os . path . expanduser ( fname ) resource_package = opentargets_validator . __name__ resource_path = os . path . sep . join ( ( 'resources' , filename ) ) abs_filename = os . path . join ( os . path . abspath ( os . getcwd ( ) ) , filename ) if not os . path . isabs ( filename ) else filename return abs_filename if os . path . isfile ( abs_filename ) else res . resource_filename ( resource_package , resource_path ) | get filename and check if in getcwd then get from the package resources folder |
47,344 | def run_process_dummy ( command , ** kwargs ) : warnings . warn ( "procrunner.run_process_dummy() is deprecated" , DeprecationWarning , stacklevel = 2 ) time_start = time . strftime ( "%Y-%m-%d %H:%M:%S GMT" , time . gmtime ( ) ) logger . info ( "run_process is disabled. Requested command: %s" , command ) result = ReturnObject ( { "exitcode" : 0 , "command" : command , "stdout" : "" , "stderr" : "" , "timeout" : False , "runtime" : 0 , "time_start" : time_start , "time_end" : time_start , } ) if kwargs . get ( "stdin" ) is not None : result . update ( { "stdin_bytes_sent" : len ( kwargs [ "stdin" ] ) , "stdin_bytes_remain" : 0 } ) return result | A stand - in function that returns a valid result dictionary indicating a successful execution . The external process is not run . |
47,345 | def run_process ( * args , ** kwargs ) : warnings . warn ( "procrunner.run_process() is deprecated and has been renamed to run()" , DeprecationWarning , stacklevel = 2 , ) return run ( * args , ** kwargs ) | API used up to version 0 . 2 . 0 . |
47,346 | def get_output ( self ) : self . _closing = True if not self . has_finished ( ) : if self . _debug : underrun_debug_timer = timeit . default_timer ( ) logger . warning ( "NBSR underrun" ) self . _thread . join ( ) if not self . has_finished ( ) : if self . _debug : logger . debug ( "NBSR join after %f seconds, underrun not resolved" % ( timeit . default_timer ( ) - underrun_debug_timer ) ) raise Exception ( "thread did not terminate" ) if self . _debug : logger . debug ( "NBSR underrun resolved after %f seconds" % ( timeit . default_timer ( ) - underrun_debug_timer ) ) if self . _closed : raise Exception ( "streamreader double-closed" ) self . _closed = True data = self . _buffer . getvalue ( ) self . _buffer . close ( ) return data | Retrieve the stored data in full . This call may block if the reading thread has not yet terminated . |
47,347 | def sum2diag ( A , D , out = None ) : r A = asarray ( A , float ) D = asarray ( D , float ) if out is None : out = copy ( A ) else : copyto ( out , A ) einsum ( "ii->i" , out ) [ : ] += D return out | r Add values D to the diagonal of matrix A . |
47,348 | def kata2hira ( text , ignore = '' ) : if ignore : k2h_map = _exclude_ignorechar ( ignore , K2H_TABLE . copy ( ) ) return _convert ( text , k2h_map ) return _convert ( text , K2H_TABLE ) | Convert Full - width Katakana to Hiragana |
47,349 | def histpoints ( x , bins = None , xerr = None , yerr = 'gamma' , normed = False , ** kwargs ) : import matplotlib . pyplot as plt if bins is None : bins = calc_nbins ( x ) h , bins = np . histogram ( x , bins = bins ) width = bins [ 1 ] - bins [ 0 ] center = ( bins [ : - 1 ] + bins [ 1 : ] ) / 2 area = sum ( h * width ) if isinstance ( yerr , str ) : yerr = poisson_limits ( h , yerr ) if xerr == 'binwidth' : xerr = width / 2 if normed : h = h / area yerr = yerr / area area = 1. if not 'color' in kwargs : kwargs [ 'color' ] = 'black' if not 'fmt' in kwargs : kwargs [ 'fmt' ] = 'o' plt . errorbar ( center , h , xerr = xerr , yerr = yerr , ** kwargs ) return center , ( yerr [ 0 ] , h , yerr [ 1 ] ) , area | Plot a histogram as a series of data points . |
47,350 | def pool ( self ) : self . _pool = self . _pool or eventlet . GreenPool ( size = self . pool_size ) return self . _pool | Get an eventlet pool used to dispatch requests . |
47,351 | def start ( self ) : if self . pid is not None : LOG . error ( "The process is already running with pid {0}." . format ( self . pid ) ) sys . exit ( exit . ALREADY_RUNNING ) self . daemonize ( ) LOG . info ( "Beginning run loop for process." ) try : self . run ( ) except Exception : LOG . exception ( "Uncaught exception in the daemon run() method." ) self . stop ( ) sys . exit ( exit . RUN_FAILURE ) | Start the process with daemonization . |
47,352 | def stop ( self ) : if self . pid is None : return None try : while True : self . send ( signal . SIGTERM ) time . sleep ( 0.1 ) except RuntimeError as err : if "No such process" in str ( err ) : LOG . info ( "Succesfully stopped the process." ) return None LOG . exception ( "Failed to stop the process:" ) sys . exit ( exit . STOP_FAILED ) except TypeError as err : if "an integer is required" in str ( err ) : LOG . info ( "Succesfully stopped the process." ) return None LOG . exception ( "Failed to stop the process:" ) sys . exit ( exit . STOP_FAILED ) | Stop the daemonized process . |
47,353 | def handle ( self , signum , handler ) : if not isinstance ( signum , int ) : raise TypeError ( "Signals must be given as integers. Got {0}." . format ( type ( signum ) , ) , ) if not callable ( handler ) : raise TypeError ( "Signal handlers must be callable." , ) signal . signal ( signum , self . _handle_signals ) self . _handlers [ signum ] . append ( handler ) | Set a function to run when the given signal is recieved . |
47,354 | def send ( self , signum ) : if not isinstance ( signum , int ) : raise TypeError ( "Signals must be given as integers. Got {0}." . format ( type ( signum ) , ) , ) try : os . kill ( self . pid , signum ) except OSError as err : if "No such process" in err . strerror : raise RuntimeError ( "No such process {0}." . format ( self . pid ) ) raise err | Send the given signal to the running process . |
47,355 | def _handle_signals ( self , signum , frame ) : if signum in self . kill_signals : return self . shutdown ( signum ) for handler in self . _handlers [ signum ] : handler ( ) | Handler for all signals . |
47,356 | def shutdown ( self , signum ) : dirty = False for handler in self . _handlers [ signum ] : try : handler ( ) except : LOG . exception ( "A shutdown handler failed to execute:" ) dirty = True del self . pid if dirty : sys . exit ( exit . SHUTDOWN_FAILED ) return None sys . exit ( exit . SUCCESS ) return None | Handle all signals which trigger a process stop . |
47,357 | def load_from_remote ( remote_name , owner = None ) : from . . import GMQLDataset pmg = get_python_manager ( ) remote_manager = get_remote_manager ( ) parser = remote_manager . get_dataset_schema ( remote_name , owner ) source_table = get_source_table ( ) id = source_table . search_source ( remote = remote_name ) if id is None : id = source_table . add_source ( remote = remote_name , parser = parser ) index = pmg . read_dataset ( str ( id ) , parser . get_gmql_parser ( ) ) remote_sources = [ id ] return GMQLDataset . GMQLDataset ( index = index , location = "remote" , path_or_name = remote_name , remote_sources = remote_sources ) | Loads the data from a remote repository . |
47,358 | def scan ( interface ) : interface = _get_bytes ( interface ) head = ffi . new ( 'wireless_scan_head *' ) with iwlib_socket ( ) as sock : range = _get_range_info ( interface , sock = sock ) if iwlib . iw_scan ( sock , interface , range . we_version_compiled , head ) != 0 : errno = ffi . errno strerror = "Error while scanning: %s" % os . strerror ( errno ) raise OSError ( errno , strerror ) results = [ ] scan = head . result buf = ffi . new ( 'char []' , 1024 ) while scan != ffi . NULL : parsed_scan = { } if scan . b . has_mode : parsed_scan [ 'Mode' ] = ffi . string ( iwlib . iw_operation_mode [ scan . b . mode ] ) if scan . b . essid_on : parsed_scan [ 'ESSID' ] = ffi . string ( scan . b . essid ) else : parsed_scan [ 'ESSID' ] = b'Auto' if scan . has_ap_addr : iwlib . iw_ether_ntop ( ffi . cast ( 'struct ether_addr *' , scan . ap_addr . sa_data ) , buf ) if scan . b . has_mode and scan . b . mode == iwlib . IW_MODE_ADHOC : parsed_scan [ 'Cell' ] = ffi . string ( buf ) else : parsed_scan [ 'Access Point' ] = ffi . string ( buf ) if scan . has_maxbitrate : iwlib . iw_print_bitrate ( buf , len ( buf ) , scan . maxbitrate . value ) parsed_scan [ 'BitRate' ] = ffi . string ( buf ) if scan . has_stats : parsed_scan [ 'stats' ] = _parse_stats ( scan . stats ) results . append ( parsed_scan ) scan = scan . next return results | Perform a scan for access points in the area . |
def logout(self):
    """Logout from the remote account.

    :raises ValueError: if the server responds with a non-200 status code
    """
    header = self.__check_authentication()
    response = requests.get(self.address + "/logout", headers=header)
    if response.status_code != 200:
        error = response.json().get("error")
        raise ValueError("Code {}. {}".format(response.status_code, error))
def get_dataset_list(self):
    """Returns the list of available datasets for the current user.

    :return: a pandas DataFrame, one row per dataset
    :raises ValueError: if the server responds with a non-200 status code
    """
    url = self.address + "/datasets"
    header = self.__check_authentication()
    response = requests.get(url, headers=header)
    # Consistency fix: every sibling endpoint raises on HTTP errors; without
    # this check a failed request crashed later with an opaque error.
    if response.status_code != 200:
        raise ValueError("Code {}: {}".format(response.status_code,
                                              response.json().get("error")))
    datasets = response.json().get("datasets")
    res = pd.DataFrame.from_dict(datasets)
    return self.process_info_list(res, "info")
def get_dataset_samples(self, dataset_name, owner=None):
    """Get the list of samples of a specific remote dataset.

    :param dataset_name: name of the remote dataset
    :param owner: optional owner; when given the lookup key becomes
        "<owner>.<dataset_name>"
    :return: a pandas DataFrame of samples, or None when there are none
    :raises ValueError: if the server responds with a non-200 status code
    """
    if isinstance(owner, str):
        dataset_name = owner.lower() + "." + dataset_name
    header = self.__check_authentication()
    response = requests.get(self.address + "/datasets/" + dataset_name,
                            headers=header)
    if response.status_code != 200:
        raise ValueError("Code {}: {}".format(response.status_code,
                                              response.json().get("error")))
    samples = response.json().get("samples")
    if len(samples) == 0:
        return None
    return self.process_info_list(pd.DataFrame.from_dict(samples), "info")
def get_dataset_schema(self, dataset_name, owner=None):
    """Given a dataset name it returns a parser coherent with the schema of it.

    :param dataset_name: name of the remote dataset
    :param owner: optional owner; when given the lookup key becomes
        "<owner>.<dataset_name>"
    :return: a RegionParser describing the dataset's region schema
    :raises ValueError: if the server responds with a non-200 status code
    """
    if isinstance(owner, str):
        owner = owner.lower()
        dataset_name = owner + "." + dataset_name
    url = self.address + "/datasets/" + dataset_name + "/schema"
    header = self.__check_authentication()
    response = requests.get(url, headers=header)
    if response.status_code != 200:
        raise ValueError("Code {}: {}".format(response.status_code, response.json().get("error")))
    response = response.json()
    name = response.get("name")
    schemaType = response.get("type")
    coordinates_system = response.get("coordinate_system")
    fields = response.get("fields")
    i = 0
    chrPos, startPos, stopPos, strandPos = None, None, None, None
    otherPos = []
    if schemaType == GTF:
        # GTF has fixed positions for the coordinate columns; the four
        # non-coordinate standard columns are pre-registered as extras.
        chrPos = 0
        startPos = 3
        stopPos = 4
        strandPos = 6
        otherPos = [(1, 'source', 'string'), (2, 'feature', 'string'),
                    (5, 'score', 'float'), (7, 'frame', 'string')]
        for field in fields:
            fieldName = field.get("name")
            fieldType = field.get("type").lower()
            # Any field that is not one of the standard GTF columns is an
            # additional attribute, recorded with its column index.
            if fieldName.lower() not in {'seqname', 'start', 'end', 'strand', 'source', 'feature', 'score', 'frame'}:
                otherPos.append((i, fieldName, fieldType))
            i += 1
    else:
        # Tab-delimited schema: detect the coordinate columns by matching
        # each field name against the known alias sets; first match wins.
        for field in fields:
            fieldName = field.get("name")
            fieldType = field.get("type").lower()
            if fieldName.lower() in chr_aliases and chrPos is None:
                chrPos = i
            elif fieldName.lower() in start_aliases and startPos is None:
                startPos = i
            elif fieldName.lower() in stop_aliases and stopPos is None:
                stopPos = i
            elif fieldName.lower() in strand_aliases and strandPos is None:
                strandPos = i
            else:
                otherPos.append((i, fieldName, fieldType))
            i += 1
    # RegionParser expects None (not an empty list) when there are no extras.
    if len(otherPos) == 0:
        otherPos = None
    return RegionParser(chrPos=chrPos, startPos=startPos, stopPos=stopPos,
                        strandPos=strandPos, otherPos=otherPos,
                        schema_format=schemaType,
                        coordinate_system=coordinates_system,
                        delimiter="\t", parser_name=name)
def upload_dataset(self, dataset, dataset_name, schema_path=None):
    """Upload to the repository an entire dataset from a local path.

    A GDataframe is first serialized to a temporary folder and uploaded
    from there.

    :param dataset: a local folder path or a GDataframe
    :param dataset_name: name to give the dataset on the repository
    :param schema_path: optional explicit schema file; defaults to the one
        found next to the data files
    :raises TypeError: if dataset is neither a path nor a GDataframe
    :raises ValueError: if the server responds with a non-200 status code
    """
    url = self.address + "/datasets/" + dataset_name + "/uploadSample"
    header = self.__check_authentication()
    fields = dict()
    remove = False
    if isinstance(dataset, GDataframe):
        tmp_path = TempFileManager.get_new_dataset_tmp_folder()
        dataset.to_dataset_files(local_path=tmp_path)
        dataset = tmp_path
        remove = True
    if not isinstance(dataset, str):
        raise TypeError("Dataset can be a path or a GDataframe. {} was passed".format(type(dataset)))
    file_paths, schema_path_found = Loader.get_file_paths(dataset)
    if schema_path is None:
        schema_path = schema_path_found
    try:
        fields['schema'] = (os.path.basename(schema_path),
                            open(schema_path, "rb"), 'application/octet-stream')
        for i, file in enumerate(file_paths):
            fields["file" + str(i + 1)] = (os.path.basename(file),
                                           open(file, "rb"), 'application/octet-stream')
        encoder = MultipartEncoder(fields)
        callback = create_callback(encoder, len(fields))
        m_encoder = MultipartEncoderMonitor(encoder, callback)
        header['Content-Type'] = m_encoder.content_type
        self.logger.debug("Uploading dataset at {} with name {}".format(dataset, dataset_name))
        response = requests.post(url, data=m_encoder, headers=header)
    finally:
        # Bug fix: the file handles were previously left open (and the
        # temporary folder never removed) when the request raised or the
        # server returned an error status.
        for fn in fields.keys():
            _, f, _ = fields[fn]
            f.close()
        if remove:
            TempFileManager.delete_tmp_dataset(dataset)
    if response.status_code != 200:
        raise ValueError("Code {}: {}".format(response.status_code, response.content))
def delete_dataset(self, dataset_name):
    """Deletes the dataset having the specified name.

    :param dataset_name: name of the remote dataset to delete
    :raises ValueError: if the server responds with a non-200 status code
    """
    header = self.__check_authentication()
    response = requests.delete(self.address + "/datasets/" + dataset_name,
                               headers=header)
    if response.status_code != 200:
        error = response.json().get("error")
        raise ValueError("Code {}: {}".format(response.status_code, error))
    self.logger.debug("Dataset {} was deleted from the repository".format(dataset_name))
def download_dataset(self, dataset_name, local_path, how="stream"):
    """It downloads from the repository the specified dataset and puts it in
    the specified local folder.

    :param dataset_name: name of the remote dataset
    :param local_path: destination folder (must not already exist)
    :param how: 'stream' (default) or 'zip'
    :raises ValueError: if local_path already exists or how is invalid
    """
    if os.path.isdir(local_path):
        raise ValueError("Path {} already exists!".format(local_path))
    os.makedirs(local_path)
    local_path = os.path.join(local_path, FILES_FOLDER)
    os.makedirs(local_path)
    if how == 'zip':
        return self.download_as_zip(dataset_name, local_path)
    if how == 'stream':
        return self.download_as_stream(dataset_name, local_path)
    raise ValueError("how must be {'zip', 'stream'}")
def query(self, query, output_path=None, file_name="query", output="tab"):
    """Execute a GMQL textual query on the remote server.

    :param query: GMQL query text
    :param output_path: optional local path for the result datasets
    :param file_name: name given to the query on the server
    :param output: result format, 'tab' or 'gtf'
    :raises ValueError: on invalid output format or HTTP error
    """
    header = self.__check_authentication()
    header['Content-Type'] = "text/plain"
    output = output.lower()
    if output not in ['tab', 'gtf']:
        raise ValueError("output must be 'tab' or 'gtf'")
    url = self.address + "/queries/run/" + file_name + '/' + output
    response = requests.post(url, data=query, headers=header)
    if response.status_code != 200:
        raise ValueError("Code {}. {}".format(response.status_code,
                                              response.json().get("error")))
    jobid = response.json().get("id")
    self.logger.debug("JobId: {}. Waiting for the result".format(jobid))
    # Block until the remote job completes, then fetch its outputs.
    status_resp = self._wait_for_result(jobid)
    return self.__process_result_datasets(status_resp.get("datasets"), output_path)
def trace_job(self, jobId):
    """Get information about the specified remote job.

    :param jobId: identifier of the job to trace
    :return: the decoded JSON status of the job
    :raises ValueError: if the server responds with a non-200 status code
    """
    header = self.__check_authentication()
    url = self.address + "/jobs/" + jobId + "/trace"
    resp = requests.get(url, headers=header)
    if resp.status_code != 200:
        raise ValueError("Code {}. {}".format(resp.status_code,
                                              resp.json().get("error")))
    return resp.json()
def set_mode(how):
    """Sets the behavior of the API.

    :param how: either "local" or "remote"
    :raises ValueError: for any other value
    """
    global __mode
    if how in ("local", "remote"):
        __mode = how
    else:
        raise ValueError("how must be 'local' or 'remote'")
def set_progress(how):
    """Enables or disables the progress bars for the loading, writing and
    downloading of datasets.

    :param how: boolean flag
    :raises ValueError: if how is not a boolean
    """
    global __progress_bar
    if not isinstance(how, bool):
        raise ValueError("how must be a boolean. {} was found".format(type(how)))
    __progress_bar = how
def set_meta_profiling(how):
    """Enables or disables the profiling of metadata at the loading of a
    GMQLDataset.

    :param how: boolean flag
    :raises TypeError: if how is not a boolean
    """
    global __metadata_profiling
    if not isinstance(how, bool):
        raise TypeError("how must be boolean. {} was provided".format(type(how)))
    __metadata_profiling = how
def parse_regions(self, path):
    """Given a file path, it loads it into memory as a Pandas dataframe.

    :param path: path of the region file
    :return: a Pandas dataframe of the parsed regions
    """
    is_gtf = self.schema_format.lower() == GTF.lower()
    if is_gtf:
        return self._parse_gtf_regions(path)
    return self._parse_tab_regions(path)
def get_attributes(self):
    """Returns the unordered list of attributes: the three coordinate names,
    'strand' when present, then every extra field name."""
    attributes = ['chr', 'start', 'stop']
    if self.strandPos is not None:
        attributes.append('strand')
    if self.otherPos:
        attributes.extend(other[1] for other in self.otherPos)
    return attributes
def get_ordered_attributes(self):
    """Returns the list of attributes ordered by their column position."""
    attributes = np.array(self.get_attributes())
    # Column positions in the same order as get_attributes() lists them.
    positions = [self.chrPos, self.startPos, self.stopPos]
    if self.strandPos is not None:
        positions.append(self.strandPos)
    if self.otherPos:
        positions.extend(other[0] for other in self.otherPos)
    order = np.argsort(np.array(positions))
    return attributes[order].tolist()
def get_types(self):
    """Returns the unordered list of data types, aligned with the attribute
    list produced by get_attributes()."""
    data_types = [str, int, int]
    if self.strandPos is not None:
        data_types.append(str)
    if self.otherPos:
        data_types.extend(other[2] for other in self.otherPos)
    return data_types
def get_ordered_types(self):
    """Returns the list of data types ordered by column position."""
    data_types = np.array(self.get_types())
    positions = [self.chrPos, self.startPos, self.stopPos]
    if self.strandPos is not None:
        positions.append(self.strandPos)
    if self.otherPos:
        positions.extend(other[0] for other in self.otherPos)
    order = np.argsort(np.array(positions))
    return data_types[order].tolist()
def xmeans(cls, initial_centers=None, kmax=20, tolerance=0.025, criterion=splitting_type.BAYESIAN_INFORMATION_CRITERION, ccore=False):
    """Constructor of the x-means clustering algorithm.

    Note: inside this method the name 'xmeans' resolves to the module-level
    pyclustering class, not to this classmethod. The data is injected later
    by fit(), hence the initial None.
    """
    algorithm = xmeans(None, initial_centers, kmax, tolerance, criterion, ccore)
    return cls(algorithm)
def clarans(cls, number_clusters, num_local, max_neighbour):
    """Constructor of the CLARANS clustering algorithm.

    Note: inside this method the name 'clarans' resolves to the module-level
    pyclustering class. The data is injected later by fit(), hence the None.
    """
    algorithm = clarans(None, number_clusters, num_local, max_neighbour)
    return cls(algorithm)
def rock(cls, data, eps, number_clusters, threshold=0.5, ccore=False):
    """Constructor of the ROCK cluster analysis algorithm.

    Note: inside this method the name 'rock' resolves to the module-level
    pyclustering class.
    """
    preprocessed = cls.input_preprocess(data)
    algorithm = rock(preprocessed, eps, number_clusters, threshold, ccore)
    return cls(algorithm)
def optics(cls, data, eps, minpts, ccore=False):
    """Constructor of OPTICS clustering algorithm.

    :param data: input observations, preprocessed via cls.input_preprocess
    :param eps: connectivity radius
    :param minpts: minimum number of neighbors
    :param ccore: accepted for signature symmetry with the other
        constructors, but NOTE(review): it is NOT forwarded to the optics
        call below — confirm whether that is intentional.
    """
    data = cls.input_preprocess(data)
    # 'optics' here resolves to the module-level pyclustering class, not to
    # this classmethod (the method name only lives in the class namespace).
    model = optics(data, eps, minpts)
    return cls(model)
def is_pyclustering_instance(model):
    """Checks if the clustering algorithm belongs to pyclustering."""
    pyclustering_types = (xmeans, clarans, rock, optics)
    return isinstance(model, pyclustering_types)
def fit(self, data=None):
    """Performs clustering.

    :param data: the observations to cluster; for pyclustering models the
        data is injected here because the constructors were built with None
    :return: self, to allow chaining
    """
    if self.is_pyclustering_instance(self.model):
        if isinstance(self.model, xmeans):
            data = self.input_preprocess(data)
            # pyclustering keeps its input in a name-mangled private
            # attribute; poke it directly since the constructor got None.
            self.model._xmeans__pointer_data = data
        elif isinstance(self.model, clarans):
            data = self.input_preprocess(data)
            self.model._clarans__pointer_data = data
        self.model.process()
    else:
        # scikit-learn style estimator
        self.model.fit(data)
    return self
47,382 | def _labels_from_pyclusters ( self ) : clusters = self . model . get_clusters ( ) labels = [ ] for i in range ( 0 , len ( clusters ) ) : for j in clusters [ i ] : labels . insert ( int ( j ) , i ) return labels | Computes and returns the list of labels indicating the data points and the corresponding cluster ids . |
def retrieve_cluster(self, df, cluster_no):
    """Extracts the cluster at the given index from the input dataframe.

    :param df: dataframe whose rows are the clustered points
    :param cluster_no: id of the cluster to extract
    :return: the sub-dataframe of rows belonging to that cluster
    """
    if self.is_pyclustering_instance(self.model):
        members = self.model.get_clusters()[cluster_no]
        mask = [row in members for row in range(0, df.shape[0])]
    else:
        # scikit-learn estimators expose per-point labels directly.
        mask = self.model.labels_ == cluster_no
    return df[mask]
def get_labels(obj):
    """Retrieve the labels of a clustering object.

    :param obj: a Clustering wrapper
    :return: the per-point cluster labels
    """
    if Clustering.is_pyclustering_instance(obj.model):
        # Bug fix: the method was previously returned without being called,
        # so callers received a bound method object instead of the labels.
        return obj._labels_from_pyclusters()
    else:
        return obj.model.labels_
def silhouette_n_clusters(data, k_min, k_max, distance='euclidean'):
    """Computes and plots the silhouette score vs number of clusters graph to
    help selecting the number of clusters visually.

    :param data: the observations to cluster
    :param k_min: smallest number of clusters to try (inclusive)
    :param k_max: largest number of clusters to try (exclusive)
    :param distance: metric passed to the silhouette computation
    """
    ks = range(k_min, k_max)
    models = [Clustering.kmeans(k).fit(data) for k in ks]
    scores = [model.silhouette_score(data=data, metric=distance)
              for model in models]
    figure = plt.figure()
    axis = figure.add_subplot(111)
    axis.plot(ks, scores, 'b*-')
    axis.set_ylim((-1, 1))
    plt.grid(True)
    plt.xlabel('n_clusters')
    plt.ylabel('The silhouette score')
    plt.title('Silhouette score vs. k')
    plt.show()
def materialize(datasets):
    """Multiple materializations. Enables the user to specify a set of
    GMQLDataset to be materialized: a dict maps output paths to datasets,
    a list materializes each dataset without an output path.

    :param datasets: dict {output_path: GMQLDataset} or list of GMQLDataset
    :return: dict or list of the materialized results, mirroring the input
    :raises TypeError: on any non-GMQLDataset element or wrong container
    """
    from .. import GMQLDataset
    if isinstance(datasets, dict):
        result = dict()
        for output_path, dataset in datasets.items():
            if not isinstance(dataset, GMQLDataset.GMQLDataset):
                raise TypeError("The values of the dictionary must be GMQLDataset."
                                " {} was given".format(type(dataset)))
            result[output_path] = dataset.materialize(output_path)
        return result
    if isinstance(datasets, list):
        result = []
        for dataset in datasets:
            if not isinstance(dataset, GMQLDataset.GMQLDataset):
                raise TypeError("The values of the list must be GMQLDataset."
                                " {} was given".format(type(dataset)))
            result.append(dataset.materialize())
        return result
    raise TypeError("The input must be a dictionary of a list. "
                    "{} was given".format(type(datasets)))
def retrieve_bicluster(self, df, row_no, column_no):
    """Extracts the bicluster at the given row bicluster number and the
    column bicluster number from the input dataframe.

    :param df: the dataframe that was biclustered
    :param row_no: index of the row bicluster
    :param column_no: index of the column bicluster
    :return: the sub-dataframe covered by the two masks
    """
    row_mask = self.model.biclusters_[0][row_no]
    col_mask = self.model.biclusters_[1][column_no]
    selected_rows = df[row_mask]
    return selected_rows[selected_rows.columns[col_mask]]
def bicluster_similarity(self, reference_model):
    """Calculates the similarity between the current model of biclusters and
    the reference model of biclusters.

    :param reference_model: model exposing a biclusters_ attribute
    :return: the consensus similarity score
    """
    return consensus_score(self.model.biclusters_, reference_model.biclusters_)
def merge(self, samples_uuid):
    """The method to merge the datamodels belonging to different references.

    :param samples_uuid: metadata attribute whose value identifies the same
        biological sample across the different data models
    :return: a DataFrame whose MultiIndex rows pair up the merged samples

    NOTE(review): appears to rely on every uuid group having one sample per
    data model, ordered consistently with ``self.data_model`` — confirm
    with callers.
    """
    # Concatenate the metadata of every data model into one frame.
    all_meta_data = pd.DataFrame()
    for dm in self.data_model:
        all_meta_data = pd.concat([all_meta_data, dm.meta], axis=0)
    # Group the sample ids that share the same uuid value.
    group = all_meta_data.groupby([samples_uuid])['sample']
    sample_sets = group.apply(list).values
    merged_df = pd.DataFrame()
    multi_index = list(map(list, zip(*sample_sets)))
    multi_index_names = list(range(0, len(sample_sets[0])))
    i = 1  # counter; incremented below but otherwise unused
    for pair in sample_sets:
        i += 1
        numbers = list(range(0, len(pair)))
        df_temp = pd.DataFrame()
        for n in numbers:
            try:
                # Align the n-th sample of the tuple with its data model.
                df_temp = pd.concat([df_temp, self.data_model[n].data.loc[pair[n]]], axis=1)
            except:
                # Best-effort: samples missing from a data model are skipped.
                pass
        # Collapse the concatenated columns into a single back-filled row.
        merged_df = pd.concat([merged_df, df_temp.T.bfill().iloc[[0]]], axis=0)
    multi_index = np.asarray(multi_index)
    multi_index = pd.MultiIndex.from_arrays(multi_index, names=multi_index_names)
    merged_df.index = multi_index
    return merged_df
def impute_using_statistics(df, method='min'):
    """Imputes the missing values by the selected statistical property of
    each column.

    :param df: dataframe with missing values
    :param method: statistic used by SimpleFill (e.g. 'min', 'mean')
    :return: a new dataframe with the missing values filled in
    """
    filler = SimpleFill(method)
    completed = filler.complete(df.values)
    return pd.DataFrame(completed, df.index, df.columns)
def impute_knn(df, k=3):
    """Nearest neighbour imputation, weighting samples by the mean squared
    difference on features for which two rows both have observed data.

    :param df: dataframe with missing values
    :param k: number of neighbours to use
    :return: a new dataframe with the missing values filled in
    """
    completed = KNN(k=k).complete(df.values)
    return pd.DataFrame(completed, df.index, df.columns)
def isin(self, values):
    """Selects the samples having the metadata attribute among the values
    provided as input, by OR-ing one equality condition per value.

    :param values: list of accepted metadata values
    :return: a new metadata condition (None for an empty list)
    :raises TypeError: if values is not a list
    :raises SyntaxError: when called on an already-combined (complex) condition
    """
    if not isinstance(values, list):
        # Bug fix: the message previously said "should be a string" while
        # the check requires a list.
        raise TypeError("Input should be a list. {} was provided".format(type(values)))
    if self.name.startswith("(") and self.name.endswith(")"):
        raise SyntaxError("You cannot use 'isin' with a complex condition")
    new_condition = None
    for v in values:
        condition = self.__eq__(v)
        if new_condition is None:
            new_condition = condition
        else:
            new_condition = new_condition.__or__(condition)
    return new_condition
def daemonize(self):
    """Detach the process via the classic UNIX double fork and record the
    daemon's pid."""
    self._double_fork()
    pid = os.getpid()
    self.pid = pid
    LOG.info("Succesfully daemonized process {0}.".format(pid))
def _double_fork(self):
    """Do the UNIX double-fork magic.

    First fork: the parent exits so the child runs detached from the shell.
    The child then becomes a session leader (setsid) with a safe cwd and
    umask. Second fork: the session leader exits so the grandchild can
    never re-acquire a controlling terminal.
    """
    try:
        pid = os.fork()
        if pid > 0:
            # Parent of the first fork: terminate immediately.
            sys.exit(0)
            return None  # unreachable after sys.exit; kept from the original
    except OSError as err:
        LOG.exception(
            "Fork #1 failed: {0} ({1})".format(
                err.errno,
                err.strerror,
            ),
        )
        sys.exit(exit.DAEMONIZE_FAILED)
        return None  # unreachable after sys.exit; kept from the original
    # Decouple from the parent environment.
    os.chdir("/")
    os.setsid()
    os.umask(0)
    try:
        pid = os.fork()
        if pid > 0:
            # Parent of the second fork (the session leader): terminate.
            sys.exit(0)
    except OSError as err:
        LOG.exception(
            "Fork #2 failed: {0} ({1})".format(
                err.errno,
                err.strerror,
            ),
        )
        sys.exit(exit.DAEMONIZE_FAILED)
    return None
def from_memory(cls, data, meta):
    """Overloaded constructor to create the GenometricSpace object from
    in-memory data and meta variables. The indexes of the data and meta
    dataframes should be the same.

    :param data: the region data dataframe
    :param meta: the metadata dataframe
    :return: a new instance of cls holding the given data and meta
    """
    instance = cls()
    instance.data = data
    instance.meta = meta
    return instance
def load(self, _path, regs=None, meta=None, values=None, full_load=False, file_extension="gdm"):
    """Parses and loads the data into instance attributes. The indexes of the
    data and meta dataframes should be the same.

    :param _path: path of the dataset folder
    :param regs: region attribute names (default: ['chr', 'left', 'right', 'strand'])
    :param meta: metadata attribute names to load (default: [])
    :param values: region value column names (default: [])
    :param full_load: when False, all-zero rows are skipped to save memory
    :param file_extension: extension of the region data files
    """
    # Fix: mutable default arguments replaced with None sentinels
    # (backward-compatible: the effective defaults are unchanged).
    if regs is None:
        regs = ['chr', 'left', 'right', 'strand']
    if meta is None:
        meta = []
    if values is None:
        values = []
    if not full_load:
        warnings.warn("\n\nYou are using the optimized loading technique. "
                      "All-zero rows are not going to be loaded into memory. "
                      "To load all the data please set the full_load parameter equal to True.")
    p = Parser(_path)
    self.meta = p.parse_meta(meta)
    self.data = p.parse_data(regs, values, full_load=full_load, extension=file_extension)
    self._path = _path
def set_meta(self, selected_meta):
    """Sets one axis of the 2D multi-indexed dataframe index to the selected
    metadata.

    :param selected_meta: list of metadata attribute names that become index
        levels (the 'sample' level is always appended last)
    """
    meta_names = list(selected_meta)
    meta_names.append('sample')
    meta_index = []
    warnings.warn("\n\nThis method assumes that the last level of the index is the sample_id.\n"
                  "In case of single index, the index itself should be the sample_id")
    for x in meta_names:
        # Fix: DataFrame.ix was deprecated and removed from pandas;
        # .loc is the label-based equivalent for this lookup.
        meta_index.append(self.meta.loc[self.data.index.get_level_values(-1)][x].values)
    meta_index = np.asarray(meta_index)
    multi_meta_index = pd.MultiIndex.from_arrays(meta_index, names=meta_names)
    self.data.index = multi_meta_index
def to_matrix(self, values, selected_regions, default_value=0):
    """Creates a 2D multi-indexed matrix representation of the data,
    pivoting samples against regions. This representation allows the data
    to be sent to the machine learning algorithms.

    :param values: column name (or list of names) holding the region values
    :param selected_regions: column name(s) identifying a region
    :param default_value: fill value for missing (sample, region) pairs
    """
    if isinstance(values, list):
        for v in values:
            try:
                self.data[v] = self.data[v].map(float)
            except (TypeError, ValueError):
                # Fix: was a bare 'except:' swallowing everything; only
                # conversion errors are expected here. The offending column
                # is reported (as before) and left unconverted.
                print(self.data[v])
    else:
        self.data[values] = self.data[values].map(float)
    print("started pivoting")
    self.data = pd.pivot_table(self.data,
                               values=values,
                               columns=selected_regions,
                               index=['sample'],
                               fill_value=default_value)
    print("end of pivoting")
def get_values(self, set, selected_meta):
    """Retrieves the selected metadata values of the given set.

    :param set: dataframe whose (last) index level holds the sample ids
    :param selected_meta: name(s) of the metadata attribute(s) to retrieve
    :return: the metadata values, cast to float when possible
    """
    warnings.warn("\n\nThis method assumes that the last level of the index is the sample_id.\n"
                  "In case of single index, the index itself should be the sample_id")
    sample_ids = set.index.get_level_values(-1)
    corresponding_meta = self.meta.loc[sample_ids]
    values = corresponding_meta[selected_meta]
    try:
        return values.astype(float)
    except ValueError:
        print("the values should be numeric")
        return values
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.