idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
47,200
def state_estimation_ensemble(data, k, n_runs=10, M_list=None, **se_params):
    """Run an ensemble of Poisson state estimations and consolidate them.

    Runs ``poisson_estimate_state`` ``n_runs`` times (unless pre-computed M
    matrices are supplied), stacks the M results column-wise, and runs state
    estimation once more on the stacked matrix to get a consensus M. W is
    then re-derived by projecting the data onto the consensus M and
    normalizing each column to sum to 1.

    Args:
        data: genes x cells array.
        k: number of cell states.
        n_runs: number of state estimation runs to ensemble.
        M_list: optional list of pre-computed M matrices; if None or empty,
            ``n_runs`` fresh estimations are performed.
        **se_params: forwarded to ``poisson_estimate_state``.

    Returns:
        tuple (M_new, W_new, ll): consensus means, projected/normalized
        weights, and the log-likelihood of the consensus run.
    """
    # Fix: the mutable default argument ``M_list=[]`` was replaced with None
    # so the default list cannot be shared between calls.
    if not M_list:
        M_list = []
        for _ in range(n_runs):
            M, W, ll = poisson_estimate_state(data, k, **se_params)
            M_list.append(M)
    M_stacked = np.hstack(M_list)
    M_new, W_new, ll = poisson_estimate_state(M_stacked, k, **se_params)
    # Re-derive W by projecting the data onto the consensus M, then
    # column-normalize so each column sums to 1.
    W_new = np.dot(data.T, M_new)
    W_new = W_new / W_new.sum(0)
    return M_new, W_new, ll
Runs an ensemble method on the list of M results ...
47,201
def nmf_ensemble(data, k, n_runs=10, W_list=None, **nmf_params):
    """Run an ensemble of NMF decompositions and consolidate the W matrices.

    Runs NMF ``n_runs`` times (unless pre-computed W matrices are supplied),
    stacks the W results, factorizes the stacked matrix to get a consensus W,
    derives an H by projecting the data onto it, and finally refits NMF with
    a custom initialization from that consensus.

    Args:
        data: genes x cells array.
        k: number of components.
        n_runs: number of NMF runs to ensemble.
        W_list: optional list of pre-computed W matrices; if None or empty,
            ``n_runs`` fresh decompositions are performed.
        **nmf_params: accepted for interface compatibility (currently unused
            by the body — TODO confirm whether they should reach NMF()).

    Returns:
        tuple (nmf_w, H_new): the refit W and H matrices.
    """
    nmf = NMF(k)
    # Fix: mutable default argument ``W_list=[]`` replaced with None to avoid
    # sharing the default list across calls.
    if not W_list:
        W_list = []
        for _ in range(n_runs):
            W = nmf.fit_transform(data)
            W_list.append(W)
    W_stacked = np.hstack(W_list)
    # Consensus W from the stacked runs (the unused H of this fit was dropped).
    nmf_w = nmf.fit_transform(W_stacked)
    H_new = data.T.dot(nmf_w).T
    # Refit from the consensus as a custom initialization.
    nmf2 = NMF(k, init='custom')
    nmf_w = nmf2.fit_transform(data, W=nmf_w, H=H_new)
    H_new = nmf2.components_
    return nmf_w, H_new
Runs an ensemble method on the list of NMF W matrices ...
47,202
def nmf_tsne(data, k, n_runs=10, init='enhanced', **params):
    """Run t-SNE/KMeans consensus clustering on repeated NMF fits, then a
    final NMF initialized from the consensus labels.

    For each of ``n_runs``: fit NMF, embed the reconstruction W·H with
    t-SNE, and cluster the embedding with KMeans. The per-run labelings are
    combined with ``CE.cluster_ensembles`` into one consensus labeling,
    which seeds a custom-initialized final NMF via ``nmf_init``.

    Args:
        data: genes x cells array (TODO confirm orientation from callers).
        k: number of components / clusters.
        n_runs: number of NMF+tSNE+KMeans rounds to ensemble.
        init: initialization scheme name forwarded to ``nmf_init``.
        **params: accepted but not used by this body.

    Returns:
        tuple (W, H) from the final consensus-initialized NMF fit.
    """
    clusters = []
    nmf = NMF(k)
    tsne = TSNE(2)
    km = KMeans(k)
    for i in range(n_runs):
        w = nmf.fit_transform(data)
        h = nmf.components_
        # Embed the low-rank reconstruction (cells as rows) and cluster it.
        tsne_wh = tsne.fit_transform(w.dot(h).T)
        clust = km.fit_predict(tsne_wh)
        clusters.append(clust)
    clusterings = np.vstack(clusters)
    # One consensus labeling from all runs.
    consensus = CE.cluster_ensembles(clusterings, verbose=False, N_clusters_max=k)
    nmf_new = NMF(k, init='custom')
    init_w, init_h = nmf_init(data, consensus, k, init)
    W = nmf_new.fit_transform(data, W=init_w, H=init_h)
    H = nmf_new.components_
    return W, H
Runs t-SNE-consensus-NMF: repeated NMF fits are clustered via t-SNE and KMeans, and a consensus of the clusterings initializes a final NMF.
47,203
def poisson_consensus_se(data, k, n_runs=10, **se_params):
    """Initialize Poisson state estimation from a consensus Poisson clustering.

    Runs ``poisson_cluster`` ``n_runs`` times, builds a consensus labeling
    with ``CE.cluster_ensembles``, converts it to initial M/W via
    ``nmf_init`` with the 'basic' scheme, and runs a single state estimation
    seeded from those.

    Returns:
        tuple (M, W, ll) from ``poisson_estimate_state``.
    """
    # Collect one assignment vector per clustering run (means are discarded).
    run_labels = [poisson_cluster(data, k)[0] for _ in range(n_runs)]
    clusterings = np.vstack(run_labels)
    consensus = CE.cluster_ensembles(clusterings, verbose=False, N_clusters_max=k)
    init_m, init_w = nmf_init(data, consensus, k, 'basic')
    return poisson_estimate_state(data, k, init_means=init_m,
                                  init_weights=init_w, **se_params)
Initializes Poisson State Estimation using a consensus Poisson clustering .
47,204
async def login(self, email: str, password: str) -> bool:
    """Login to the profile.

    Posts a Signin request and, on success, stores the account id on the
    instance.

    Returns:
        True when the API reports Code == 0, False otherwise.
    """
    payload = {
        'version': '1.0',
        'method': 'Signin',
        'param': {
            'Email': email,
            'Password': password,
            'CaptchaCode': '',
        },
        'sourcetype': 0,
    }
    resp = await self._request('post', API_URL_USER, json=payload)
    _LOGGER.debug('Login response: %s', resp)
    if resp.get('Code') == 0:
        self.account_id = resp['Json']['gid']
        return True
    return False
Login to the profile .
47,205
async def packages(self, package_state: Union[int, str] = '',
                   show_archived: bool = False) -> list:
    """Get the list of packages associated with the account.

    Args:
        package_state: optional API package-state filter ('' means all —
            TODO confirm against the API docs).
        show_archived: include archived packages when True.

    Returns:
        list of ``Package`` objects built from the API response.
    """
    packages_resp = await self._request(
        'post', API_URL_BUYER,
        json={'version': '1.0', 'method': 'GetTrackInfoList',
              'param': {'IsArchived': show_archived, 'Item': '',
                        'Page': 1, 'PerPage': 40,
                        'PackageState': package_state, 'Sequence': '0'},
              'sourcetype': 0})
    _LOGGER.debug('Packages response: %s', packages_resp)
    packages = []
    for package in packages_resp.get('Json', []):
        # 'FLastEvent' holds a JSON-encoded string describing the last
        # tracking event; decode it when present.
        last_event = package.get('FLastEvent')
        if last_event:
            event = json.loads(last_event)
        else:
            event = {}
        # Map API field names onto Package keyword arguments; 'z' and 'c'
        # are presumably the event text and location — verify against API.
        kwargs = {'destination_country': package.get('FSecondCountry', 0),
                  'friendly_name': package.get('FRemark'),
                  'info_text': event.get('z'),
                  'location': event.get('c'),
                  'origin_country': package.get('FFirstCountry', 0),
                  'package_type': package.get('FTrackStateType', 0),
                  'status': package.get('FPackageState', 0)}
        packages.append(Package(package['FTrackNo'], **kwargs))
    return packages
Get the list of packages associated with the account .
47,206
async def summary(self, show_archived: bool = False) -> dict:
    """Get a quick summary of how many packages are in an account.

    Returns:
        dict mapping a friendly status name (via PACKAGE_STATUS_MAP) to the
        number of packages in that status.
    """
    payload = {
        'version': '1.0',
        'method': 'GetIndexData',
        'param': {'IsArchived': show_archived},
        'sourcetype': 0,
    }
    resp = await self._request('post', API_URL_BUYER, json=payload)
    _LOGGER.debug('Summary response: %s', resp)
    return {
        PACKAGE_STATUS_MAP[entry['e']]: entry['ec']
        for entry in resp.get('Json', {}).get('eitem', [])
    }
Get a quick summary of how many packages are in an account .
47,207
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the VeSync switch platform.

    Discovers outlets from the shared VeSync manager and registers one
    VeSyncSwitchHA entity per outlet.
    """
    if discovery_info is None:
        return
    switches = []
    manager = hass.data[DOMAIN]['manager']
    if manager.outlets is not None and manager.outlets:
        if len(manager.outlets) == 1:
            count_string = 'switch'
        else:
            count_string = 'switches'
        _LOGGER.info("Discovered %d VeSync %s", len(manager.outlets),
                     count_string)
        # Fix: the old code special-cased a single outlet and wrapped the
        # whole outlets LIST in VeSyncSwitchHA (instead of the outlet), and
        # also skipped setting its energy update interval. Treat any number
        # of outlets uniformly.
        for switch in manager.outlets:
            switch._energy_update_interval = ENERGY_UPDATE_INT
            switches.append(VeSyncSwitchHA(switch))
            _LOGGER.info("Added a VeSync switch named '%s'",
                         switch.device_name)
    else:
        _LOGGER.info("No VeSync switches found")
    add_entities(switches)
Set up the VeSync switch platform .
47,208
def device_state_attributes(self):
    """Return the state attributes of the device.

    Fix: 'active_time' was assigned twice in the original; the duplicate
    assignment is removed (the resulting dict is unchanged).
    """
    smartplug = self.smartplug
    return {
        'active_time': smartplug.active_time,
        'voltage': smartplug.voltage,
        'weekly_energy_total': smartplug.weekly_energy_total,
        'monthly_energy_total': smartplug.monthly_energy_total,
        'yearly_energy_total': smartplug.yearly_energy_total,
    }
Return the state attributes of the device .
47,209
def get_variants(self, chromosome=None, start=None, end=None):
    """Return all variants in the database, optionally limited to a region.

    If no region is given all variants are returned. A region query selects
    variants that overlap [start, end]: variant.start <= end and
    variant.end >= start.

    Args:
        chromosome: chromosome name to filter on.
        start: region start coordinate.
        end: region end coordinate.

    Returns:
        pymongo cursor over the matching variants, sorted by 'start'.
    """
    query = {}
    if chromosome:
        query['chrom'] = chromosome
    # Fix: use explicit None checks so coordinate 0 is a valid start, and
    # fall back to `start` when no end is given instead of querying
    # {'$lte': None}.
    if start is not None:
        if end is None:
            end = start
        query['start'] = {'$lte': end}
        query['end'] = {'$gte': start}
    LOG.info("Find all variants {}".format(query))
    return self.db.variant.find(query).sort([('start', ASCENDING)])
Return all variants in the database. If no region is specified, all variants will be returned.
47,210
def delete_variant(self, variant):
    """Delete one observation of a variant from the database.

    If this is the last observation the variant document is removed
    entirely; otherwise its observation/homozygote/hemizygote counters are
    decremented and the case id is pulled from its 'families' list.

    Args:
        variant: dict-like variant with '_id' and optionally 'homozygote',
            'hemizygote' and 'case_id' keys.
    """
    mongo_variant = self.get_variant(variant)
    if mongo_variant:
        if mongo_variant['observations'] == 1:
            # Last observation: remove the whole document.
            LOG.debug("Removing variant {0}".format(
                mongo_variant.get('_id')))
            message = self.db.variant.delete_one({'_id': variant['_id']})
        else:
            # Decrement counters and detach this case from the variant.
            LOG.debug("Decreasing observations for {0}".format(
                mongo_variant.get('_id')))
            message = self.db.variant.update_one(
                {'_id': mongo_variant['_id']},
                {
                    '$inc': {
                        'observations': -1,
                        'homozygote': -(variant.get('homozygote', 0)),
                        'hemizygote': -(variant.get('hemizygote', 0)),
                    },
                    '$pull': {'families': variant.get('case_id')}
                }, upsert=False)
    return
Delete observation in database
47,211
def downsample(data, percent):
    """Downsample the data by removing a given fraction of the total reads.

    The reads to remove are allocated to cells by a multinomial draw over
    each cell's share of the total count, then within each cell to genes by
    that cell's gene probabilities. Counts never go below zero.

    Args:
        data: genes x cells count matrix (dense or scipy sparse).
        percent: fraction of total reads to remove, in [0, 1].

    Returns:
        A copy of ``data`` with reads removed.
    """
    new_data = data.copy()
    total_count = float(data.sum())
    # Fix: np.random.multinomial requires an integer n; the old code passed
    # a float. Unused n_genes/n_cells locals were also dropped.
    to_remove = int(total_count * percent)
    cell_sums = data.sum(0).astype(float)
    cell_gene_probs = data / cell_sums
    cell_probs = np.array(cell_sums / total_count).flatten()
    cells_selected = np.random.multinomial(to_remove, pvals=cell_probs)
    for i, num_selected in enumerate(cells_selected):
        cell_gene = np.array(cell_gene_probs[:, i]).flatten()
        genes_selected = np.random.multinomial(num_selected, pvals=cell_gene)
        if sparse.issparse(data):
            genes_selected = sparse.csc_matrix(genes_selected).T
        new_data[:, i] -= genes_selected
    # Removal is sampled with replacement, so clamp any negatives to zero.
    new_data[new_data < 0] = 0
    return new_data
downsample the data by removing a given percentage of the reads .
47,212
def nb_estimate_state(data, clusters, R=None, init_means=None,
                      init_weights=None, max_iters=10, tol=1e-4, disp=True,
                      inner_max_iters=400, normalize=True):
    """Estimate cell states and mixing weights with a Negative Binomial
    mixture model, by alternating L-BFGS-B minimization over W and M.

    Args:
        data: genes x cells array.
        clusters: number of clusters (overridden by init_means' column
            count when init_means is given).
        R: per-gene NB dispersion; if None, NB-like genes are selected with
            ``find_nb_genes`` and R is fit with ``nb_fit``.
        init_means: optional genes x clusters initialization for M.
        init_weights: optional initialization for W (1-D assignments are
            expanded via ``initialize_from_assignments``).
        max_iters: number of outer alternating iterations.
        tol: convergence tolerance on the mean per-element step size.
        disp: print/solver verbosity.
        inner_max_iters: L-BFGS-B iteration cap per half-step.
        normalize: column-normalize the returned W to sum to 1.

    Returns:
        tuple (M, W, R, ll): means (genes x clusters), weights
        (clusters x cells), dispersions, and final objective value.
    """
    data_subset = data.copy()
    genes, cells = data_subset.shape
    if R is None:
        # Restrict to genes that look NB-distributed, then fit dispersions.
        nb_indices = find_nb_genes(data)
        data_subset = data[nb_indices, :]
        if init_means is not None and len(init_means) > sum(nb_indices):
            init_means = init_means[nb_indices, :]
        genes, cells = data_subset.shape
        R = np.zeros(genes)
        P, R = nb_fit(data_subset)
    if init_means is None:
        means, assignments = kmeans_pp(data_subset, clusters)
    else:
        means = init_means.copy()
    # NOTE(review): `clusters` is silently reset from the means' shape here.
    clusters = means.shape[1]
    w_init = np.random.random(cells * clusters)
    if init_weights is not None:
        if len(init_weights.shape) == 1:
            init_weights = initialize_from_assignments(init_weights, clusters)
        w_init = init_weights.reshape(cells * clusters)
    m_init = means.reshape(genes * clusters)
    ll = np.inf
    for i in range(max_iters):
        if disp:
            print('iter: {0}'.format(i))
        w_bounds = [(0, 1.0) for x in w_init]
        m_bounds = [(0, None) for x in m_init]
        # W-step: minimize over the weights with M fixed.
        w_objective, w_deriv = _create_w_objective(means, data_subset, R)
        w_res = minimize(w_objective, w_init, method='L-BFGS-B', jac=w_deriv,
                         bounds=w_bounds,
                         options={'disp': disp, 'maxiter': inner_max_iters})
        w_diff = np.sqrt(np.sum((w_res.x - w_init) ** 2)) / w_init.size
        w_new = w_res.x.reshape((clusters, cells))
        w_init = w_res.x
        # M-step: minimize over the means with W fixed.
        m_objective, m_deriv = _create_m_objective(w_new, data_subset, R)
        m_res = minimize(m_objective, m_init, method='L-BFGS-B', jac=m_deriv,
                         bounds=m_bounds,
                         options={'disp': disp, 'maxiter': inner_max_iters})
        m_diff = np.sqrt(np.sum((m_res.x - m_init) ** 2)) / m_init.size
        m_new = m_res.x.reshape((genes, clusters))
        m_init = m_res.x
        ll = m_res.fun
        means = m_new
        # Stop when both half-steps moved less than tol.
        if w_diff < tol and m_diff < tol:
            break
    if normalize:
        w_new = w_new / w_new.sum(0)
    return m_new, w_new, R, ll
Uses a Negative Binomial Mixture model to estimate cell states and cell state mixing weights .
47,213
def cli(ctx, directory, uri, verbose, count):
    """Load all VCF files in a directory into loqusdb via subprocess calls.

    Builds an index first, then runs ``loqusdb load --sv-variants <file>``
    for each ``*.vcf`` in the directory, deriving the case id from the file
    stem. With ``count`` set, files are only counted, not loaded.
    """
    loglevel = "INFO"
    if verbose:
        loglevel = "DEBUG"
    coloredlogs.install(level=loglevel)
    p = Path(directory)
    if not p.is_dir():
        LOG.warning("{0} is not a valid directory".format(directory))
        ctx.abort()
    start_time = datetime.now()
    index_call = ['loqusdb', 'index']
    base_call = ['loqusdb']
    if uri:
        base_call.append('--uri')
        base_call.append(uri)
        index_call.append('--uri')
        index_call.append(uri)
    subprocess.run(index_call)
    base_call.append('load')
    nr_files = 0
    for nr_files, file_name in enumerate(list(p.glob('*.vcf')), 1):
        call = deepcopy(base_call)
        case_id = file_name.stem.split('.')[0]
        call.append('--sv-variants')
        call.append(str(file_name))
        call.append('--case-id')
        call.append(case_id)
        if count:
            continue
        try:
            subprocess.run(call, check=True)
        except subprocess.CalledProcessError as err:
            LOG.warning(err)
            # Fix: the old code logged undefined `filename` (NameError).
            LOG.warning("Failed to load file %s", file_name)
            LOG.info("Continue with files...")
        # Fix: progress was logged when nr_files was NOT a multiple of 100.
        if nr_files % 100 == 0:
            LOG.info("%s files loaded", nr_files)
    LOG.info("%s files inserted", nr_files)
    LOG.info("Time to insert files: {}".format(datetime.now() - start_time))
Load all files in a directory .
47,214
def nmf_init(data, clusters, k, init='enhanced'):
    """Generate initial M and W from a data set and cluster labels.

    M columns are per-cluster means (or a random data column for empty
    clusters). W is built according to ``init``:
      - 'enhanced': soft weights from euclidean distances to each M column
        (fuzzy-c-means-like formula).
      - 'basic': one-hot-ish weights from the assignments.
      - 'nmf': solve for W with sklearn's non_negative_factorization given
        fixed M.

    Args:
        data: genes x cells array (dense or scipy sparse).
        clusters: per-cell integer cluster labels.
        k: number of clusters/components.
        init: one of 'enhanced', 'basic', 'nmf'.

    Returns:
        tuple (init_m, init_w) of shapes (genes, k) and (k, cells).
    """
    init_m = np.zeros((data.shape[0], k))
    if sparse.issparse(data):
        for i in range(k):
            if data[:, clusters == i].shape[1] == 0:
                # Empty cluster: seed with a random data column.
                point = np.random.randint(0, data.shape[1])
                init_m[:, i] = data[:, point].toarray().flatten()
            else:
                init_m[:, i] = np.array(
                    data[:, clusters == i].mean(1)).flatten()
    else:
        for i in range(k):
            if data[:, clusters == i].shape[1] == 0:
                point = np.random.randint(0, data.shape[1])
                init_m[:, i] = data[:, point].flatten()
            else:
                init_m[:, i] = data[:, clusters == i].mean(1)
    init_w = np.zeros((k, data.shape[1]))
    if init == 'enhanced':
        distances = np.zeros((k, data.shape[1]))
        for i in range(k):
            for j in range(data.shape[1]):
                distances[i, j] = np.sqrt(
                    ((data[:, j] - init_m[:, i]) ** 2).sum())
        # NOTE(review): fuzzy-membership-style weighting; divides by
        # distances, so a zero distance would produce inf/nan — confirm
        # inputs cannot coincide exactly with a cluster mean.
        for i in range(k):
            for j in range(data.shape[1]):
                init_w[i, j] = 1 / (
                    (distances[:, j] / distances[i, j]) ** (-2)).sum()
    elif init == 'basic':
        init_w = initialize_from_assignments(clusters, k)
    elif init == 'nmf':
        # Solve for W with M held fixed (transposed into sklearn's layout).
        init_w_, _, n_iter = non_negative_factorization(
            data.T, n_components=k, init='custom', update_W=False,
            W=init_m.T)
        init_w = init_w_.T
    return init_m, init_w
Generates initial M and W given a data set and an array of cluster labels .
47,215
def get_variant_id(variant):
    """Build a variant id on the format chrom_pos_ref_alt.

    Args:
        variant: object with CHROM, POS, REF and ALT (list) attributes.

    Returns:
        str: underscore-joined id using the first ALT allele.
    """
    parts = (variant.CHROM, variant.POS, variant.REF, variant.ALT[0])
    return '_'.join(str(part) for part in parts)
Get a variant id on the format chrom_pos_ref_alt
47,216
def migrate(ctx):
    """Migrate an old loqusdb instance to the 1.0 schema.

    Pulls the adapter from the click context, runs the migration, and logs
    the elapsed time plus the number of updated variants.
    """
    adapter = ctx.obj['adapter']
    started = datetime.now()
    nr_updated = migrate_database(adapter)
    elapsed = datetime.now() - started
    LOG.info("All variants updated, time to complete migration: {}".format(elapsed))
    LOG.info("Nr variants that where updated: %s", nr_updated)
Migrate an old loqusdb instance to 1.0.
47,217
def export(ctx, outfile, variant_type):
    """Export the variants of a loqusdb instance to a VCF file.

    Builds a VCF 4.3 header (INFO fields, version tracking, contigs in
    CHROMOSOME_ORDER first, then any remaining chromosomes), prints it, and
    then streams every variant of the requested type chromosome by
    chromosome.

    Args:
        ctx: click context carrying 'adapter' and 'version'.
        outfile: destination passed to print_headers/print_variant.
        variant_type: 'snv' or 'sv'.
    """
    adapter = ctx.obj['adapter']
    version = ctx.obj['version']
    LOG.info("Export the variants from {0}".format(adapter))
    nr_cases = 0
    is_sv = variant_type == 'sv'
    existing_chromosomes = set(adapter.get_chromosomes(sv=is_sv))
    # Emit chromosomes in the canonical order first, then any leftovers.
    ordered_chromosomes = []
    for chrom in CHROMOSOME_ORDER:
        if chrom in existing_chromosomes:
            ordered_chromosomes.append(chrom)
            existing_chromosomes.remove(chrom)
    for chrom in existing_chromosomes:
        ordered_chromosomes.append(chrom)
    nr_cases = adapter.cases().count()
    LOG.info("Found {0} cases in database".format(nr_cases))
    # Assemble the VCF header.
    head = HeaderParser()
    head.add_fileformat("VCFv4.3")
    head.add_meta_line("NrCases", nr_cases)
    head.add_info("Obs", '1', 'Integer',
                  "The number of observations for the variant")
    head.add_info("Hom", '1', 'Integer',
                  "The number of observed homozygotes")
    head.add_info("Hem", '1', 'Integer',
                  "The number of observed hemizygotes")
    head.add_version_tracking("loqusdb", version,
                              datetime.now().strftime("%Y-%m-%d %H:%M"))
    if variant_type == 'sv':
        # SV-specific INFO fields.
        head.add_info("END", '1', 'Integer', "End position of the variant")
        head.add_info("SVTYPE", '1', 'String', "Type of structural variant")
        head.add_info("SVLEN", '1', 'Integer', "Length of structural variant")
    for chrom in ordered_chromosomes:
        length = adapter.get_max_position(chrom)
        head.add_contig(contig_id=chrom, length=str(length))
    print_headers(head, outfile=outfile)
    # Stream the variants per chromosome.
    for chrom in ordered_chromosomes:
        if variant_type == 'snv':
            LOG.info("Collecting all SNV variants")
            variants = adapter.get_variants(chromosome=chrom)
        else:
            LOG.info("Collecting all SV variants")
            variants = adapter.get_sv_variants(chromosome=chrom)
        LOG.info("{} variants found".format(variants.count()))
        for variant in variants:
            variant_line = format_variant(variant, variant_type=variant_type)
            print_variant(variant_line=variant_line, outfile=outfile)
Export the variants of a loqus db The variants are exported to a vcf file
47,218
def load_database(adapter, variant_file=None, sv_file=None, family_file=None,
                  family_type='ped', skip_case_id=False, gq_treshold=None,
                  case_id=None, max_window=3000, profile_file=None,
                  hard_threshold=0.95, soft_threshold=0.9):
    """Load the database with a case and its variants.

    Validates the SNV/SV VCFs, optionally computes and matches profile
    strings, builds and stores the case object, then loads variants from
    each VCF. On any failure during variant loading the case is deleted
    again and the error re-raised.

    Returns:
        int: the total number of inserted variants.

    Raises:
        SyntaxError: when a GQ threshold is requested but the VCF header
            has no GQ field.
    """
    vcf_files = []
    nr_variants = None
    vcf_individuals = None
    if variant_file:
        vcf_info = check_vcf(variant_file)
        nr_variants = vcf_info['nr_variants']
        variant_type = vcf_info['variant_type']
        vcf_files.append(variant_file)
        # Individual ids from the VCF header.
        vcf_individuals = vcf_info['individuals']
    nr_sv_variants = None
    sv_individuals = None
    if sv_file:
        vcf_info = check_vcf(sv_file, 'sv')
        nr_sv_variants = vcf_info['nr_variants']
        vcf_files.append(sv_file)
        sv_individuals = vcf_info['individuals']
    profiles = None
    matches = None
    if profile_file:
        profiles = get_profiles(adapter, profile_file)
        # Raises when a hard-threshold duplicate sample is found.
        matches = profile_match(adapter, profiles,
                                hard_threshold=hard_threshold,
                                soft_threshold=soft_threshold)
    for _vcf_file in vcf_files:
        vcf = get_vcf(_vcf_file)
        if gq_treshold:
            if not vcf.contains('GQ'):
                LOG.warning('Set gq-treshold to 0 or add info to vcf {0}'.format(_vcf_file))
                raise SyntaxError('GQ is not defined in vcf header')
    family = None
    family_id = None
    if family_file:
        LOG.info("Loading family from %s", family_file)
        with open(family_file, 'r') as family_lines:
            family = get_case(family_lines=family_lines,
                              family_type=family_type)
            family_id = family.family_id
    # Explicit case_id wins over the ped-file family id.
    case_id = case_id or family_id
    case_obj = build_case(case=family, case_id=case_id,
                          vcf_path=variant_file,
                          vcf_individuals=vcf_individuals,
                          nr_variants=nr_variants,
                          vcf_sv_path=sv_file,
                          sv_individuals=sv_individuals,
                          nr_sv_variants=nr_sv_variants,
                          profiles=profiles, matches=matches,
                          profile_path=profile_file)
    load_case(adapter=adapter, case_obj=case_obj,)
    nr_inserted = 0
    for file_type in ['vcf_path', 'vcf_sv_path']:
        variant_type = 'snv'
        if file_type == 'vcf_sv_path':
            variant_type = 'sv'
        if case_obj.get(file_type) is None:
            continue
        vcf_obj = get_vcf(case_obj[file_type])
        try:
            nr_inserted += load_variants(adapter=adapter, vcf_obj=vcf_obj,
                                         case_obj=case_obj,
                                         skip_case_id=skip_case_id,
                                         gq_treshold=gq_treshold,
                                         max_window=max_window,
                                         variant_type=variant_type,)
        except Exception as err:
            # Roll back: remove the case (and what was loaded) on failure.
            LOG.warning(err)
            delete(adapter=adapter, case_obj=case_obj,)
            raise err
    return nr_inserted
Load the database with a case and its variants
47,219
def load_case(adapter, case_obj, update=False):
    """Load a case into the database.

    Args:
        adapter: database adapter.
        case_obj: case dict to insert.
        update: when True an existing case is merged/updated instead of
            being rejected.

    Returns:
        The (possibly merged) case object.

    Raises:
        CaseError: if the case exists and ``update`` is False, or if the
            adapter rejects the case.
    """
    existing_case = adapter.case(case_obj)
    if existing_case:
        if not update:
            raise CaseError("Case {0} already exists in database".format(
                case_obj['case_id']))
        case_obj = update_case(case_obj, existing_case)
    # Fix: removed a redundant `try/except CaseError as err: raise err`
    # wrapper — the exception propagates unchanged without it.
    adapter.add_case(case_obj, update=update)
    return case_obj
Load a case to the database
47,220
def load_variants(adapter, vcf_obj, case_obj, skip_case_id=False,
                  gq_treshold=None, max_window=3000, variant_type='snv'):
    """Load the variants of one VCF for a family into the database.

    SVs are inserted one by one via add_structural_variant; SNVs are
    handed to the adapter as a generator via add_variants.

    Args:
        adapter: database adapter.
        vcf_obj: iterable of VCF records.
        case_obj: case dict with 'case_id' and variant counts.
        skip_case_id: when True, variants are stored without a case id.
        gq_treshold: genotype-quality threshold passed to build_variant.
        max_window: SV matching window passed to the adapter.
        variant_type: 'snv' or 'sv'.

    Returns:
        int: number of inserted variants.
    """
    # The expected count only drives the progress bar length.
    if variant_type == 'snv':
        nr_variants = case_obj['nr_variants']
    else:
        nr_variants = case_obj['nr_sv_variants']
    nr_inserted = 0
    case_id = case_obj['case_id']
    if skip_case_id:
        case_id = None
    with click.progressbar(vcf_obj, label="Inserting variants",
                           length=nr_variants) as bar:
        # Lazily build variant dicts as the bar is consumed.
        variants = (build_variant(variant, case_obj, case_id, gq_treshold)
                    for variant in bar)
        if variant_type == 'sv':
            for sv_variant in variants:
                # build_variant may return a falsy value for skipped records.
                if not sv_variant:
                    continue
                adapter.add_structural_variant(variant=sv_variant,
                                               max_window=max_window)
                nr_inserted += 1
        if variant_type == 'snv':
            nr_inserted = adapter.add_variants(variants)
    LOG.info("Inserted %s variants of type %s", nr_inserted, variant_type)
    return nr_inserted
Load variants for a family into the database .
47,221
def max_variance_genes(data, nbins=5, frac=0.2):
    """Identify the genes with the highest variance within mean-sorted bins.

    Genes are sorted by mean expression and split into ``nbins`` bins; from
    each bin the top ``frac`` fraction by variance is kept (zero-variance
    genes are dropped).

    Args:
        data: genes x cells array (dense or scipy sparse).
        nbins: number of mean-expression bins.
        frac: fraction of each bin to keep.

    Returns:
        list of selected gene indices.
    """
    if sparse.issparse(data):
        means, var = sparse_mean_var(data)
    else:
        means = data.mean(1)
        var = data.var(1)
    order_by_mean = means.argsort()
    bin_size = int(data.shape[0] / nbins)
    n_top = int(bin_size * frac)
    selected = []
    for b in range(nbins):
        # The last bin absorbs any remainder genes.
        if b == nbins - 1:
            bin_members = order_by_mean[b * bin_size:]
        else:
            bin_members = order_by_mean[b * bin_size:(b + 1) * bin_size]
        by_var = bin_members[var[bin_members].argsort()]
        top = by_var[len(bin_members) - n_top:]
        selected.extend(g for g in top if var[g] > 0)
    return selected
This function identifies the genes that have the max variance across a number of bins sorted by mean .
47,222
def cell_normalize(data):
    """Normalize expression so every cell (column) has the same total count.

    Each column is divided by its sum and rescaled by the median of the
    original per-cell totals. Sparse input is converted to CSC and
    normalized in place via ``sparse_cell_normalize``.

    Args:
        data: genes x cells array (dense or scipy sparse).

    Returns:
        The normalized (float) matrix.
    """
    if sparse.issparse(data):
        data = sparse.csc_matrix(data.astype(float))
        sparse_cell_normalize(data.data, data.indices, data.indptr,
                              data.shape[1], data.shape[0])
        return data
    normalized = data.astype(float)
    counts_per_cell = []
    for col in range(data.shape[1]):
        column = normalized[:, col]  # view: in-place division updates the matrix
        total = column.sum()
        counts_per_cell.append(total)
        column /= total
    normalized *= np.median(counts_per_cell)
    return normalized
Returns the data where the expression is normalized so that the total count per cell is equal .
47,223
def get_individual_positions(individuals):
    """Return a dict mapping each individual id to its position.

    Args:
        individuals: iterable of individual ids, or None.

    Returns:
        dict: {individual_id: index}; empty when input is None/empty.
    """
    if not individuals:
        return {}
    return {ind: pos for pos, ind in enumerate(individuals)}
Return a dictionary with individual positions
47,224
def build_case(case, vcf_individuals=None, case_id=None, vcf_path=None,
               sv_individuals=None, vcf_sv_path=None, nr_variants=None,
               nr_sv_variants=None, profiles=None, matches=None,
               profile_path=None):
    """Build a Case object from a (ped) family and/or VCF information.

    Individuals come from the ped-file family when given, otherwise from
    the VCF sample order. Each individual gets its VCF column index,
    optional profile string, and similar-sample matches attached.

    Returns:
        Case: dict-like case object with paths, counts and individuals.

    Raises:
        CaseError: when no case id can be determined, or when a ped
            individual is missing from the VCF.
    """
    individual_positions = get_individual_positions(vcf_individuals)
    sv_individual_positions = get_individual_positions(sv_individuals)
    family_id = None
    if case:
        if not case.affected_individuals:
            LOG.warning("No affected individuals could be found in ped file")
        family_id = case.family_id
    # Explicit case_id wins over the ped family id.
    case_id = case_id or family_id
    if case_id is None:
        raise CaseError
    case_obj = Case(case_id=case_id,)
    if vcf_path:
        case_obj['vcf_path'] = vcf_path
        case_obj['nr_variants'] = nr_variants
    if vcf_sv_path:
        case_obj['vcf_sv_path'] = vcf_sv_path
        case_obj['nr_sv_variants'] = nr_sv_variants
    if profile_path:
        case_obj['profile_path'] = profile_path
    ind_objs = []
    if case:
        # Prefer SNV-VCF sample positions; fall back to the SV VCF's.
        if individual_positions:
            _ind_pos = individual_positions
        else:
            _ind_pos = sv_individual_positions
        for ind_id in case.individuals:
            individual = case.individuals[ind_id]
            try:
                profile = profiles[ind_id] if profiles else None
                similar_samples = matches[ind_id] if matches else None
                ind_obj = Individual(ind_id=ind_id, case_id=case_id,
                                     ind_index=_ind_pos[ind_id],
                                     sex=individual.sex, profile=profile,
                                     similar_samples=similar_samples)
                ind_objs.append(dict(ind_obj))
            except KeyError:
                # NOTE(review): message uses %s but is raised, not logged,
                # so the id is never interpolated — confirm intent.
                raise CaseError(
                    "Ind %s in ped file does not exist in VCF", ind_id)
    else:
        # No ped file: build individuals straight from the VCF samples.
        for ind_id in individual_positions:
            profile = profiles[ind_id] if profiles else None
            similar_samples = matches[ind_id] if matches else None
            ind_obj = Individual(ind_id=ind_id, case_id=case_id,
                                 ind_index=individual_positions[ind_id],
                                 profile=profile,
                                 similar_samples=similar_samples)
            ind_objs.append(dict(ind_obj))
    # Register each individual under the SNV and/or SV sections of the case.
    for ind_obj in ind_objs:
        if vcf_sv_path:
            case_obj['sv_individuals'].append(dict(ind_obj))
            case_obj['_sv_inds'][ind_obj['ind_id']] = dict(ind_obj)
        if vcf_path:
            case_obj['individuals'].append(dict(ind_obj))
            case_obj['_inds'][ind_obj['ind_id']] = dict(ind_obj)
    return case_obj
Build a Case from the given information
47,225
def generate_poisson_data(centers, n_cells, cluster_probs=None):
    """Generate Poisson-distributed data from per-cluster mean vectors.

    For each cell a cluster is drawn from ``cluster_probs`` (uniform by
    default) and counts are sampled as Poisson around that cluster's means.

    Args:
        centers: genes x clusters matrix of Poisson means.
        n_cells: number of cells to generate.
        cluster_probs: optional per-cluster sampling probabilities.

    Returns:
        tuple (output, labels): genes x n_cells counts and the per-cell
        cluster labels.
    """
    n_genes, n_clusters = centers.shape
    samples = np.zeros((n_genes, n_cells))
    if cluster_probs is None:
        cluster_probs = np.ones(n_clusters) / n_clusters
    labels = []
    for cell in range(n_cells):
        chosen = np.random.choice(range(n_clusters), p=cluster_probs)
        labels.append(chosen)
        samples[:, cell] = np.random.poisson(centers[:, chosen])
    return samples, np.array(labels)
Generates poisson - distributed data given a set of means for each cluster .
47,226
def generate_zip_data ( M , L , n_cells , cluster_probs = None ) : genes , clusters = M . shape output = np . zeros ( ( genes , n_cells ) ) if cluster_probs is None : cluster_probs = np . ones ( clusters ) / clusters zip_p = np . random . random ( ( genes , n_cells ) ) labels = [ ] for i in range ( n_cells ) : c = np . random . choice ( range ( clusters ) , p = cluster_probs ) labels . append ( c ) output [ : , i ] = np . where ( zip_p [ : , i ] < L [ : , c ] , 0 , np . random . poisson ( M [ : , c ] ) ) return output , np . array ( labels )
Generates zero - inflated poisson - distributed data given a set of means and zero probs for each cluster .
47,227
def generate_state_data(means, weights):
    """Generate data from the Poisson Convex Mixture Model.

    Samples X ~ Poisson(M @ W) elementwise.

    Args:
        means: genes x k matrix M.
        weights: k x cells matrix W.

    Returns:
        genes x cells float array of sampled counts.
    """
    expected = np.dot(means, weights)
    return np.random.poisson(expected).astype(float)
Generates data according to the Poisson Convex Mixture Model .
47,228
def generate_zip_state_data(means, weights, z):
    """Generate data from the Zero-Inflated Poisson Convex Mixture Model.

    Samples X ~ Poisson(M @ W) and then zeroes each entry independently
    with probability ``z``.

    Fix: the original wrote the zeros into the mean matrix AFTER sampling,
    so the returned sample was never actually zero-inflated. The dropout
    mask is now applied to the sampled counts.

    Args:
        means: genes x k matrix M.
        weights: k x cells matrix W.
        z: zero-inflation (dropout) probability in [0, 1].

    Returns:
        genes x cells float array of zero-inflated counts.
    """
    x_true = np.dot(means, weights)
    sample = np.random.poisson(x_true)
    random = np.random.random(x_true.shape)
    sample = sample.astype(float)
    sample[random < z] = 0
    return sample
Generates data according to the Zero - inflated Poisson Convex Mixture Model .
47,229
def generate_nb_state_data(means, weights, R):
    """Generate data from the Negative Binomial Convex Mixture Model.

    Computes the expected matrix X = M @ W, derives per-entry success
    probabilities P = X / (R + X), and samples negative binomial counts.

    Args:
        means: genes x k matrix M.
        weights: k x cells matrix W.
        R: per-gene dispersion vector.

    Returns:
        genes x cells float array of sampled counts.
    """
    cells = weights.shape[1]
    x_true = np.dot(means, weights)
    R_tiled = np.tile(R, (cells, 1)).T
    P_true = x_true / (R_tiled + x_true)
    # Fix: reuse the already-tiled R instead of recomputing np.tile.
    sample = np.random.negative_binomial(R_tiled, P_true)
    return sample.astype(float)
Generates data according to the Negative Binomial Convex Mixture Model .
47,230
def generate_poisson_lineage(n_states, n_cells_per_cluster, n_genes,
                             means=300):
    """Generate a lineage of mixing weights per state.

    M is a random genes x states mean matrix (scaled by ``means``). For each
    state, cells interpolate from a uniform mixture toward that state: the
    state's weight grows with the cell index while the remaining mass is
    split evenly over the other states, so every column of W sums to 1.

    Fixes: removed the unused ``center`` computation and stopped shadowing
    the ``means`` parameter with the uniform-weight vector.

    Args:
        n_states: number of states/clusters.
        n_cells_per_cluster: cells generated per state.
        n_genes: number of genes.
        means: scale of the random state means.

    Returns:
        tuple (M, W): genes x states means and
        states x (n_states * n_cells_per_cluster) weights.
    """
    M = np.random.random((n_genes, n_states)) * means
    W = np.zeros((n_states, n_cells_per_cluster * n_states))
    base_weights = np.array([1.0 / n_states] * n_states)
    index = 0
    for c in range(n_states):
        for i in range(n_cells_per_cluster):
            w = np.copy(base_weights)
            new_value = w[c] + i * (1.0 - 1.0 / n_states) / n_cells_per_cluster
            # Spread the remaining mass uniformly over the other states.
            w[:] = (1.0 - new_value) / (n_states - 1.0)
            w[c] = new_value
            W[:, index] = w
            index += 1
    return M, W
Generates a lineage for each state - assumes that each state has a common ancestor .
47,231
def generate_nb_data(P, R, n_cells, assignments=None):
    """Generate negative binomial data.

    Each cell is assigned a cluster (from ``assignments`` or drawn
    uniformly) and its counts are sampled per gene as
    NB(R[:, c], 1 - P[:, c]).

    Args:
        P: genes x clusters matrix of NB probability parameters.
        R: genes x clusters matrix of NB dispersion parameters.
        n_cells: number of cells to generate.
        assignments: optional per-cell cluster labels; random when None.

    Returns:
        tuple (output, labels): genes x n_cells counts and per-cell labels.
    """
    n_genes, n_clusters = P.shape
    counts = np.zeros((n_genes, n_cells))
    if assignments is None:
        cluster_probs = np.ones(n_clusters) / n_clusters
    labels = []
    for cell in range(n_cells):
        if assignments is None:
            chosen = np.random.choice(range(n_clusters), p=cluster_probs)
        else:
            chosen = assignments[cell]
        labels.append(chosen)
        counts[:, cell] = np.random.negative_binomial(R[:, chosen],
                                                      1.0 - P[:, chosen])
    return counts, np.array(labels)
Generates negative binomial data
47,232
def visualize_poisson_w(w, labels, filename, method='pca',
                        figsize=(18, 10), title='', **scatter_options):
    """Save a scatter plot of a 2-D visualization of W (Poisson SE result).

    Args:
        w: k x cells weight matrix.
        labels: per-cell labels used for coloring.
        filename: output image path.
        method: 'pca' (default) or 'tsne'.
        figsize, title, **scatter_options: forwarded to visualize_dim_red.
    """
    if method == 'pca':
        pca = PCA(2)
        r_dim_red = pca.fit_transform(w.T).T
    elif method == 'tsne':
        # Fix: this branch was `pass`, so 'tsne' fell through to an
        # undefined r_dim_red (NameError). Use a 2-component t-SNE
        # embedding, mirroring the PCA branch.
        tsne = TSNE(2)
        r_dim_red = tsne.fit_transform(w.T).T
    else:
        print("Method is not available. use 'pca' (default) or 'tsne'.")
        return
    visualize_dim_red(r_dim_red, labels, filename, figsize, title,
                      **scatter_options)
Saves a scatter plot of a visualization of W the result from Poisson SE .
47,233
def generate_visualizations(methods, data, true_labels,
                            base_dir='visualizations', figsize=(18, 10),
                            **scatter_options):
    """Generate visualization scatter plots for all the given methods.

    Each entry of ``methods`` is (preprocessing, clustering) where the
    preprocessing is either a single Preprocess or a list applied in
    sequence, and the clustering is one method or a list of them. Results
    are reduced to 2-D (t-SNE, with TruncatedSVD pre/fallback for large
    sparse inputs) and saved under ``base_dir`` for each clustering plus
    the true labels.
    """
    plt.figure(figsize=figsize)
    for method in methods:
        preproc = method[0]
        if isinstance(preproc, Preprocess):
            preprocessed, ll = preproc.run(data)
            output_names = preproc.output_names
        else:
            # A list of preprocessors: chain them, concatenating the names.
            p1 = data
            output_names = ['']
            for p in preproc:
                p1, ll = p.run(p1)
                p1 = p1[0]
                output_names[0] = output_names[0] + p.output_names[0]
            preprocessed = [p1]
        for r, name in zip(preprocessed, output_names):
            print(name)
            if r.shape[0] == 2:
                # Already 2-D: plot directly.
                r_dim_red = r
            else:
                if sparse.issparse(r) and r.shape[0] > 100:
                    # Large sparse input: reduce with TruncatedSVD first,
                    # then try t-SNE, falling back to a 2-D SVD.
                    name = 'tsvd_' + name
                    tsvd = TruncatedSVD(50)
                    r_dim_red = tsvd.fit_transform(r.T)
                    try:
                        tsne = TSNE(2)
                        r_dim_red = tsne.fit_transform(r_dim_red).T
                        name = 'tsne_' + name
                    except:
                        tsvd2 = TruncatedSVD(2)
                        r_dim_red = tsvd2.fit_transform(r_dim_red).T
                else:
                    name = 'tsne_' + name
                    tsne = TSNE(2)
                    r_dim_red = tsne.fit_transform(r.T).T
            if isinstance(method[1], list):
                for clustering_method in method[1]:
                    try:
                        cluster_labels = clustering_method.run(r)
                    except:
                        # Best-effort: skip clusterings that fail.
                        print('clustering failed')
                        continue
                    output_path = base_dir + '/{0}_{1}_labels.png'.format(
                        name, clustering_method.name)
                    visualize_dim_red(r_dim_red, cluster_labels, output_path,
                                      **scatter_options)
            else:
                clustering_method = method[1]
                try:
                    cluster_labels = clustering_method.run(r)
                except:
                    print('clustering failed')
                    continue
                output_path = base_dir + '/{0}_{1}_labels.png'.format(
                    name, clustering_method.name)
                visualize_dim_red(r_dim_red, cluster_labels, output_path,
                                  **scatter_options)
            # Always also plot the ground-truth labels for this embedding.
            output_path = base_dir + '/{0}_true_labels.png'.format(name)
            visualize_dim_red(r_dim_red, true_labels, output_path,
                              **scatter_options)
Generates visualization scatters for all the methods .
47,234
def resolve_updates(orig_list, updated_list):
    """Merge changes from one list of devices against another.

    Devices in ``updated_list`` that are missing from ``orig_list``
    (matched by ``cid``) are appended; devices no longer present in
    ``updated_list`` are removed. Every remaining device then has its
    ``update()`` method called.

    Fixes: the original removed elements from ``orig_list`` while iterating
    it (which skips elements), and crashed with a TypeError when both lists
    were empty/None.

    Args:
        orig_list: current device list (may be None).
        updated_list: freshly fetched device list (may be None or empty).

    Returns:
        The merged device list (or the original value when nothing merged).
    """
    if updated_list is not None and updated_list:
        if orig_list is None:
            orig_list = updated_list
        else:
            known_cids = {device.cid for device in orig_list}
            for new_device in updated_list:
                if new_device.cid not in known_cids:
                    orig_list.append(new_device)
            # Iterate over a snapshot so in-place removal cannot skip items.
            updated_cids = {device.cid for device in updated_list}
            for device in list(orig_list):
                if device.cid not in updated_cids:
                    orig_list.remove(device)
    if orig_list:
        for device in orig_list:
            device.update()
    return orig_list
Merges changes from one list of devices against another
47,235
def get_profiles(adapter, vcf_file):
    """Build a genotype profile string list per sample in a VCF.

    For every profile variant stored in the database, the corresponding
    region of the VCF is scanned; each sample contributes a two-letter
    genotype string (alt/alt, ref/alt or ref/ref). When the profile variant
    is absent from the VCF every sample is assumed homozygous reference.

    Args:
        adapter: database adapter exposing ``profile_variants()``.
        vcf_file: path to the VCF to profile.

    Returns:
        dict: {sample_id: [genotype_string, ...]} with one entry per
        profile variant, in database iteration order.
    """
    vcf = get_file_handle(vcf_file)
    individuals = vcf.samples
    profiles = {individual: [] for individual in individuals}
    for profile_variant in adapter.profile_variants():
        ref = profile_variant['ref']
        alt = profile_variant['alt']
        pos = profile_variant['pos']
        end = pos + 1
        chrom = profile_variant['chrom']
        region = f"{chrom}:{pos}-{end}"
        found_variant = False
        for variant in vcf(region):
            variant_id = get_variant_id(variant)
            if variant_id == profile_variant['_id']:
                found_variant = True
                for i, individual in enumerate(individuals):
                    # Translate the cyvcf2-style gt code to a genotype name.
                    genotype = GENOTYPE_MAP[variant.gt_types[i]]
                    if genotype == 'hom_alt':
                        gt_str = f"{alt}{alt}"
                    elif genotype == 'het':
                        gt_str = f"{ref}{alt}"
                    else:
                        gt_str = f"{ref}{ref}"
                    profiles[individual].append(gt_str)
                break
        if not found_variant:
            # Variant absent from this VCF: assume hom-ref for everyone.
            for individual in individuals:
                profiles[individual].append(f"{ref}{ref}")
    return profiles
Given a vcf get a profile string for each sample in the vcf based on the profile variants in the database
47,236
def profile_match(adapter, profiles, hard_threshold=0.95, soft_threshold=0.9):
    """Search all database samples for profile matches.

    Compares every given profile against every stored individual profile.
    A similarity at or above ``hard_threshold`` aborts with ProfileError
    (the sample is considered a duplicate and must not be loaded); matches
    at or above ``soft_threshold`` are recorded and returned.

    Args:
        adapter: database adapter exposing ``cases()``.
        profiles: dict {sample_id: profile} to check.
        hard_threshold: similarity that triggers a ProfileError.
        soft_threshold: similarity that is recorded as a match.

    Returns:
        dict: {sample_id: ["case_id.ind_id", ...]} of soft matches.

    Raises:
        ProfileError: when any comparison reaches ``hard_threshold``.
    """
    matches = {sample: [] for sample in profiles.keys()}
    for case in adapter.cases():
        for individual in case['individuals']:
            for sample in profiles.keys():
                # Only stored individuals that carry a profile can match.
                if individual.get('profile'):
                    similarity = compare_profiles(profiles[sample],
                                                  individual['profile'])
                    if similarity >= hard_threshold:
                        msg = (
                            f"individual {sample} has a {similarity} similarity "
                            f"with individual {individual['ind_id']} in case "
                            f"{case['case_id']}"
                        )
                        LOG.critical(msg)
                        raise ProfileError
                    if similarity >= soft_threshold:
                        match = f"{case['case_id']}.{individual['ind_id']}"
                        matches[sample].append(match)
    return matches
Given a dict of profiles, searches through all the samples in the DB for a match. If a matching sample is found, an exception is raised and the variants will not be loaded into the database.
47,237
def compare_profiles(profile1, profile2):
    """Return the fraction of positions at which two profiles agree.

    This is 1 minus the normalized Hamming distance between the two
    equal-length strings (length is taken from profile1).
    """
    a = np.array(list(profile1))
    b = np.array(list(profile2))
    return np.sum(a == b) / len(profile1)
Given two profiles determine the ratio of similarity i . e . the hamming distance between the strings .
47,238
def update_profiles(adapter):
    """Recompute profile strings for every case that has a profile_path.

    For each such case the profiles are rebuilt from its VCF and the
    case document is upserted back into the database.  A warning is
    logged when a sample id in the VCF does not match the case.
    """
    for case in adapter.cases():
        if not case.get('profile_path'):
            continue
        profiles = get_profiles(adapter, case['profile_path'])
        individuals = deepcopy(case['individuals'])
        for individual in individuals:
            try:
                individual['profile'] = profiles[individual['ind_id']]
            except KeyError:
                LOG.warning(
                    f"sample IDs in vcf does not match for case {case['case_id']}"
                )
        updated_case = deepcopy(case)
        updated_case['individuals'] = individuals
        adapter.add_case(updated_case, update=True)
For all cases having vcf_path update the profile string for the samples
47,239
def profile_stats(adapter, threshold=0.9):
    """Tally pairwise hamming similarities of all stored sample profiles.

    Compares every stored profile against every previously seen one and
    counts how many pairs fall into each of the HAMMING_RANGES bins.
    Pairs at or above ``threshold`` similarity are logged as suspicious.

    Args:
        adapter: database adapter exposing ``cases()``
        threshold (float): similarity above which a warning is logged

    Returns:
        dict: HAMMING_RANGES key -> number of pairs in that range
    """
    profiles = []
    samples = []
    distance_dict = {key: 0 for key in HAMMING_RANGES.keys()}
    for case in adapter.cases():
        for individual in case['individuals']:
            if not individual.get('profile'):
                continue
            sample_id = f"{case['case_id']}.{individual['ind_id']}"
            ind_profile = individual['profile']
            # BUG FIX: np.float was deprecated and removed in NumPy 1.24;
            # use the builtin float as the dtype instead.
            distance_array = np.array([], dtype=float)
            for sample, profile in zip(samples, profiles):
                distance = compare_profiles(ind_profile, profile)
                distance_array = np.append(distance_array, distance)
                if distance >= threshold:
                    LOG.warning(f"{sample_id} is {distance} similar to {sample}")
            # `bounds` renamed from `range` to avoid shadowing the builtin
            for key, bounds in HAMMING_RANGES.items():
                distance_dict[key] += np.sum(
                    (distance_array >= bounds[0]) & (distance_array < bounds[1])
                )
            profiles.append(ind_profile)
            samples.append(sample_id)
    return distance_dict
Compares the pairwise hamming distances for all the sample profiles in the database . Returns a table of the number of distances within given ranges .
47,240
def purity(labels, true_labels):
    """Purity score of a clustering.

    For each predicted cluster, counts the members whose true label is
    the cluster's majority true label, and divides the total by the
    number of samples.

    Args:
        labels: 1-d array of predicted cluster labels
        true_labels: 1-d array of ground-truth labels

    Returns:
        float: purity in [0, 1]
    """
    total = 0.0
    for cluster_id in set(labels):
        members = true_labels[labels == cluster_id]
        if len(members) == 0:
            continue
        _, majority_count = Counter(members).most_common()[0]
        total += majority_count
    return float(total) / len(labels)
Calculates the purity score for the given labels .
47,241
def mdl(ll, k, data):
    """Minimum description length score of a model.

    Args:
        ll: log-likelihood of the model
        k: number of cell types
        data: (N, m) data matrix

    Returns:
        float: MDL cost (log-likelihood plus a parameter-count penalty)
    """
    N, m = data.shape
    param_count = N * m + m * k
    return ll + param_count * np.log(data.sum() / (N * k))
Returns the minimum description length score of the model given its log - likelihood and k the number of cell types .
47,242
def find_nb_genes(data):
    """Boolean mask of genes (rows) with mean < 0.9 * variance.

    Such overdispersed genes are candidates for a negative binomial
    fit rather than a Poisson fit.

    Args:
        data: (genes, cells) matrix

    Returns:
        1-d boolean array, one entry per gene
    """
    return data.mean(1) < 0.9 * data.var(1)
Finds the indices of all genes in the dataset that have a mean < 0 . 9 variance . Returns an array of booleans .
47,243
def nb_ll(data, P, R):
    """Negative binomial log-likelihood of the data.

    Args:
        data: (genes, cells) count matrix
        P: (genes, clusters) success probabilities
        R: (genes, clusters) dispersion parameters

    Returns:
        (cells, clusters) array: log-likelihood of each cell under each
        cluster's parameters (summed over genes).
    """
    genes, cells = data.shape
    clusters = P.shape[1]
    lls = np.zeros((cells, clusters))
    for c in range(clusters):
        p_col = P[:, c].reshape((genes, 1))
        r_col = R[:, c].reshape((genes, 1))
        # Per-gene NB log-pmf (up to the data-only constant term)
        per_gene = gammaln(r_col + data) - gammaln(r_col)
        per_gene += data * np.log(p_col) + xlog1py(r_col, -p_col)
        lls[:, c] = per_gene.sum(0)
    return lls
Returns the negative binomial log - likelihood of the data .
47,244
def zinb_ll(data, P, R, Z):
    """Zero-inflated negative binomial log-likelihood of the data.

    NOTE(review): the zero-inflation correction is NOT implemented —
    the per-cluster loop body is a ``pass`` stub, so ``Z`` is ignored
    and the plain NB log-likelihood is returned unchanged.
    """
    # Start from the plain negative binomial log-likelihood
    lls = nb_ll(data, P, R)
    clusters = P.shape[1]
    for c in range(clusters):
        # TODO: apply the zero-inflation term using Z[:, c]
        pass
    return lls
Returns the zero - inflated negative binomial log - likelihood of the data .
47,245
def nb_ll_row(params, data_row):
    """Negative log-likelihood of a single gene row under NB(p, r).

    Args:
        params: sequence (p, r) — success probability and dispersion
        data_row: 1-d array of counts for one gene

    Returns:
        float: negative log-likelihood (suitable for scipy.optimize)
    """
    p, r = params[0], params[1]
    n = len(data_row)
    ll = np.sum(gammaln(data_row + r))
    ll -= np.sum(gammaln(data_row + 1))
    ll -= n * gammaln(r)
    ll += np.sum(data_row) * np.log(p)
    ll += n * r * np.log(1 - p)
    return -ll
returns the negative LL of a single row .
47,246
def nb_fit(data, P_init=None, R_init=None, epsilon=1e-8, max_iters=100):
    """Fit per-gene negative binomial parameters.

    Parameters are initialized with the method of moments and then
    refined per gene by minimizing the row negative log-likelihood.

    Args:
        data: (genes, cells) count matrix; every gene must satisfy
            mean < variance for the moment estimates to be valid.
        P_init, R_init: currently unused; kept for interface
            compatibility (NOTE(review): the original also ignored them).
        epsilon (float): lower bound for R during the optimization.
        max_iters: currently unused (minimize uses its own defaults).

    Returns:
        (P, R): 1-d arrays of per-gene NB parameters.

    Raises:
        ValueError: if any gene has mean > variance.
    """
    means = data.mean(1)
    variances = data.var(1)
    if (means > variances).any():
        raise ValueError("For NB fit, means must be less than variances")
    genes, cells = data.shape
    # Method-of-moments initial estimates
    P = 1.0 - means / variances
    R = means * (1 - P) / P
    for i in range(genes):
        # BUG FIX: the original bounded R with the module-level `eps`
        # and silently ignored the `epsilon` parameter; use `epsilon`.
        result = minimize(
            nb_ll_row,
            [P[i], R[i]],
            args=(data[i, :],),
            bounds=[(0, 1), (epsilon, None)],
        )
        P[i] = result.x[0]
        R[i] = result.x[1]
    return P, R
Fits the NB distribution to data using method of moments .
47,247
def nb_cluster(data, k, P_init=None, R_init=None, assignments=None, means=None, max_iters=10):
    """Hard negative binomial clustering (EM-style loop).

    Genes whose mean exceeds their variance cannot be NB-fit and are
    modeled with a Poisson distribution instead (their cluster means
    live in ``means``); their NB parameters are set to sentinel values.

    Args:
        data: (genes, cells) count matrix
        k (int): number of clusters
        P_init, R_init: optional (genes, k) initial NB parameters
        assignments: optional initial per-cell cluster labels
        means: optional initial means passed to kmeans_pp
        max_iters (int): maximum number of fit/assign iterations

    Returns:
        (assignments, P, R): final per-cell labels and NB parameters;
        Poisson-modeled genes get P = 0 and R = inf.
    """
    genes, cells = data.shape
    if P_init is None:
        P_init = np.random.random((genes, k))
    if R_init is None:
        R_init = np.random.randint(1, data.max(), (genes, k))
        R_init = R_init.astype(float)
    if assignments is None:
        _, assignments = kmeans_pp(data, k, means)
    means = np.zeros((genes, k))
    old_assignments = np.copy(assignments)
    for i in range(max_iters):
        # Re-fit parameters; returns a mask of the NB-fittable genes
        nb_gene_indices = fit_cluster(data, assignments, k, P_init, R_init, means)
        # NB log-likelihood for overdispersed genes...
        lls = nb_ll(data[nb_gene_indices, :], P_init[nb_gene_indices, :], R_init[nb_gene_indices, :])
        # ...plus Poisson log-likelihood for the remaining genes
        lls += pois_ll.poisson_ll(data[~nb_gene_indices, :], means[~nb_gene_indices, :])
        # Sentinel NB parameters for Poisson-modeled genes
        P_init[~nb_gene_indices, :] = 0
        R_init[~nb_gene_indices, :] = np.inf
        for c in range(cells):
            assignments[c] = np.argmax(lls[c, :])
        # Converged when no assignment changed this iteration
        if np.equal(assignments, old_assignments).all():
            break
        old_assignments = np.copy(assignments)
    return assignments, P_init, R_init
Performs negative binomial clustering on the given data . If some genes have mean > variance then these genes are fitted to a Poisson distribution .
47,248
def zip_ll(data, means, M):
    """Zero-inflated Poisson log-likelihood.

    Args:
        data: (genes, cells) count matrix
        means: (genes, clusters) Poisson means
        M: (genes, clusters) zero-inflation probabilities

    Returns:
        (cells, clusters) array of per-cell log-likelihoods
    """
    genes, cells = data.shape
    clusters = means.shape[1]
    ll = np.zeros((cells, clusters))
    is_zero = (data == 0)
    is_pos = (data > 0)
    for i in range(clusters):
        means_i = np.tile(means[:, i], (cells, 1)).transpose()
        L_i = np.tile(M[:, i], (cells, 1)).transpose()
        # Zeros: mixture of structural zeros and Poisson zeros
        ll_0 = np.log(L_i + (1 - L_i) * np.exp(-means_i))
        # Degenerate case L == 0 and mean == 0 would give log(1) anyway;
        # keep the original guard against numerical issues
        ll_0 = np.where((L_i == 0) & (means_i == 0), -means_i, ll_0)
        # Positive counts can only come from the Poisson component
        ll_1 = np.log(1 - L_i) + xlogy(data, means_i) - means_i
        ll_0 = np.where(is_zero, ll_0, 0.0)
        ll_1 = np.where(is_pos, ll_1, 0.0)
        ll[:, i] = np.sum(ll_0 + ll_1, 0)
    return ll
Calculates the zero - inflated Poisson log - likelihood .
47,249
def zip_ll_row(params, data_row):
    """Negative log-likelihood of one gene row under a ZIP(l, pi) model.

    Args:
        params: sequence (l, pi) — Poisson rate and zero-inflation prob
        data_row: 1-d array of counts

    Returns:
        float: negative log-likelihood (module-level eps guards log(0))
    """
    rate, pi = params[0], params[1]
    zero_mask = (data_row == 0)
    likelihood = zero_mask * pi + (1 - pi) * poisson.pmf(data_row, rate)
    return -np.log(likelihood + eps).sum()
Returns the negative log - likelihood of a row given ZIP data .
47,250
def preproc_data(data, gene_subset=False, **kwargs):
    """Basic data preprocessing before running gap score.

    Optionally restricts to uncurl's max-variance genes, then
    cell-normalizes, log-transforms, and projects cells down to at
    most 8 dimensions with truncated SVD.

    Args:
        data: (genes, cells) expression matrix
        gene_subset (bool): if True, keep only max-variance genes
        **kwargs: unused; accepted for interface compatibility

    Returns:
        (cells, <=8) array of SVD-projected, log-normalized data
    """
    # Local imports: uncurl is a project-specific optional dependency
    import uncurl
    from uncurl.preprocessing import log1p, cell_normalize
    from sklearn.decomposition import TruncatedSVD
    data_subset = data
    if gene_subset:
        gene_subset = uncurl.max_variance_genes(data)
        data_subset = data[gene_subset, :]
    # Cap components at n_rows - 1 so TruncatedSVD stays valid for tiny inputs
    tsvd = TruncatedSVD(min(8, data_subset.shape[0] - 1))
    data_tsvd = tsvd.fit_transform(log1p(cell_normalize(data_subset)).T)
    return data_tsvd
basic data preprocessing before running gap score
47,251
def calculate_bounding_box(data):
    """Per-dimension bounding box of the data.

    Args:
        data: (n, m) array

    Returns:
        tuple: (mins, maxes), each a length-m array of the min/max
        along every dimension.
    """
    return data.min(0), data.max(0)
Returns a tuple ( mins , maxes ) of two length - m arrays giving the min and max along each dimension .
47,252
def run_gap_k_selection(data, k_min=1, k_max=50, B=5, skip=5, **kwargs):
    """Select a cluster count via the gap statistic over k_min..k_max.

    First scans with stride ``skip`` to bracket a candidate k, then
    (when skip > 1) rescans the bracket with stride 1.

    NOTE(review): the return type is inconsistent — a bare int when
    k_min == k_max, otherwise a (k, gap_vals, sk_vals) tuple; the final
    ``return k, ...`` also relies on the last loop variable.  Callers
    must handle both shapes.

    Args:
        data: (samples, features) preprocessed matrix
        k_min, k_max (int): search bounds
        B (int): number of reference datasets for the gap statistic
        skip (int): stride of the coarse scan
        **kwargs: unused

    Returns:
        int or (int, list, list): selected k plus gap/sk traces.
    """
    if k_min == k_max:
        return k_min
    gap_vals = []
    sk_vals = []
    k_range = list(range(k_min, k_max, skip))
    min_k = 0
    min_i = 0
    for i, k in enumerate(k_range):
        km = KMeans(k)
        clusters = km.fit_predict(data)
        gap, sk = calculate_gap(data, clusters, km, B=B)
        if len(gap_vals) > 1:
            # Stop when the previous gap beats this one by a slack margin
            if gap_vals[-1] >= gap - (skip + 1) * sk:
                min_i = i
                min_k = k_range[i - 1]
                break
        gap_vals.append(gap)
        sk_vals.append(sk)
    if min_k == 0:
        # No early stop happened: fall back to the largest k
        min_k = k_max
    if skip == 1:
        return min_k, gap_vals, sk_vals
    # Fine scan with stride 1 around the coarse winner
    gap_vals = []
    sk_vals = []
    for k in range(min_k - skip, min_k + skip):
        km = KMeans(k)
        clusters = km.fit_predict(data)
        gap, sk = calculate_gap(data, clusters, km, B=B)
        if len(gap_vals) > 1:
            if gap_vals[-1] >= gap - sk:
                min_k = k - 1
                return min_k, gap_vals, sk_vals
        gap_vals.append(gap)
        sk_vals.append(sk)
    return k, gap_vals, sk_vals
Runs gap score for all k from k_min to k_max .
47,253
def get_devices(self) -> list:
    """Return VeSync devices registered to the account.

    Returns:
        tuple: (outlets, switches, fans) device lists, or None when the
        manager is not enabled (not logged in).
    """
    if not self.enabled:
        return None
    self.in_process = True
    # BUG FIX: the original left outlets/switches/fans unbound (raising
    # UnboundLocalError at the return) when the API call failed or the
    # response had no device list; default to empty lists.
    outlets, switches, fans = [], [], []
    response, _ = helpers.call_api(
        '/cloud/v1/deviceManaged/devices',
        'post',
        headers=helpers.req_headers(self),
        json=helpers.req_body(self, 'devicelist'))
    if response and helpers.check_response(response, 'get_devices'):
        if 'result' in response and 'list' in response['result']:
            device_list = response['result']['list']
            outlets, switches, fans = self.process_devices(device_list)
        else:
            logger.error('Device list in response not found')
    else:
        logger.error('Error retrieving device list')
    self.in_process = False
    return (outlets, switches, fans)
Return list of VeSync devices
47,254
def login(self) -> bool:
    """Log in to VeSync.

    Validates the stored credentials, calls the login endpoint, and on
    success stores the token/account id and enables the manager.

    Returns:
        bool: True when the login request succeeds
    """
    user_ok = isinstance(self.username, str) and len(self.username) > 0
    pass_ok = isinstance(self.password, str) and len(self.password) > 0
    if not (user_ok and pass_ok):
        if user_ok is False:
            logger.error('Username invalid')
        if pass_ok is False:
            logger.error('Password invalid')
        return False
    response, _ = helpers.call_api(
        '/cloud/v1/user/login', 'post',
        json=helpers.req_body(self, 'login'))
    if response and helpers.check_response(response, 'login'):
        self.token = response['result']['token']
        self.account_id = response['result']['accountID']
        self.enabled = True
        return True
    logger.error('Error logging in with username and password')
    return False
Return True if log in request succeeds
47,255
def update(self):
    """Fetch updated information about devices.

    Skips the refresh entirely when the polling interval has not
    elapsed or a fetch is already in progress.
    """
    if not self.device_time_check():
        return
    if self.in_process:
        return
    outlets, switches, fans = self.get_devices()
    self.outlets = helpers.resolve_updates(self.outlets, outlets)
    self.switches = helpers.resolve_updates(self.switches, switches)
    self.fans = helpers.resolve_updates(self.fans, fans)
    self.last_update_ts = time.time()
Fetch updated information about devices
47,256
def update_energy(self, bypass_check=False):
    """Fetch updated energy information for every outlet device."""
    for device in self.outlets:
        device.update_energy(bypass_check)
Fetch updated energy information about devices
47,257
def DistFitDataset(Dat):
    """Per-gene fit error for Poisson, Normal and Log-Normal models.

    Args:
        Dat: (genes, cells) data matrix

    Returns:
        dict with keys 'poiss', 'norm', 'lognorm', each mapping to a
        length-genes array of fit errors from GetDistFitError.
    """
    n_rows = Dat.shape[0]
    poiss_err = np.zeros(n_rows)
    norm_err = np.zeros(n_rows)
    lognorm_err = np.zeros(n_rows)
    for row in range(n_rows):
        errors = GetDistFitError(Dat[row])
        poiss_err[row] = errors['poiss']
        norm_err[row] = errors['norm']
        lognorm_err[row] = errors['lognorm']
    return {'poiss': poiss_err, 'norm': norm_err, 'lognorm': lognorm_err}
Given a data matrix this returns the per - gene fit error for the Poisson Normal and Log - Normal distributions .
47,258
def annotate(ctx, variant_file, sv):
    """Annotate the variants in a VCF with database observation counts.

    Streams the annotated header and variants to stdout via click.

    Args:
        ctx: click context carrying the database adapter
        variant_file (str): path to the VCF to annotate
        sv (bool): True when the VCF contains structural variants
    """
    adapter = ctx.obj['adapter']
    variant_path = os.path.abspath(variant_file)
    expected_type = 'snv'
    if sv:
        expected_type = 'sv'
    # BUG FIX: the original tested `if 'sv':` — a non-empty string is
    # always truthy, so the SNV case-count branch was unreachable.
    if sv:
        nr_cases = adapter.nr_cases(sv_cases=True)
    else:
        nr_cases = adapter.nr_cases(snv_cases=True)
    LOG.info("Found {0} {1} cases in database".format(nr_cases, expected_type))
    vcf_obj = get_file_handle(variant_path)
    add_headers(vcf_obj, nr_cases=nr_cases, sv=sv)
    # Echo the (now extended) header, skipping blank lines
    for header_line in vcf_obj.raw_header.split('\n'):
        if len(header_line) == 0:
            continue
        click.echo(header_line)
    start_inserting = datetime.now()
    if sv:
        annotated_variants = annotate_svs(adapter, vcf_obj)
    else:
        annotated_variants = annotate_snvs(adapter, vcf_obj)
    for variant in annotated_variants:
        click.echo(str(variant).rstrip())
Annotate the variants in a VCF
47,259
async def find(self, *tracking_numbers: str) -> list:
    """Get tracking info for one or more tracking numbers.

    Args:
        *tracking_numbers: tracking numbers to look up

    Returns:
        list: a Package per number that had tracking data

    Raises:
        InvalidTrackingNumberError: when the API returns no data
    """
    data = {'data': [{'num': num} for num in tracking_numbers]}
    tracking_resp = await self._request('post', API_URL_TRACK, json=data)
    # BUG FIX: removed a stray debug `print(tracking_resp)` that leaked
    # raw API responses to stdout in production.
    if not tracking_resp.get('dat'):
        raise InvalidTrackingNumberError('Invalid data')
    packages = []
    for info in tracking_resp['dat']:
        package_info = info.get('track', {})
        if not package_info:
            continue
        kwargs = {
            'destination_country': package_info.get('c'),
            'info_text': package_info.get('z0', {}).get('z'),
            'location': package_info.get('z0', {}).get('c'),
            'origin_country': package_info.get('b'),
            'package_type': package_info.get('d', 0),
            'status': package_info.get('e', 0),
            'tracking_info_language': package_info.get('ln1', 'Unknown'),
        }
        packages.append(Package(info['no'], **kwargs))
    return packages
Get tracking info for one or more tracking numbers .
47,260
def binarize(qualitative):
    """Binarize an expression dataset against per-gene midpoint thresholds.

    Each gene's threshold is the midpoint between its min and max
    across columns; entries above the threshold become 1, others 0.

    Args:
        qualitative: (genes, clusters) matrix

    Returns:
        (genes, clusters) int array of 0/1 values
    """
    thresholds = qualitative.min(1) + (qualitative.max(1) - qualitative.min(1)) / 2.0
    # BUG FIX / generalization: the column count was hard-coded to 8;
    # use the actual number of columns so any cluster count works
    # (identical behavior for the original 8-column inputs).
    n_cols = qualitative.shape[1]
    binarized = qualitative > thresholds.reshape((len(thresholds), 1)).repeat(n_cols, 1)
    return binarized.astype(int)
binarizes an expression dataset .
47,261
def qualNorm_filter_genes(data, qualitative, pval_threshold=0.05, smoothing=1e-5, eps=1e-5):
    """qualNorm with gene filtering based on a p-value threshold.

    For each gene with qualitative information, cells are split into a
    high/low group by 2-cluster Poisson clustering; genes whose two
    groups are not significantly different (poisson_test p-value above
    ``pval_threshold``) are dropped from the output.

    Args:
        data: (genes, cells) matrix (dense or scipy sparse)
        qualitative: (genes, clusters) matrix; rows that are all -1 are
            treated as missing and skipped
        pval_threshold (float): keep genes with p-value <= this
        smoothing (float): smoothing passed to poisson_test
        eps (float): added to the low-group median to avoid zeros

    Returns:
        (output, pvals, genes_included): filtered (kept_genes, clusters)
        matrix of group medians, per-kept-gene p-values, and the list of
        kept gene indices.
    """
    genes, cells = data.shape
    clusters = qualitative.shape[1]
    output = np.zeros((genes, clusters))
    missing_indices = []
    genes_included = []
    qual_indices = []
    # Per-gene midpoint between min and max qualitative value
    thresholds = qualitative.min(1) + (qualitative.max(1) - qualitative.min(1)) / 2.0
    pvals = np.zeros(genes)
    for i in range(genes):
        # A row of all -1 means no qualitative info for this gene
        if qualitative[i, :].max() == -1 and qualitative[i, :].min() == -1:
            missing_indices.append(i)
            continue
        qual_indices.append(i)
        threshold = thresholds[i]
        data_i = data[i, :]
        if sparse.issparse(data):
            data_i = data_i.toarray().flatten()
        # Split this gene's expression into two Poisson clusters
        assignments, means = poisson_cluster(data_i.reshape((1, cells)), 2)
        means = means.flatten()
        high_i = 1
        low_i = 0
        if means[0] > means[1]:
            high_i = 0
            low_i = 1
        p_val = poisson_test(data_i[assignments == low_i], data_i[assignments == high_i], smoothing=smoothing)
        pvals[i] = p_val
        if p_val <= pval_threshold:
            genes_included.append(i)
        else:
            continue
        high_mean = np.median(data_i[assignments == high_i])
        low_mean = np.median(data_i[assignments == low_i]) + eps
        # Assign the high or low median based on the qualitative value
        for k in range(clusters):
            if qualitative[i, k] > threshold:
                output[i, k] = high_mean
            else:
                output[i, k] = low_mean
    output = output[genes_included, :]
    pvals = pvals[genes_included]
    return output, pvals, genes_included
Does qualNorm but returns a filtered gene set based on a p - value threshold .
47,262
def script_dir(pyobject, follow_symlinks=True):
    """Return the directory containing *pyobject*'s source file.

    Handles frozen executables (e.g. PyInstaller) by falling back to
    sys.executable, since frozen apps have no source files on disk.

    Args:
        pyobject: any object inspect can locate a source file for
        follow_symlinks (bool): resolve symlinks before taking dirname

    Returns:
        str: absolute directory path
    """
    if getattr(sys, 'frozen', False):
        source_path = abspath(sys.executable)
    else:
        source_path = inspect.getabsfile(pyobject)
    if follow_symlinks:
        source_path = realpath(source_path)
    return dirname(source_path)
Get current script s directory
47,263
def script_dir_plus_file(filename, pyobject, follow_symlinks=True):
    """Return *filename* joined onto the directory of *pyobject*'s script."""
    base = script_dir(pyobject, follow_symlinks)
    return join(base, filename)
Get current script s directory and then append a filename
47,264
def identity(ctx, variant_id):
    """Check how well SVs cluster: print every cluster hit by a variant id.

    Args:
        ctx: click context with 'adapter' and 'version' in ctx.obj
        variant_id (str): id of the variant to look up
    """
    if not variant_id:
        LOG.warning("Please provide a variant id")
        ctx.abort()
    adapter = ctx.obj['adapter']
    # NOTE(review): `version` is fetched but never used
    version = ctx.obj['version']
    LOG.info("Search variants {0}".format(adapter))
    result = adapter.get_clusters(variant_id)
    # NOTE(review): cursor.count() was removed in pymongo 4 — confirm
    # the adapter returns an object that still supports .count()
    if result.count() == 0:
        LOG.info("No hits for variant %s", variant_id)
        return
    for res in result:
        click.echo(res)
Check how well SVs are working in the database
47,265
def registration_authority_entity_id(self):
    """Entity id at the registration authority, when present.

    Some entities include this field and others do not (unclear if
    that is a bug or inconsistently registered data); returns None
    when the nested keys are missing.
    """
    if ATTR_ENTITY_REGISTRATION_AUTHORITY not in self.raw:
        return None
    try:
        authority = self.raw[ATTR_ENTITY_REGISTRATION_AUTHORITY]
        return authority[ATTR_ENTITY_REGISTRATION_AUTHORITY_ENTITY_ID][ATTR_DOLLAR_SIGN]
    except KeyError:
        return None
Some entities return the register entity id but other do not . Unsure if this is a bug or inconsistently registered data .
47,266
def legal_form(self):
    """Legal form of the entity.

    The raw data stores either a plain legal-form name or a 4-character
    ELF code.  The code is looked up in LEGAL_FORMS for the entity's
    jurisdiction; when no mapping exists, a 4-character code is returned
    prefixed with 'ELF code: ', otherwise the raw value is returned.
    """
    if ATTR_ENTITY_LEGAL_FORM not in self.raw:
        return None
    try:
        return LEGAL_FORMS[self.legal_jurisdiction][
            self.raw[ATTR_ENTITY_LEGAL_FORM][ATTR_ENTITY_LEGAL_FORM_CODE][ATTR_DOLLAR_SIGN]]
    except KeyError:
        raw_code = self.raw[ATTR_ENTITY_LEGAL_FORM][ATTR_ENTITY_LEGAL_FORM_CODE][ATTR_DOLLAR_SIGN]
        if len(raw_code) == 4:
            return 'ELF code: ' + raw_code
        return raw_code
In some cases the legal form is stored in the JSON - data . In other cases an ELF - code consisting of mix of exactly four letters and numbers are stored . This ELF - code can be looked up in a registry where a code maps to a organizational type . ELF - codes are not unique it can reoccur under different names in different countries
47,267
def valid_child_records ( self ) : child_lei = list ( ) for d in self . raw [ 'data' ] : if d [ 'attributes' ] [ 'relationship' ] [ 'status' ] in [ 'ACTIVE' ] : child_lei . append ( d [ 'attributes' ] [ 'relationship' ] [ 'startNode' ] [ 'id' ] ) return child_lei
Loop through data to find a valid record . Return list of LEI .
47,268
def from_geojson(geojson, srid=4326):
    """Create a Geometry from a GeoJSON mapping.

    Dispatches on the GeoJSON "type" field to the matching Geometry
    subclass.  GeometryCollections recurse with srid=None so only the
    outer collection carries the SRID.

    Args:
        geojson (dict): GeoJSON mapping with "type" and coordinate data
        srid (int): spatial reference id (GeoJSON's expected default
            of 4326 can be overridden)

    Returns:
        a Geometry subclass instance; NOTE(review): unrecognized types
        fall through and return None — confirm whether raising would be
        preferable.
    """
    type_ = geojson["type"].lower()
    if type_ == "geometrycollection":
        geometries = []
        for geometry in geojson["geometries"]:
            geometries.append(Geometry.from_geojson(geometry, srid=None))
        return GeometryCollection(geometries, srid)
    elif type_ == "point":
        return Point(geojson["coordinates"], srid=srid)
    elif type_ == "linestring":
        return LineString(geojson["coordinates"], srid=srid)
    elif type_ == "polygon":
        return Polygon(geojson["coordinates"], srid=srid)
    elif type_ == "multipoint":
        geometries = _MultiGeometry._multi_from_geojson(geojson, Point)
        return MultiPoint(geometries, srid=srid)
    elif type_ == "multilinestring":
        geometries = _MultiGeometry._multi_from_geojson(geojson, LineString)
        return MultiLineString(geometries, srid=srid)
    elif type_ == "multipolygon":
        geometries = _MultiGeometry._multi_from_geojson(geojson, Polygon)
        return MultiPolygon(geometries, srid=srid)
Create a Geometry from a GeoJSON . The SRID can be overridden from the expected 4326 .
47,269
def from_shapely(sgeom, srid=None):
    """Create a Geometry from a Shapely geometry, optionally setting SRID.

    Raises:
        DependencyError: when Shapely is not installed
    """
    if not SHAPELY:
        raise DependencyError("Shapely")
    # Make sure the SRID survives the WKB round-trip
    WKBWriter.defaults["include_srid"] = True
    if srid:
        lgeos.GEOSSetSRID(sgeom._geom, srid)
    return Geometry(sgeom.wkb_hex)
Create a Geometry from a Shapely geometry and the specified SRID .
47,270
def postgis_type(self):
    """PostGIS type string for this geometry, e.g. 'geometry(PointZ,4326)'.

    Includes the Z/M dimension markers and the SRID when present.
    """
    suffix = "{}{}".format("Z" if self.dimz else "", "M" if self.dimm else "")
    if self.srid:
        return "geometry({}{},{})".format(self.type, suffix, self.srid)
    return "geometry({}{})".format(self.type, suffix)
Get the type of the geometry in PostGIS format including additional dimensions and SRID if they exist .
47,271
def poisson_ll(data, means):
    """Poisson log-likelihood of each cell under each cluster mean.

    Args:
        data: (genes, cells) count matrix (dense or scipy sparse)
        means: (genes, clusters) Poisson means

    Returns:
        (cells, clusters) array of log-likelihoods
    """
    if sparse.issparse(data):
        return sparse_poisson_ll(data, means)
    genes, cells = data.shape
    clusters = means.shape[1]
    ll = np.zeros((cells, clusters))
    for c in range(clusters):
        # Broadcast the cluster means over all cells; eps avoids log(0)
        cluster_means = np.tile(means[:, c], (cells, 1)).transpose() + eps
        ll[:, c] = np.sum(xlogy(data, cluster_means) - cluster_means, 0)
    return ll
Calculates the Poisson log - likelihood .
47,272
def poisson_dist(p1, p2):
    """Poisson distance between two vectors.

    Computes dot(a - b, log(a / b)) with eps added to both vectors to
    avoid division by zero and log(0).
    """
    a = p1 + eps
    b = p2 + eps
    return np.dot(a - b, np.log(a / b))
Calculates the Poisson distance between two vectors .
47,273
def delete(ctx, family_file, family_type, case_id):
    """Delete a case and its variants from the database.

    Args:
        ctx: click context carrying the database adapter
        family_file (str): optional ped-like file to read the case from
        family_type (str): format of family_file
        case_id (str): explicit case id (takes precedence over the file)
    """
    if not (family_file or case_id):
        LOG.error("Please provide a family file")
        ctx.abort()
    adapter = ctx.obj['adapter']
    family = None
    family_id = None
    if family_file:
        with open(family_file, 'r') as family_lines:
            family = get_case(family_lines=family_lines, family_type=family_type)
            family_id = family.family_id
    case_id = case_id or family_id
    if not case_id:
        LOG.warning("Please provide a case id")
        ctx.abort()
    existing_case = adapter.case({'case_id': case_id})
    if not existing_case:
        LOG.warning("Case %s does not exist in database" % case_id)
        # BUG FIX: was `context.abort` — an undefined name AND a missing
        # call, so unknown cases fell through to the delete attempt.
        ctx.abort()
    start_deleting = datetime.now()
    try:
        delete_command(adapter=adapter, case_obj=existing_case)
    except (CaseError, IOError) as error:
        LOG.warning(error)
        ctx.abort()
Delete the variants of a case .
47,274
def update_energy(self, bypass_check: bool = False):
    """Build the weekly, monthly and yearly energy dictionaries.

    Skips the refresh when the polling interval has not elapsed, unless
    bypass_check is True; bypassed updates do not touch the timestamp.
    """
    # `a or (not a and b)` is logically equivalent to `a or b`
    if bypass_check or self.update_time_check:
        self.get_weekly_energy()
        if 'week' in self.energy:
            self.get_monthly_energy()
            self.get_yearly_energy()
        if not bypass_check:
            self.update_energy_ts = time.time()
Builds weekly monthly and yearly dictionaries
47,275
def turn_on_nightlight(self):
    """Turn on the outlet's nightlight; return True on API success."""
    body = helpers.req_body(self.manager, 'devicestatus')
    body['uuid'] = self.uuid
    body['mode'] = 'auto'
    response, _ = helpers.call_api(
        '/15a/v1/device/nightlightstatus',
        'put',
        headers=helpers.req_headers(self.manager),
        json=body)
    return helpers.check_response(response, '15a_ntlight')
Turn on nightlight
47,276
def like_button_js_tag(context):
    """Template tag: expose the Facebook app id and channel url.

    Checks that FACEBOOK_APP_ID is configured in the Django settings
    and that a request is available; otherwise marks the tag invalid so
    the template renders nothing.
    """
    if FACEBOOK_APP_ID is None:
        log.warning("FACEBOOK_APP_ID isn't setup correctly in your settings")
    request = context.get('request', None) if FACEBOOK_APP_ID else None
    if request:
        return {
            "LIKE_BUTTON_IS_VALID": True,
            "facebook_app_id": FACEBOOK_APP_ID,
            "channel_base_url": request.get_host(),
        }
    return {"LIKE_BUTTON_IS_VALID": False}
This tag will check to see if they have the FACEBOOK_LIKE_APP_ID setup correctly in the django settings if so then it will pass the data along to the intercom_tag template to be displayed .
47,277
def like_button_tag(context):
    """Template tag: build the context for a Facebook like button.

    Checks that FACEBOOK_APP_ID is configured in the Django settings
    and that a request is available; otherwise marks the tag invalid.
    """
    if FACEBOOK_APP_ID is None:
        log.warning("FACEBOOK_APP_ID isn't setup correctly in your settings")
    if FACEBOOK_APP_ID:
        request = context.get('request', None)
        if request:
            return {
                "LIKE_BUTTON_IS_VALID": True,
                "path_to_like": "http://" + request.get_host() + request.get_full_path(),
                "show_send": true_false_converter(FACEBOOK_SHOW_SEND),
                "like_width": FACEBOOK_LIKE_WIDTH,
                "show_faces": true_false_converter(FACEBOOK_SHOW_FACES),
                "font": FACEBOOK_FONT,
                "like_layout": FACEBOOK_LIKE_LAYOUT,
            }
    return {"LIKE_BUTTON_IS_VALID": False}
This tag will check to see if they have the FACEBOOK_APP_ID setup correctly in the django settings if so then it will pass the data along to the intercom_tag template to be displayed .
47,278
def get_structural_variant(self, variant):
    """Find the closest overlapping SV cluster for a variant.

    Queries clusters on the same chromosome pair and sv_type whose
    [pos_left, pos_right] interval contains the variant's start, then
    among candidates whose end interval also contains the variant's
    end, keeps the one whose interval midpoints are closest to the
    variant's (pos, end).

    Args:
        variant (dict): needs chrom, end_chrom, sv_type, pos, end

    Returns:
        dict or None: the best matching cluster document
    """
    query = {
        'chrom': variant['chrom'],
        'end_chrom': variant['end_chrom'],
        'sv_type': variant['sv_type'],
        '$and': [
            {'pos_left': {'$lte': variant['pos']}},
            {'pos_right': {'$gte': variant['pos']}},
        ]
    }
    res = self.db.structural_variant.find(query).sort('pos_left', 1)
    match = None
    distance = None
    closest_hit = None
    for hit in res:
        # The variant's end must lie within [end_left, end_right]
        if hit['end_left'] > variant['end']:
            continue
        if hit['end_right'] < variant['end']:
            continue
        # Distance = sum of |coordinate - interval midpoint| for pos and end
        distance = (abs(variant['pos'] - (hit['pos_left'] + hit['pos_right']) / 2) + abs(variant['end'] - (hit['end_left'] + hit['end_right']) / 2))
        if closest_hit is None:
            match = hit
            closest_hit = distance
            continue
        if distance < closest_hit:
            match = hit
            closest_hit = distance
    return match
Check if there are any overlapping sv clusters
47,279
def get_sv_variants(self, chromosome=None, end_chromosome=None, sv_type=None, pos=None, end=None):
    """Return all structural variants in the database matching the filters.

    ``pos``/``end`` match clusters whose [pos_left, pos_right] /
    [end_left, end_right] interval contains the coordinate.

    Returns:
        pymongo cursor sorted by (chrom, pos_left)
    """
    query = {}
    if chromosome:
        query['chrom'] = chromosome
    if end_chromosome:
        query['end_chrom'] = end_chromosome
    if sv_type:
        query['sv_type'] = sv_type
    interval_filters = []
    if pos:
        interval_filters.append({'pos_left': {'$lte': pos}})
        interval_filters.append({'pos_right': {'$gte': pos}})
    if end:
        interval_filters.append({'end_left': {'$lte': end}})
        interval_filters.append({'end_right': {'$gte': end}})
    if interval_filters:
        query['$and'] = interval_filters
    LOG.info("Find all sv variants {}".format(query))
    return self.db.structural_variant.find(query).sort(
        [('chrom', ASCENDING), ('pos_left', ASCENDING)])
Return all structural variants in the database
47,280
def get_details(self):
    """Fetch air-purifier details from the API and cache them on self.

    Populates device_status, connection_status and the self.details
    dict (active time, filter life, screen status, mode, fan level).
    """
    body = helpers.req_body(self.manager, 'devicedetail')
    head = helpers.req_headers(self.manager)
    r, _ = helpers.call_api(
        '/131airpurifier/v1/device/deviceDetail',
        method='post', headers=head, json=body)
    if r is not None and helpers.check_response(r, 'airpur_detail'):
        self.device_status = r.get('deviceStatus', 'unknown')
        self.connection_status = r.get('connectionStatus', 'unknown')
        self.details['active_time'] = r.get('activeTime', 0)
        self.details['filter_life'] = r.get('filterLife', {})
        # NOTE(review): 'screeen_status' is misspelled, but consumers
        # may already read this key — confirm before renaming it.
        self.details['screeen_status'] = r.get('screenStatus', 'unknown')
        self.details['mode'] = r.get('mode', 'unknown')
        self.details['level'] = r.get('level', None)
Build details dictionary
47,281
def turn_on(self):
    """Turn the air purifier on.

    Returns True on API success, False on failure; returns None when
    the device is already on (no request is made, matching the
    original fall-through behavior).
    """
    if self.device_status == 'on':
        return None
    body = helpers.req_body(self.manager, 'devicestatus')
    body['uuid'] = self.uuid
    body['status'] = 'on'
    r, _ = helpers.call_api(
        '/131airPurifier/v1/device/deviceStatus', 'put',
        json=body, headers=helpers.req_headers(self.manager))
    if r is not None and helpers.check_response(r, 'airpur_status'):
        self.device_status = 'on'
        return True
    return False
Turn Air Purifier on
47,282
def fan_speed(self, speed: int = None) -> bool:
    """Set fan speed (1, 2 or 3) or cycle to the next speed.

    If the device is not in manual mode it is switched to manual first.

    NOTE(review): two suspected defects, flagged but left untouched:
    - in the cycle branch (speed is None) `level` is referenced before
      assignment, which would raise NameError — verify intended flow
    - after the mode-toggle branch, body['level'] is never set, so the
      final self.details['level'] = body['level'] would raise KeyError

    Args:
        speed (int|None): target speed; None cycles current speed + 1

    Returns:
        bool: True when the API accepted the new speed
    """
    body = helpers.req_body(self.manager, 'devicestatus')
    body['uuid'] = self.uuid
    head = helpers.req_headers(self.manager)
    if self.details.get('mode') != 'manual':
        # Manual mode is required before a speed can be set
        self.mode_toggle('manual')
    else:
        if speed is not None:
            level = int(self.details.get('level'))
            if speed == level:
                # Already at the requested speed
                return False
            elif speed in [1, 2, 3]:
                body['level'] = speed
        else:
            # Cycle 1 -> 2 -> 3 -> 1
            if (level + 1) > 3:
                body['level'] = 1
            else:
                body['level'] = int(level + 1)
    r, _ = helpers.call_api(
        '/131airPurifier/v1/device/updateSpeed', 'put',
        json=body, headers=head)
    if r is not None and helpers.check_response(r, 'airpur_status'):
        self.details['level'] = body['level']
        return True
    else:
        return False
Adjust Fan Speed by Specifying 1 2 3 as argument or cycle through speeds increasing by one
47,283
def mode_toggle(self, mode: str) -> bool:
    """Set the purifier mode to 'manual', 'auto' or 'sleep'.

    Returns True when the API accepts the change; False when the mode
    is invalid, already active, or the request fails.
    """
    head = helpers.req_headers(self.manager)
    body = helpers.req_body(self.manager, 'devicestatus')
    body['uuid'] = self.uuid
    if mode == body['mode'] or mode not in ['sleep', 'auto', 'manual']:
        return False
    body['mode'] = mode
    if mode == 'manual':
        # Manual mode requires an explicit fan level
        body['level'] = 1
    r, _ = helpers.call_api(
        '/131airPurifier/v1/device/updateMode', 'put',
        json=body, headers=head)
    if r is not None and helpers.check_response(r, 'airpur_status'):
        self.details['mode'] = mode
        return True
    return False
Set mode to manual auto or sleep
47,284
def fourier_series(x, *a):
    """Evaluate an arbitrary-order truncated Fourier series at x.

    a[0] is the constant term (halved), a[1] the base frequency, and
    each subsequent pair (a[2k], a[2k+1]) gives the sine/cosine
    coefficients of harmonic k.
    """
    total = a[0] / 2
    w = a[1]
    for idx in range(2, len(a), 2):
        harmonic = idx / 2
        total += a[idx] * np.sin(harmonic * x * w)
        total += a[idx + 1] * np.cos(harmonic * x * w)
    return total
Arbitrary dimensionality fourier series .
47,285
def poly_curve(x, *a):
    """Evaluate a polynomial at x with coefficients a, lowest order first.

    The accumulator starts at 0.0 so a float is returned for scalar
    integer inputs, matching the original behavior.
    """
    return sum((coeff * x ** power for power, coeff in enumerate(a)), 0.0)
Arbitrary dimension polynomial .
47,286
def set_ocha_url ( cls , url = None ) : if url is None : url = cls . _ochaurl_int cls . _ochaurl = url
Set OCHA url from which to retrieve countries data
47,287
def get_country_info_from_iso3(cls, iso3, use_live=True, exception=None):
    """Country information for an ISO3 code (case-insensitive).

    Args:
        iso3 (str): ISO3 code
        use_live (bool): refresh countries data from the live source
        exception: optional exception to raise when the code is unknown

    Returns:
        dict or None: country information, None when unknown
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    info = countriesdata['countries'].get(iso3.upper())
    if info is not None:
        return info
    if exception is not None:
        raise exception
    return None
Get country information from ISO3 code
47,288
def get_country_name_from_iso3 ( cls , iso3 , use_live = True , exception = None ) : countryinfo = cls . get_country_info_from_iso3 ( iso3 , use_live = use_live , exception = exception ) if countryinfo is not None : return countryinfo . get ( '#country+name+preferred' ) return None
Get country name from ISO3 code
47,289
def get_iso2_from_iso3 ( cls , iso3 , use_live = True , exception = None ) : countriesdata = cls . countriesdata ( use_live = use_live ) iso2 = countriesdata [ 'iso2iso3' ] . get ( iso3 . upper ( ) ) if iso2 is not None : return iso2 if exception is not None : raise exception return None
Get ISO2 from ISO3 code
47,290
def get_m49_from_iso3(cls, iso3, use_live=True, exception=None):
    """M49 numeric code for an ISO3 code (None when unknown).

    NOTE(review): unlike the ISO2 lookup this does not uppercase the
    input — confirm the 'm49iso3' keys are stored uppercase.
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    m49 = countriesdata['m49iso3'].get(iso3)
    if m49 is not None:
        return m49
    if exception is not None:
        raise exception
    return None
Get M49 from ISO3 code
47,291
def simplify_countryname(cls, country):
    """Simplify a country name by removing descriptive text.

    Strips text after ',' or ':', parenthesised text, known
    abbreviations and simplification words (e.g. "DEMOCRATIC REPUBLIC
    OF"), leaving the main word.

    Args:
        country (str): country name

    Returns:
        (str, list): simplified uppercase name (may be empty) and the
        remaining words of the original name.
    """
    countryupper = country.upper()
    words = get_words_in_sentence(countryupper)
    index = countryupper.find(',')
    if index != -1:
        countryupper = countryupper[:index]
    index = countryupper.find(':')
    if index != -1:
        countryupper = countryupper[:index]
    # BUG FIX: use a raw string — '\(' in a normal string literal is an
    # invalid escape sequence (DeprecationWarning today, SyntaxError in
    # future Python versions).  The matched pattern is unchanged.
    regex = re.compile(r'\(.+?\)')
    countryupper = regex.sub('', countryupper)
    remove = copy.deepcopy(cls.simplifications)
    for simplification1, simplification2 in cls.abbreviations.items():
        countryupper = countryupper.replace(simplification1, '')
        remove.append(simplification2)
    for simplification1, simplifications in cls.multiple_abbreviations.items():
        countryupper = countryupper.replace(simplification1, '')
        for simplification2 in simplifications:
            remove.append(simplification2)
    remove = '|'.join(remove)
    regex = re.compile(r'\b(' + remove + r')\b', flags=re.IGNORECASE)
    countryupper = regex.sub('', countryupper)
    countryupper = countryupper.strip()
    countryupper_words = get_words_in_sentence(countryupper)
    if len(countryupper_words) > 1:
        # Keep only the first remaining word as the simplified name
        countryupper = countryupper_words[0]
    if countryupper:
        words.remove(countryupper)
    return countryupper, words
Simplifies country name by removing descriptive text eg . DEMOCRATIC REPUBLIC OF etc .
47,292
def get_iso3_country_code ( cls , country , use_live = True , exception = None ) : countriesdata = cls . countriesdata ( use_live = use_live ) countryupper = country . upper ( ) len_countryupper = len ( countryupper ) if len_countryupper == 3 : if countryupper in countriesdata [ 'countries' ] : return countryupper elif len_countryupper == 2 : iso3 = countriesdata [ 'iso2iso3' ] . get ( countryupper ) if iso3 is not None : return iso3 iso3 = countriesdata [ 'countrynames2iso3' ] . get ( countryupper ) if iso3 is not None : return iso3 for candidate in cls . expand_countryname_abbrevs ( countryupper ) : iso3 = countriesdata [ 'countrynames2iso3' ] . get ( candidate ) if iso3 is not None : return iso3 if exception is not None : raise exception return None
Get ISO3 code for cls . Only exact matches or None are returned .
47,293
def get_iso3_country_code_fuzzy(cls, country, use_live=True, exception=None):
    """Get the ISO3 code for a country using fuzzy matching.

    Tries an exact match first, then scores every known country name
    against simplified variants of the input, and finally falls back to
    regex aliases.

    Args:
        country (str): Country name, ISO2 code or ISO3 code.
        use_live (bool): Use live data where available. Defaults to True.
        exception: Exception instance to raise when no match is found.
            Defaults to None (return (None, False) instead).

    Returns:
        tuple: (ISO3 code or None, bool indicating whether the match
        was exact).
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    # Exact lookup first: if it hits, the match is flagged as exact (True)
    iso3 = cls.get_iso3_country_code(country, use_live=use_live)
    if iso3 is not None:
        return iso3, True

    def remove_matching_from_list(wordlist, word_or_part):
        # Removes every word containing word_or_part as a substring.
        # NOTE(review): mutates wordlist while iterating over it, so
        # consecutive matching words can be skipped — confirm intended.
        for word in wordlist:
            if word_or_part in word:
                wordlist.remove(word)

    expanded_country_candidates = cls.expand_countryname_abbrevs(country)
    match_strength = 0
    matches = set()
    # Score every known country name against every candidate expansion.
    # Scoring: +32 for containing the simplified name, +4 per shared
    # descriptive word, -16 per unmatched "major differentiator" word
    # (e.g. NORTH/SOUTH), -1 per other unmatched word on either side.
    for countryname in sorted(countriesdata['countrynames2iso3']):
        for candidate in expanded_country_candidates:
            simplified_country, removed_words = cls.simplify_countryname(candidate)
            if simplified_country in countryname:
                words = get_words_in_sentence(countryname)
                new_match_strength = 0
                if simplified_country:
                    remove_matching_from_list(words, simplified_country)
                    new_match_strength += 32
                for word in removed_words:
                    if word in countryname:
                        remove_matching_from_list(words, word)
                        new_match_strength += 4
                    else:
                        if word in cls.major_differentiators:
                            new_match_strength -= 16
                        else:
                            new_match_strength -= 1
                # Penalise words of the known name not accounted for above
                for word in words:
                    if word in cls.major_differentiators:
                        new_match_strength -= 16
                    else:
                        new_match_strength -= 1
                iso3 = countriesdata['countrynames2iso3'][countryname]
                if new_match_strength > match_strength:
                    # New best score: discard weaker matches, keep only ties
                    match_strength = new_match_strength
                    matches = set()
                if new_match_strength == match_strength:
                    matches.add(iso3)
    # Accept only an unambiguous match above the +16 threshold
    if len(matches) == 1 and match_strength > 16:
        return matches.pop(), False
    # Fall back to regex aliases (e.g. historic or colloquial names)
    for iso3, regex in countriesdata['aliases'].items():
        index = re.search(regex, country.upper())
        if index is not None:
            return iso3, False
    if exception is not None:
        raise exception
    return None, False
Get the ISO3 code for a country. A tuple is returned with the first value being the ISO3 code and the second indicating whether the match is exact or not.
47,294
def load_profile(ctx, variant_file, update, stats, profile_threshold):
    """CLI command for sample profiling.

    Optionally loads profiling variants from a VCF, refreshes the stored
    profiles for all samples, and prints profile distance statistics.

    Args:
        ctx: click context; ctx.obj['adapter'] is the database adapter.
        variant_file: path to a VCF of profile variants, or falsy to skip.
        update (bool): refresh profiles for all samples when true.
        stats (bool): print profile distance statistics when true.
        profile_threshold: distance threshold passed to profile_stats.
    """
    adapter = ctx.obj['adapter']
    if variant_file:
        load_profile_variants(adapter, variant_file)
    if update:
        update_profiles(adapter)
    if not stats:
        return
    distances = profile_stats(adapter, threshold=profile_threshold)
    click.echo(table_from_dict(distances))
Command for profiling of samples. The user may upload variants used in profiling from a VCF, update the profiles for all samples, and get some stats from the profiles in the database.
47,295
def add_profile_variants(self, profile_variants):
    """Bulk-insert variants into the profile_variant collection.

    Args:
        profile_variants (list): variant documents to insert.

    Returns:
        The result object from the underlying insert_many call
        (presumably a pymongo InsertManyResult — confirm against adapter).
    """
    return self.db.profile_variant.insert_many(profile_variants)
Add several variants to the profile_variant collection in the database
47,296
def zip_fit_params(data):
    """Return the ZIP parameters that best fit a given data set.

    Method-of-moments estimate of zero-inflated Poisson parameters,
    computed per gene (per row).

    Args:
        data (array): genes x cells matrix of counts.

    Returns:
        L (array): per-gene Poisson rate (lambda), clipped to >= 0.
        M (array): per-gene zero-inflation weight, clipped to [0, 1].
    """
    genes, cells = data.shape  # unpack enforces a 2-D input
    m = data.mean(1)
    v = data.var(1)
    # All-zero genes give 0/0 here; silence the expected warnings and
    # handle the resulting NaNs below instead.
    with np.errstate(divide='ignore', invalid='ignore'):
        M = (v - m) / (m ** 2 + v - m)
        L = m + v / m - 1.0
    # Vectorized clipping replaces the original per-element Python loop.
    # nan_to_num maps NaN (0/0 cases) to 0 first, matching the original
    # min/max comparison semantics which also yielded 0 for NaN.
    M = np.clip(np.nan_to_num(M), 0.0, 1.0)
    L[np.isnan(L)] = 0.0
    L = np.maximum(L, 0.0)
    return L, M
Returns the ZIP parameters that best fit a given data set .
47,297
def zip_cluster(data, k, init=None, max_iters=100):
    """Performs hard EM clustering using the zero-inflated Poisson
    distribution.

    Args:
        data (array): genes x cells matrix.
        k (int): number of clusters.
        init (array, optional): initial centers passed through to
            kmeans_pp. Defaults to None.
        max_iters (int): maximum number of EM iterations. Defaults to 100.

    Returns:
        assignments (array): cluster index per cell.
        centers (array): genes x k Poisson rate parameters.
        M (array): genes x k zero-inflation weights.
    """
    genes, cells = data.shape
    # kmeans++ seeding on data + eps (eps presumably a module-level
    # pseudocount to avoid zero issues — confirm)
    init, new_assignments = kmeans_pp(data + eps, k, centers=init)
    centers = np.copy(init)
    M = np.zeros(centers.shape)
    assignments = new_assignments
    # Initial M-step: fit ZIP parameters per cluster via MLE
    for c in range(k):
        centers[:, c], M[:, c] = zip_fit_params_mle(data[:, assignments == c])
    for it in range(max_iters):
        # E-step: assign each cell to its max-likelihood cluster
        lls = zip_ll(data, centers, M)
        new_assignments = np.argmax(lls, 1)
        # Converged when assignments stop changing
        if np.equal(assignments, new_assignments).all():
            return assignments, centers, M
        # M-step. NOTE(review): parameters are refit using the OLD
        # assignments before the swap below — looks like it should use
        # new_assignments; confirm intended.
        for c in range(k):
            centers[:, c], M[:, c] = zip_fit_params_mle(data[:, assignments == c])
        assignments = new_assignments
    return assignments, centers, M
Performs hard EM clustering using the zero - inflated Poisson distribution .
47,298
def diffusion_mds(means, weights, d, diffusion_rounds=10):
    """Dimensionality reduction using MDS after diffusion on W.

    Each diffusion round squares the weights elementwise and re-normalizes
    the columns to sum to 1, concentrating each cell's weight on its
    dominant cluster before the MDS projection.

    Args:
        means (array): genes x k cluster means.
        weights (array): k x cells weight matrix.
        d (int): target dimensionality.
        diffusion_rounds (int): number of square-and-normalize rounds.

    Returns:
        array: d x cells embedding.
    """
    W = weights
    for _ in range(diffusion_rounds):
        W = W * W
        W = W / W.sum(0)
    X = dim_reduce(means, W, d)
    # dim_reduce's output orientation varies — project accordingly
    if X.shape[0] == 2:
        return X.dot(W)
    return X.T.dot(W)
Dimensionality reduction using MDS while running diffusion on W .
47,299
def mds(means, weights, d):
    """Dimensionality reduction using MDS.

    Args:
        means (array): genes x k cluster means.
        weights (array): k x cells weight matrix.
        d (int): target dimensionality.

    Returns:
        array: d x cells embedding.
    """
    X = dim_reduce(means, weights, d)
    # dim_reduce's output orientation varies — project accordingly
    if X.shape[0] == 2:
        return X.dot(weights)
    return X.T.dot(weights)
Dimensionality reduction using MDS .