code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
# make sure provide context is valid if not self.is_valid_context(context): error_msg = 'Context ({0}) was never seen in sequence.'.format(context) raise ValueError(error_msg) # make sure sampling is a positive integer if num < 1: error_msg = ('There must be at least one sample (specified {0}) ' 'for a context'.format(num)) raise ValueError(error_msg) # randomly select from available positions that fit the specified context available_pos = self.context2pos[context] random_pos = self.prng_dict[context].choice(available_pos, (num_permutations, num)) return random_pos
def random_context_pos(self, num, num_permutations, context)
Samples available positions (with replacement) that match the given sequence context. Note: this method performs random sampling for a single sequence context only. Parameters ---------- num : int Number of positions to sample for each permutation. This is the number of observed mutations having the matching sequence context for this gene. num_permutations : int Number of permutations for the permutation test. context : str Sequence context. Returns ------- random_pos : np.array num_permutations x num array that represents the randomly sampled positions for a specific context.
4.753926
4.721913
1.00678
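To illustrate the sampling idea in random_context_pos above, here is a minimal standalone sketch; context2pos and the seeded generator are illustrative stand-ins for the object's real attributes:

import numpy as np

# hypothetical positions in the gene sequence that carry the 'AA' context
context2pos = {'AA': [3, 7, 12, 20, 31]}
prng = np.random.RandomState(101)

num, num_permutations = 2, 4  # 2 observed 'AA' mutations, 4 permutations
random_pos = prng.choice(context2pos['AA'], (num_permutations, num))
print(random_pos.shape)  # (4, 2); positions are sampled with replacement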
position_list = [] for contxt, n in context_iterable: pos_array = self.random_context_pos(n, num_permutations, contxt) position_list.append([contxt, pos_array]) return position_list
def random_pos(self, context_iterable, num_permutations)
Obtains random positions w/ replacement which match sequence context. Parameters ---------- context_iterable: iterable containing two element tuple Records number of mutations in each context. context_iterable should be something like [('AA', 5), ...]. num_permutations : int Number of permutations used in the permutation test. Returns ------- position_list : list Contains context string and the randomly chosen positions for that context.
3.773666
3.574734
1.055649
chroms = sorted(bed_dict.keys()) multiprocess_flag = opts['processes']>0 if multiprocess_flag: num_processes = opts['processes'] else: num_processes = 1 num_permutations = opts['num_permutations'] if not opts['by_sample']: obs_result = [] else: uniq_samp = mut_df['Tumor_Sample'].unique() obs_result = pd.DataFrame(np.zeros((len(uniq_samp), len(cols))), index=uniq_samp, columns=cols) # initialize list containing output if not opts['score_dir']: result_list = [[0, 0, 0, 0, 0, 0, 0] for k in range(num_permutations)] else: result_list = [[0, 0, 0, 0, 0, 0, 0, 0, 0] for k in range(num_permutations)] # iterate over each chromosome for i in range(0, len(chroms), num_processes): if multiprocess_flag: pool = Pool(processes=num_processes) tmp_num_proc = len(chroms) - i if i + num_processes > len(chroms) else num_processes info_repeat = ((bed_dict[chroms[tmp_ix]], mut_df, opts) for tmp_ix in range(i, i+tmp_num_proc)) process_results = pool.imap(singleprocess_permutation, info_repeat) process_results.next = utils.keyboard_exit_wrapper(process_results.next) try: for chrom_result, obs_mutations in process_results: for j in range(num_permutations): result_list[j][0] += chrom_result[j][0] result_list[j][1] += chrom_result[j][1] result_list[j][2] += chrom_result[j][2] result_list[j][3] += chrom_result[j][3] result_list[j][4] += chrom_result[j][4] result_list[j][5] += chrom_result[j][5] result_list[j][6] += chrom_result[j][6] if opts['score_dir']: result_list[j][7] += chrom_result[j][7] result_list[j][8] += chrom_result[j][8] if not opts['by_sample']: obs_result.append(obs_mutations) else: obs_result = obs_result + obs_mutations except KeyboardInterrupt: pool.close() pool.join() logger.info('Exited by user. ctrl-c') sys.exit(0) pool.close() pool.join() else: info = (bed_dict[chroms[i]], mut_df, opts) chrom_result, obs_mutations = singleprocess_permutation(info) for j in range(num_permutations): result_list[j][0] += chrom_result[j][0] result_list[j][1] += chrom_result[j][1] result_list[j][2] += chrom_result[j][2] result_list[j][3] += chrom_result[j][3] result_list[j][4] += chrom_result[j][4] result_list[j][5] += chrom_result[j][5] result_list[j][6] += chrom_result[j][6] if opts['score_dir']: result_list[j][7] += chrom_result[j][7] result_list[j][8] += chrom_result[j][8] if not opts['by_sample']: obs_result.append(obs_mutations) else: obs_result = obs_result + obs_mutations return result_list, obs_result
def multiprocess_permutation(bed_dict, mut_df, opts)
Handles parallelization of permutations by splitting work by chromosome.
1.810291
1.809255
1.000572
# get variant types #var_class = cutils.get_variant_classification(germ_aa, somatic_aa, codon_pos) # get information about MGA entropy mga_path = os.path.join(sdir, gname+".mgaentropy.pickle") if os.path.exists(mga_path): if sys.version_info < (3,): # python 2.7 way with open(mga_path) as handle: mga_ent = pickle.load(handle) else: # python 3.X way with open(mga_path, 'rb') as handle: mga_ent = pickle.load(handle, encoding='latin-1') else: mga_ent = None missense_pos = [p for i, p in enumerate(codon_pos) if (germ_aa[i]!=somatic_aa[i]) and (germ_aa[i] not in ['-', '*', 'Splice_Site']) and (somatic_aa[i] not in ['-', '*', 'Splice_Site'])] total_mga_ent = compute_mga_entropy_stat(mga_ent, missense_pos, sum, default_mga) #mga_ent_ixs = [codon_pos[i] for i in range(len(var_class)) #if var_class[i] == 'Missense_Mutation'] #len_mga_ent = len(mga_ent) #mga_ent_scores = [mga_ent[ix] for ix in mga_ent_ixs if ix < len_mga_ent] #if mga_ent_scores: #total_mga_ent = sum(mga_ent_scores) #else: #total_mga_ent = default_mga #else: #total_mga_ent = no_file_flag # get information about VEST scores vest_path = os.path.join(sdir, gname+".vest.pickle") if os.path.exists(vest_path): if sys.version_info < (3,): # python 2.7 way with open(vest_path) as handle: vest_score = pickle.load(handle) else: # python 3.X way with open(vest_path, 'rb') as handle: vest_score = pickle.load(handle, encoding='latin-1') else: vest_score = None total_vest = compute_vest_stat(vest_score, germ_aa, somatic_aa, codon_pos, stat_func=sum, default_val=default_vest) #vest_scores = [vest_score.get(codon_pos[i]+1, {}).get(germ_aa[i], {}).get(somatic_aa[i], default_vest) #for i in range(len(var_class)) #if var_class[i] == 'Missense_Mutation'] #total_vest = sum(vest_scores) #else: #total_vest = no_file_flag return total_mga_ent, total_vest
def retrieve_scores(gname, sdir, codon_pos, germ_aa, somatic_aa, default_mga=5., default_vest=0, no_file_flag=-1)
Retrieves scores from pickle files. Used by summary script.
2.046449
2.063861
0.991563
vest_path = os.path.join(score_dir, gname+".vest.pickle") if os.path.exists(vest_path): if sys.version_info < (3,): with open(vest_path) as handle: gene_vest = pickle.load(handle) else: with open(vest_path, 'rb') as handle: gene_vest = pickle.load(handle, encoding='latin-1') return gene_vest else: return None
def read_vest_pickle(gname, score_dir)
Read in VEST scores for given gene. Parameters ---------- gname : str name of gene score_dir : str directory containing vest scores Returns ------- gene_vest : dict or None dict containing vest scores for gene. Returns None if not found.
1.895329
1.88679
1.004526
# return default value if VEST scores are missing if vest_dict is None: return default_val # fetch scores myscores = fetch_vest_scores(vest_dict, ref_aa, somatic_aa, codon_pos) # calculate mean score if myscores: score_stat = stat_func(myscores) else: score_stat = default_val return score_stat
def compute_vest_stat(vest_dict, ref_aa, somatic_aa, codon_pos, stat_func=np.mean, default_val=0.0)
Compute missense VEST score statistic. Note: non-missense mutations are intentionally not filtered out and will take a default value of zero. Parameters ---------- vest_dict : dict dictionary containing vest scores across the gene of interest ref_aa: list of str list of reference amino acids somatic_aa: list of str somatic mutation aa codon_pos : list of int position of codon in protein sequence stat_func : function, default=np.mean function that calculates a statistic default_val : float default value to return if there are no mutations Returns ------- score_stat : float vest score statistic for provided mutation list
2.886148
2.945785
0.979755
# return default value if VEST scores are missing if mga_vec is None: return default_val # fetch scores myscores = fetch_mga_scores(mga_vec, codon_pos) # calculate mean score if myscores is not None and len(myscores): score_stat = stat_func(myscores) else: score_stat = default_val return score_stat
def compute_mga_entropy_stat(mga_vec, codon_pos, stat_func=np.mean, default_val=0.0)
Compute MGA entropy conservation statistic Parameters ---------- mga_vec : np.array numpy vector containing MGA Entropy conservation scores for residues codon_pos : list of int position of codon in protein sequence stat_func : function, default=np.mean function that calculates a statistic default_val : float default value to return if there are no mutations Returns ------- score_stat : float MGA entropy score statistic for provided mutation list
3.712401
3.721551
0.997541
vest_score_list = [] for i in range(len(somatic_aa)): # make sure position is valid if codon_pos[i] is not None: tmp_score = vest_dict.get(codon_pos[i]+1, {}).get(ref_aa[i], {}).get(somatic_aa[i], default_vest) else: tmp_score = 0.0 vest_score_list.append(tmp_score) return vest_score_list
def fetch_vest_scores(vest_dict, ref_aa, somatic_aa, codon_pos, default_vest=0.0)
Get VEST scores from the pre-computed score dictionary. Note: mutations should either all be missense, or any non-missense mutations are intended to take the default value. Parameters ---------- vest_dict : dict dictionary containing vest scores across the gene of interest ref_aa: list of str list of reference amino acids somatic_aa: list of str somatic mutation aa codon_pos: list of int position of codon in protein sequence default_vest: float, default=0.0 value to use if VEST score not available for a given mutation Returns ------- vest_score_list: list score results for mutations
2.159358
2.330405
0.926602
# keep only positions in range of MGAEntropy scores len_mga = len(mga_vec) good_codon_pos = [p for p in codon_pos if p < len_mga] # get MGAEntropy scores if good_codon_pos: mga_ent_scores = mga_vec[good_codon_pos] else: mga_ent_scores = None return mga_ent_scores
def fetch_mga_scores(mga_vec, codon_pos, default_mga=None)
Get MGAEntropy scores from pre-computed scores in array. Parameters ---------- mga_vec : np.array numpy vector containing MGA Entropy conservation scores for residues codon_pos: list of int position of codon in protein sequence default_mga: float or None, default=None value to use if MGA entropy score not available for a given mutation. Drop mutations if no default specified. Returns ------- mga_ent_scores : np.array score results for MGA entropy conservation
3.316526
2.980897
1.112593
graph_path = os.path.join(graph_dir, gname+".pickle") if os.path.exists(graph_path): with open(graph_path) as handle: gene_graph = pickle.load(handle) return gene_graph else: return None
def read_neighbor_graph_pickle(gname, graph_dir)
Read in neighbor graph for given gene. Parameters ---------- gname : str name of gene graph_dir : str directory containing gene graphs Returns ------- gene_graph : dict or None neighbor graph as dict for gene. Returns None if not found.
2.180921
2.116139
1.030613
# skip if there are no missense mutations if not len(pos_ct): return 1.0, 0 max_pos = max(gene_graph) codon_vals = np.zeros(max_pos+1) # smooth out mutation counts for pos in pos_ct: mut_count = pos_ct[pos] # update neighbor values neighbors = list(gene_graph[pos]) num_neighbors = len(neighbors) codon_vals[neighbors] += alpha*mut_count # update self-value codon_vals[pos] += (1-alpha)*mut_count # compute the normalized entropy #total_cts = float(np.count_nonzero(codon_vals)) #graph_score = mymath.normalized_mutation_entropy(codon_vals, total_cts=total_cts) # compute regular entropy p = codon_vals / np.sum(codon_vals) graph_score = mymath.shannon_entropy(p) # get coverage coverage = np.count_nonzero(p) return graph_score, coverage
def compute_ng_stat(gene_graph, pos_ct, alpha=.5)
Compute the clustering score for the gene on its neighbor graph. Parameters ---------- gene_graph : dict Graph of spatially near codons. keys = nodes, edges = key -> value. pos_ct : dict missense mutation count for each codon alpha : float smoothing factor Returns ------- graph_score : float score measuring the clustering of missense mutations in the graph coverage : int number of nodes that received non-zero weight
3.844071
3.449
1.114547
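A toy illustration of the smoothing step in compute_ng_stat above, using a hypothetical three-codon neighbor graph; the Shannon entropy is computed inline here rather than through mymath.shannon_entropy:

import numpy as np

gene_graph = {0: {1}, 1: {0, 2}, 2: {1}}  # toy graph: codon -> spatially near codons
pos_ct = {1: 4}                           # four missense mutations at codon 1
alpha = 0.5

codon_vals = np.zeros(max(gene_graph) + 1)
for pos, mut_count in pos_ct.items():
    neighbors = list(gene_graph[pos])
    codon_vals[neighbors] += alpha * mut_count   # smooth mass onto neighbors
    codon_vals[pos] += (1 - alpha) * mut_count   # keep the rest on the mutated codon

p = codon_vals / np.sum(codon_vals)
graph_score = -np.sum(np.where(p != 0, p * np.log2(p), 0))
coverage = np.count_nonzero(p)
print(graph_score, coverage)  # ~1.58 bits, 3 codons receive non-zero weight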
if to_zero_based: mut_df['Start_Position'] = mut_df['Start_Position'] - 1 fs_cts = {} # frameshift count information for each gene fs_df = indel.keep_frameshifts(mut_df) for bed in utils.bed_generator(bed_path): gene_df = fs_df[fs_df['Gene']==bed.gene_name] # find it frameshift actually is on gene annotation fs_pos = [] for ix, row in gene_df.iterrows(): indel_pos = [row['Start_Position'], row['End_Position']] coding_pos = bed.query_position(bed.strand, row['Chromosome'], indel_pos) fs_pos.append(coding_pos) # mark frameshifts that could not be mapped to reference tx gene_df['unmapped'] = [(1 if x is None else 0) for x in fs_pos] total_fs = len(gene_df) unmapped_fs = len(gene_df[gene_df['unmapped']==1]) # filter out frameshifts that did not match reference tx if not use_unmapped: gene_df = gene_df[gene_df['unmapped']==0] total_fs -= unmapped_fs info = [total_fs, unmapped_fs,] fs_cts[bed.gene_name] = info # prepare counts into a dataframe fs_cts_df = pd.DataFrame.from_dict(fs_cts, orient='index') cols = ['total', 'unmapped',] fs_cts_df.columns = cols return fs_cts_df
def count_frameshift_total(mut_df, bed_path, use_unmapped=False, to_zero_based=False)
Count frameshifts for each gene. Parameters ---------- mut_df : pd.DataFrame mutation input bed_path : str path to BED file containing reference tx for genes use_unmapped : Bool flag indicating whether to include frameshifts not mapping to reference tx to_zero_based : Bool whether to convert the start coordinate to 0-based for analysis Returns ------- fs_cts_df : pd.DataFrame contains both total frameshift counts and frameshift counts not mappable to the reference transcript.
3.194492
3.073259
1.039448
if strand == '-': ss_seq = fasta.fetch(reference=chrom, start=end-1, end=end+3) ss_seq = utils.rev_comp(ss_seq) elif strand == '+': ss_seq = fasta.fetch(reference=chrom, start=start-3, end=start+1) ss_fasta = '>{0};exon{1};3SS\n{2}\n'.format(gene_name, exon_num, ss_seq.upper()) return ss_fasta
def _fetch_3ss_fasta(fasta, gene_name, exon_num, chrom, strand, start, end)
Retrieves the 3' SS sequence flanking the specified exon. Returns a string in FASTA format where the first line contains a ">" seq id and the second line contains the 3' SS sequence. Parameters ---------- fasta : pysam.Fastafile fasta object from pysam gene_name : str gene name used for fasta seq id exon_num : int the `exon_num` exon, used for seq id chrom : str chromosome strand : str strand, {'+', '-'} start : int 0-based start position end : int 0-based end position Returns ------- ss_fasta : str string in fasta format with first line being seq id
2.515972
2.424369
1.037784
gene_fasta = '' strand = gene_bed.strand exons = gene_bed.get_exons() if strand == '-': exons.reverse() # order exons 5' to 3', so reverse if '-' strand # iterate over exons for i, exon in enumerate(exons): exon_seq = fasta_obj.fetch(reference=gene_bed.chrom, start=exon[0], end=exon[1]).upper() if strand == '-': exon_seq = utils.rev_comp(exon_seq) exon_fasta = '>{0};exon{1}\n{2}\n'.format(gene_bed.gene_name, i, exon_seq) # get splice site sequence if len(exons) == 1: # splice sites don't matter if there is no splicing ss_fasta = '' elif i == 0: # first exon, only has a downstream 5' SS ss_fasta = _fetch_5ss_fasta(fasta_obj, gene_bed.gene_name, i, gene_bed.chrom, strand, exon[0], exon[1]) elif i == (len(exons) - 1): # last exon, only has an upstream 3' SS ss_fasta = _fetch_3ss_fasta(fasta_obj, gene_bed.gene_name, i, gene_bed.chrom, strand, exon[0], exon[1]) else: # middle exon, get both 5' and 3' SS fasta_3ss = _fetch_3ss_fasta(fasta_obj, gene_bed.gene_name, i, gene_bed.chrom, strand, exon[0], exon[1]) fasta_5ss = _fetch_5ss_fasta(fasta_obj, gene_bed.gene_name, i, gene_bed.chrom, strand, exon[0], exon[1]) ss_fasta = fasta_5ss + fasta_3ss gene_fasta += exon_fasta + ss_fasta return gene_fasta
def fetch_gene_fasta(gene_bed, fasta_obj)
Retrieve gene sequences in FASTA format. Parameters ---------- gene_bed : BedLine BedLine object representing a single gene fasta_obj : pysam.Fastafile fasta object for indexed retrieval of sequence Returns ------- gene_fasta : str sequence of gene in FASTA format
2.087579
2.104424
0.991996
exon_seq_list, five_ss_seq_list, three_ss_seq_list = self._fetch_seq() self.exon_seq = ''.join(exon_seq_list) self.three_prime_seq = three_ss_seq_list self.five_prime_seq = five_ss_seq_list self._to_upper()
def _reset_seq(self)
Updates attributes for gene represented in the self.bed attribute. Sequences are always upper case.
3.717751
3.165327
1.174523
if len(germline_nucs) != len(coding_pos): raise ValueError('Each germline nucleotide should have a coding position') es = list(self.exon_seq) for i in range(len(germline_nucs)): gl_nuc, cpos = germline_nucs[i].upper(), coding_pos[i] if not utils.is_valid_nuc(gl_nuc): raise ValueError('{0} is not a valid nucleotide'.format(gl_nuc)) if cpos >= 0: es[cpos] = gl_nuc self.exon_seq = ''.join(es)
def add_germline_variants(self, germline_nucs, coding_pos)
Add potential germline variants into the nucleotide sequence. Sequenced individuals may have a SNP at a somatic mutation position and therefore differ from the reference genome. This method updates the germline gene sequence to match the actual individual. Parameters ---------- germline_nucs : list of str list of DNA nucleotides containing the germline letter coding_pos : list of int 0-based nucleotide positions in the coding sequence NOTE: the self.exon_seq attribute is updated, no return value
2.394924
2.360155
1.014732
self.exon_seq = self.exon_seq.upper() self.three_prime_seq = [s.upper() for s in self.three_prime_seq] self.five_prime_seq = [s.upper() for s in self.five_prime_seq]
def _to_upper(self)
Convert sequences to upper case.
2.426005
2.142308
1.132426
exons = [] three_prime_ss = [] five_prime_ss = [] num_exons = self.bed.get_num_exons() for i in range(num_exons): # add exon sequence tmp_id = '{0};exon{1}'.format(self.bed.gene_name, i) tmp_exon = self.fasta.fetch(reference=tmp_id) exons.append(tmp_exon) # add splice site sequence tmp_id_3ss = '{0};3SS'.format(tmp_id) tmp_id_5ss = '{0};5SS'.format(tmp_id) if num_exons == 1: pass elif i == 0: tmp_5ss = self.fasta.fetch(tmp_id_5ss) five_prime_ss.append(tmp_5ss) elif i == (num_exons - 1): tmp_3ss = self.fasta.fetch(tmp_id_3ss) three_prime_ss.append(tmp_3ss) else: tmp_3ss = self.fasta.fetch(tmp_id_3ss) tmp_5ss = self.fasta.fetch(tmp_id_5ss) three_prime_ss.append(tmp_3ss) five_prime_ss.append(tmp_5ss) return exons, five_prime_ss, three_prime_ss
def _fetch_seq(self)
Fetches gene sequence from PySAM fasta object. Returns ------- exons : list of str list of exon nucleotide sequences five_prime_ss : list of str list of 5' splice site sequences three_prime_ss : list of str list of 3' splice site sequences
1.89952
1.7084
1.111871
chrom_list = [] for chrom in chroms: # fix chrom numbering chrom = str(chrom) chrom = chrom.replace('23', 'X') chrom = chrom.replace('24', 'Y') chrom = chrom.replace('25', 'Mt') if not chrom.startswith('chr'): chrom = 'chr' + chrom chrom_list.append(chrom) return chrom_list
def correct_chrom_names(chroms)
Make sure chromosome names follow UCSC chr convention.
2.135382
2.122339
1.006146
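A quick standalone sketch of the renaming rules in correct_chrom_names above applied to a mixed list of labels (to_ucsc is an illustrative helper name):

def to_ucsc(chrom):
    chrom = str(chrom).replace('23', 'X').replace('24', 'Y').replace('25', 'Mt')
    return chrom if chrom.startswith('chr') else 'chr' + chrom

print([to_ucsc(c) for c in [1, '23', 'chr7', 24]])  # ['chr1', 'chrX', 'chr7', 'chrY']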
pvals = np.asarray(pvals) degrees_of_freedom = 2 * pvals.size chisq_stat = np.sum(-2*np.log(pvals)) fishers_pval = stats.chi2.sf(chisq_stat, degrees_of_freedom) return fishers_pval
def fishers_method(pvals)
Fisher's method for combining independent p-values.
2.444413
2.45075
0.997414
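A standalone numeric sketch of Fisher's method as used above; scipy.stats.combine_pvalues offers the same combination for comparison:

import numpy as np
from scipy import stats

pvals = np.array([0.10, 0.02, 0.30])
chisq_stat = np.sum(-2 * np.log(pvals))               # test statistic: -2 * sum(log p)
combined = stats.chi2.sf(chisq_stat, 2 * pvals.size)  # chi-square survival function, 2k df
print(combined)
# stats.combine_pvalues(pvals, method='fisher') gives the same combined p-value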
for i in range(1, len(x)): if x[i-1] < x[i]: x[i] = x[i-1] return x
def cummin(x)
A Python implementation of R's cummin (cumulative minimum) function. Note: the input list is modified in place and returned.
1.857548
2.03298
0.913707
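A small check of the running minimum computed by cummin above; the list is modified in place, and np.minimum.accumulate gives the same answer for arrays:

import numpy as np

x = [5, 3, 4, 2, 6]
for i in range(1, len(x)):
    if x[i - 1] < x[i]:
        x[i] = x[i - 1]
print(x)                                       # [5, 3, 3, 2, 2]
print(np.minimum.accumulate([5, 3, 4, 2, 6]))  # [5 3 3 2 2]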
pval_array = np.array(pval) sorted_order = np.argsort(pval_array) original_order = np.argsort(sorted_order) pval_array = pval_array[sorted_order] # calculate the needed alpha n = float(len(pval)) pval_adj = np.zeros(int(n)) i = np.arange(1, int(n)+1, dtype=float)[::-1] # largest to smallest pval_adj = np.minimum(1, cummin(n/i * pval_array[::-1]))[::-1] return pval_adj[original_order]
def bh_fdr(pval)
A Python implementation of the Benjamini-Hochberg FDR method. This code should always give precisely the same answer as using p.adjust(pval, method="BH") in R. Parameters ---------- pval : list or array list/array of p-values Returns ------- pval_adj : np.array adjusted p-values according to the Benjamini-Hochberg method
3.527563
3.684375
0.957439
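A worked sketch of the Benjamini-Hochberg adjustment performed by bh_fdr above on four p-values (sort ascending, take cumulative minima of n/i * p from the largest rank down, then restore the input order):

import numpy as np

pval = np.array([0.01, 0.04, 0.03, 0.20])
order = np.argsort(pval)                            # ascending p-values
n = float(pval.size)
i = np.arange(1, pval.size + 1, dtype=float)[::-1]  # ranks n, n-1, ..., 1
adj = np.minimum(1, np.minimum.accumulate(n / i * pval[order][::-1]))[::-1]
pval_adj = adj[np.argsort(order)]                   # back to the input order
print(pval_adj)  # [0.04, 0.0533..., 0.0533..., 0.2]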
#prng = np.random.RandomState(seed) if len(mut_info) > 0: mut_info['Coding Position'] = mut_info['Coding Position'].astype(int) mut_info['Context'] = mut_info['Coding Position'].apply(lambda x: sc.pos2context[x]) # group mutations by context cols = ['Context', 'Tumor_Allele'] unmapped_mut_df = pd.DataFrame(unmapped_mut_info) tmp_df = pd.concat([mut_info[cols], unmapped_mut_df[cols]]) context_cts = tmp_df['Context'].value_counts() context_to_mutations = dict((name, group['Tumor_Allele']) for name, group in tmp_df.groupby('Context')) # get deleterious info for actual mutations aa_mut_info = mc.get_aa_mut_info(mut_info['Coding Position'], mut_info['Tumor_Allele'].tolist(), gs) ref_aa = aa_mut_info['Reference AA'] + unmapped_mut_info['Reference AA'] somatic_aa = aa_mut_info['Somatic AA'] + unmapped_mut_info['Somatic AA'] codon_pos = aa_mut_info['Codon Pos'] + unmapped_mut_info['Codon Pos'] num_del = cutils.calc_deleterious_info(ref_aa, somatic_aa, codon_pos) #num_del = fs_ct + num_snv_del # skip permutation test if number of deleterious mutations is not at # least meet some user-specified threshold if num_del >= del_threshold: # perform permutations del_p_value = pm.deleterious_permutation(num_del, context_cts, context_to_mutations, sc, # sequence context obj gs, # gene sequence obj num_permutations, stop_thresh, pseudo_count) else: del_p_value = None else: num_del = 0 del_p_value = None result = [bed.gene_name, num_del, del_p_value] return result
def calc_deleterious_p_value(mut_info, unmapped_mut_info, sc, gs, bed, num_permutations, stop_thresh, del_threshold, pseudo_count, seed=None)
Calculates the p-value for the number of inactivating SNV mutations. The p-value is based on how many simulations meet or exceed the observed value. Parameters ---------- mut_info : dict contains codon and amino acid residue information for mutations mappable to provided reference tx. unmapped_mut_info : dict contains codon/amino acid residue info for mutations that are NOT mappable to provided reference tx. sc : SequenceContext object contains the nucleotide contexts for a gene such that new random positions can be obtained while respecting nucleotide context. gs : GeneSequence contains gene sequence bed : BedLine just used to return gene name num_permutations : int number of permutations to perform to estimate p-value. more permutations means more precision on the p-value. stop_thresh : int stopping criteria; permutations stop early once this many simulations reach the observed count. del_threshold : int minimum number of observed inactivating mutations required before the permutation test is performed. pseudo_count : int pseudo count used in calculating the p-value. seed : int (Default: None) seed number to random number generator (None to be randomly set)
3.234441
3.11131
1.039575
if len(mut_info) > 0: mut_info['Coding Position'] = mut_info['Coding Position'].astype(int) mut_info['Context'] = mut_info['Coding Position'].apply(lambda x: sc.pos2context[x]) # group mutations by context cols = ['Context', 'Tumor_Allele'] unmapped_mut_df = pd.DataFrame(unmapped_mut_info) tmp_df = pd.concat([mut_info[cols], unmapped_mut_df[cols]]) context_cts = tmp_df['Context'].value_counts() context_to_mutations = dict((name, group['Tumor_Allele']) for name, group in tmp_df.groupby('Context')) # get vest scores for gene if directory provided if graph_dir: gene_graph = scores.read_neighbor_graph_pickle(bed.gene_name, graph_dir) if gene_graph is None: logger.warning('Could not find neighbor graph for {0}, skipping . . .'.format(bed.gene_name)) else: gene_graph = None # get recurrent info for actual mutations aa_mut_info = mc.get_aa_mut_info(mut_info['Coding Position'], mut_info['Tumor_Allele'].tolist(), gs) codon_pos = aa_mut_info['Codon Pos'] + unmapped_mut_info['Codon Pos'] ref_aa = aa_mut_info['Reference AA'] + unmapped_mut_info['Reference AA'] somatic_aa = aa_mut_info['Somatic AA'] + unmapped_mut_info['Somatic AA'] num_recurrent, pos_ent, delta_pos_ent, pos_ct = cutils.calc_pos_info(codon_pos, ref_aa, somatic_aa, min_frac=min_fraction, min_recur=min_recurrent) try: # get vest score for actual mutations graph_score, coverage = scores.compute_ng_stat(gene_graph, pos_ct) # perform simulations to get p-value protein_p_value, norm_graph_score = pm.protein_permutation( graph_score, len(pos_ct), context_cts, context_to_mutations, sc, # sequence context obj gs, # gene sequence obj gene_graph, num_permutations, stop_thresh ) except Exception as err: exc_info = sys.exc_info() norm_graph_score = 0.0 protein_p_value = 1.0 logger.warning('Codon numbering problem with '+bed.gene_name) else: norm_graph_score = 0.0 protein_p_value = 1.0 num_recurrent = 0 result = [bed.gene_name, num_recurrent, norm_graph_score, protein_p_value] return result
def calc_protein_p_value(mut_info, unmapped_mut_info, sc, gs, bed, graph_dir, num_permutations, stop_thresh, min_recurrent, min_fraction)
Computes the p-value for clustering on a neighbor graph composed of codons connected with edges if they are spatially near in 3D protein structure. Parameters ---------- Returns -------
3.665694
3.757232
0.975637
return -np.sum(np.where(p!=0, p * np.log2(p), 0))
def shannon_entropy(p)
Calculates shannon entropy in bits. Parameters ---------- p : np.array array of probabilities Returns ------- shannon entropy in bits
2.807511
4.778575
0.58752
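A quick numerical check of the entropy formula used by shannon_entropy above; a uniform distribution over four outcomes gives log2(4) = 2 bits:

import numpy as np

p = np.array([0.25, 0.25, 0.25, 0.25])
entropy = -np.sum(np.where(p != 0, p * np.log2(p), 0))
print(entropy)  # 2.0 bits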
cts = np.asarray(counts, dtype=float) if total_cts is None: total_cts = np.sum(cts) if total_cts > 1: p = cts / total_cts ent = shannon_entropy(p) max_ent = max_shannon_entropy(total_cts) norm_ent = ent / max_ent else: norm_ent = 1.0 return norm_ent
def normalized_mutation_entropy(counts, total_cts=None)
Calculate the normalized mutation entropy based on a list/array of mutation counts. Note: Any grouping of mutation counts together should be done beforehand. Parameters ---------- counts : np.array_like array/list of mutation counts total_cts : float or None, default=None total number of mutation counts; computed from counts if None Returns ------- norm_ent : float normalized entropy of mutation count distribution.
2.486424
2.885573
0.861674
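A toy calculation mirroring normalized_mutation_entropy above, assuming max_shannon_entropy(n) is log2(n) (that helper is not shown here, so its behavior is an assumption):

import numpy as np

counts = np.array([3., 1., 1.])  # five mutations spread over three codons
total = counts.sum()
p = counts / total
ent = -np.sum(np.where(p != 0, p * np.log2(p), 0))
max_ent = np.log2(total)         # assumed behavior of max_shannon_entropy(total)
print(ent / max_ent)             # ~0.59; clustered counts give a low normalized entropy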
# make sure numpy arrays are floats p = p.astype(float) q = q.astype(float) # compute kl divergence kl = np.sum(np.where(p!=0, p*np.log2(p/q), 0)) return kl
def kl_divergence(p, q)
Compute the Kullback-Leibler (KL) divergence for discrete distributions. Parameters ---------- p : np.array "Ideal"/"true" Probability distribution q : np.array Approximation of probability distribution p Returns ------- kl : float KL divergence of approximating p with the distribution q
2.591711
3.135802
0.826491
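A small example of the KL divergence computed by kl_divergence above; it is asymmetric and assumes q is non-zero wherever p is non-zero:

import numpy as np

p = np.array([0.5, 0.5])
q = np.array([0.9, 0.1])
kl = np.sum(np.where(p != 0, p * np.log2(p / q), 0))
print(kl)  # ~0.74 bits; swapping p and q gives a different value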
m = .5 * (p+q) js_div = .5*kl_divergence(p, m) + .5*kl_divergence(q, m) return js_div
def js_divergence(p, q)
Compute the Jensen-Shannon Divergence between two discrete distributions. Parameters ---------- p : np.array probability mass array (sums to 1) q : np.array probability mass array (sums to 1) Returns ------- js_div : float js divergence between the two distributions
3.172861
3.582508
0.885654
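The same two distributions run through the Jensen-Shannon divergence computed by js_divergence above, which symmetrizes KL by comparing each distribution to their average:

import numpy as np

def kl(p, q):
    return np.sum(np.where(p != 0, p * np.log2(p / q), 0))

p = np.array([0.5, 0.5])
q = np.array([0.9, 0.1])
m = 0.5 * (p + q)
js = 0.5 * kl(p, m) + 0.5 * kl(q, m)
print(js)  # ~0.15 bits; the result is the same if p and q are swapped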
js_dist = np.sqrt(js_divergence(p, q)) return js_dist
def js_distance(p, q)
Compute the Jensen-Shannon distance between two discrete distributions. NOTE: JS divergence is not a metric but the sqrt of JS divergence is a metric and is called the JS distance. Parameters ---------- p : np.array probability mass array (sums to 1) q : np.array probability mass array (sums to 1) Returns ------- js_dist : float Jensen-Shannon distance between two discrete distributions
5.202021
5.096797
1.020645
# define coding region coding_start = int(self.bed_tuple.thickStart) coding_end = int(self.bed_tuple.thickEnd) if (coding_end - coding_start) < 3: # coding regions should have at least one codon, otherwise the # region is invalid and does not indicate an actually coding region logger.debug('{0} has an invalid coding region specified by thickStart ' 'and thickEnd (only {1} bps long). This gene is possibly either ' 'a non-coding transcript or a pseudo gene.'.format(self.gene_name, coding_end-coding_start)) return [] filtered_exons = [] for exon in ex: if exon[0] > coding_end and exon[1] > coding_end: # exon has no coding region pass elif exon[0] < coding_start and exon[1] < coding_start: # exon has no coding region pass elif exon[0] <= coding_start and exon[1] >= coding_end: # coding region entirely contained within one exon filtered_exons.append((coding_start, coding_end)) elif exon[0] <= coding_start and exon[1] < coding_end: # only beginning of exon contains UTR filtered_exons.append((coding_start, exon[1])) elif exon[0] > coding_start and exon[1] >= coding_end: # only end part of exon contains UTR filtered_exons.append((exon[0], coding_end)) elif exon[0] > coding_start and exon[1] < coding_end: # entire exon is coding filtered_exons.append(exon) else: # exon is only a UTR pass return filtered_exons
def _filter_utr(self, ex)
Filter out UTR regions from the exon list (i.e., retain only coding regions). Coding regions are defined by the thickStart and thickEnd attributes. Parameters ---------- ex : list of tuples list of exon positions, [(ex1_start, ex1_end), ...] Returns ------- filtered_exons : list of tuples exons with UTR regions "chopped" out
2.836432
2.678349
1.059023
exon_starts = [self.chrom_start + int(s) for s in self.bed_tuple.blockStarts.strip(',').split(',')] exon_sizes = list(map(int, self.bed_tuple.blockSizes.strip(',').split(','))) # get chromosome intervals exons = [(exon_starts[i], exon_starts[i] + exon_sizes[i]) for i in range(len(exon_starts))] no_utr_exons = self._filter_utr(exons) self.exons = no_utr_exons self.exon_lens = [e[1] - e[0] for e in self.exons] self.num_exons = len(self.exons) self.cds_len = sum(self.exon_lens) self.five_ss_len = 2*(self.num_exons-1) self.three_ss_len = 2*(self.num_exons-1) self._init_splice_site_pos()
def _init_exons(self)
Sets a list of position intervals for each exon. Only coding regions as defined by thickStart and thickEnd are kept. Exons are stored in the self.exons attribute.
2.678381
2.682771
0.998364
self.seqpos2genome = {} # record genome positions for each sequence position seq_pos = 0 for estart, eend in self.exons: for genome_pos in range(estart, eend): if self.strand == '+': self.seqpos2genome[seq_pos] = genome_pos elif self.strand == '-': tmp = self.cds_len - seq_pos - 1 self.seqpos2genome[tmp] = genome_pos seq_pos += 1 # recode 5' splice site locations for i in range(0, self.five_ss_len): seq_pos = self.cds_len + i ss_ix = i // 2 # the ss_ix'th 5'ss starting from upstream tx pos_in_ss = i % 2 # whether first/second nuc in splice site # determine genome coordinates for 5' splice site if self.strand == '+': self.seqpos2genome[seq_pos] = self.exons[ss_ix][1] + pos_in_ss else: exon_pos = -1 - ss_ix self.seqpos2genome[seq_pos] = self.exons[exon_pos][0] - pos_in_ss - 1 # recode 3' splice site locations for i in range(0, self.three_ss_len): seq_pos = self.cds_len + self.five_ss_len + i ss_ix = i // 2 # the ss_ix'th 3'ss starting from upstream tx pos_in_ss = i % 2 # whether first/second nuc in splice site # determine genome coordinates for 3' splice site if self.strand == '+': self.seqpos2genome[seq_pos] = self.exons[ss_ix+1][0] - 2 + pos_in_ss else: exon_pos = -1 - ss_ix self.seqpos2genome[seq_pos] = self.exons[exon_pos-1][1] + 1 - pos_in_ss
def init_genome_coordinates(self)
Creates the self.seqpos2genome dictionary that converts positions relative to the sequence to genome coordinates.
2.316622
2.223539
1.041863
# first check if valid pos = None # initialize to invalid pos if chr != self.chrom: #logger.debug('Wrong chromosome queried. You provided {0} but gene is ' #'on {1}.'.format(chr, self.chrom)) # return pos pass if type(genome_coord) is list: # handle case for indels pos_left = self.query_position(strand, chr, genome_coord[0]) pos_right = self.query_position(strand, chr, genome_coord[1]) if pos_left is not None or pos_right is not None: return [pos_left, pos_right] else: return None # return position if contained within coding region or splice site for i, (estart, eend) in enumerate(self.exons): # in coding region if estart <= genome_coord < eend: if strand == '+': prev_lens = sum(self.exon_lens[:i]) # previous exon lengths pos = prev_lens + (genome_coord - estart) elif strand == '-': prev_lens = sum(self.exon_lens[:i]) # previous exon lengths pos = prev_lens + (genome_coord - estart) pos = self.cds_len - pos - 1 # flip coords because neg strand return pos # in splice site elif (eend <= genome_coord < eend + 2) and i != self.num_exons-1: if strand == '+': pos = self.cds_len + 2*i + (genome_coord - eend) elif strand == '-': pos = self.cds_len + self.five_ss_len + 2*(self.num_exons-(i+2)) + (genome_coord - eend) return pos # in splice site elif (estart - 2 <= genome_coord < estart) and i != 0: if strand == '-': pos = self.cds_len + 2*(self.num_exons-(i+2)) + (genome_coord - (estart - 2)) elif strand == '+': pos = self.cds_len + self.five_ss_len + 2*(i-1) + (genome_coord - (estart - 2)) return pos return pos
def query_position(self, strand, chr, genome_coord)
Provides the relative position in the coding sequence for a given genomic position. Parameters ---------- strand : str strand of the reference transcript, {'+', '-'} chr : str chromosome, provided to check validity of query genome_coord : int or list of int 0-based genomic position of the mutation (two-element list for indels) Returns ------- pos : int, list, or None position of mutation in coding sequence; returns None if the mutation does not fall in a region found in self.exons
2.686636
2.675242
1.004259
if not log_file: # create log directory if it doesn't exist log_dir = os.path.abspath('log') + '/' if not os.path.isdir(log_dir): os.mkdir(log_dir) # path to new log file log_file = log_dir + 'log.run.' + str(datetime.datetime.now()).replace(':', '.') + '.txt' # logger options lvl = logging.DEBUG if log_level.upper() == 'DEBUG' else logging.INFO # ignore warnings if not in debug if log_level.upper() != 'DEBUG': warnings.filterwarnings('ignore') # define logging format if verbose: myformat = '%(asctime)s - %(name)s - %(levelname)s \n>>> %(message)s' else: myformat = '%(message)s' # create logger if not log_file == 'stdout': # normal logging to a regular file logging.basicConfig(level=lvl, format=myformat, filename=log_file, filemode='w') else: # logging to stdout root = logging.getLogger() root.setLevel(lvl) stdout_stream = logging.StreamHandler(sys.stdout) stdout_stream.setLevel(lvl) formatter = logging.Formatter(myformat) stdout_stream.setFormatter(formatter) root.addHandler(stdout_stream) root.propagate = True
def start_logging(log_file='', log_level='INFO', verbose=False)
Start logging information into the log directory. If os.devnull is specified as the log_file then the log file will not actually be written to a file.
2.399486
2.41876
0.992032
@wraps(f) def wrapper(*args, **kwds): try: result = f(*args, **kwds) return result except KeyboardInterrupt: logger.info('Ctrl-C stopped a process.') except Exception as e: logger.exception(e) raise return wrapper
def log_error_decorator(f)
Writes an exception to the log file if one occurs in the decorated function. This decorator wrapper is needed for multiprocess logging since otherwise the python multiprocessing module will obscure the actual line of the error.
2.765519
3.005681
0.920097
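A minimal standalone sketch of the decorator pattern used by log_error_decorator above, with a module-level logger; the real decorator also catches KeyboardInterrupt so worker processes exit quietly (log_error and divide are illustrative names):

import logging
from functools import wraps

logger = logging.getLogger(__name__)

def log_error(f):
    @wraps(f)
    def wrapper(*args, **kwds):
        try:
            return f(*args, **kwds)
        except Exception as e:
            logger.exception(e)  # record the traceback from the worker process
            raise
    return wrapper

@log_error
def divide(a, b):
    return a / b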
# indices need to be in reverse order for filtering # to prevent .pop() from yielding erroneous results bad_ixs = sorted(bad_ixs, reverse=True) for i in bad_ixs: mylist.pop(i) return mylist
def filter_list(mylist, bad_ixs)
Removes indices from a list. All elements in bad_ixs will be removed from the list. Parameters ---------- mylist : list list to filter out specific indices bad_ixs : list of ints indices to remove from list Returns ------- mylist : list list with elements filtered out
5.314985
6.603494
0.804875
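A quick illustration of why filter_list above removes indices in reverse order (popping a low index first would shift every later index):

mylist = ['a', 'b', 'c', 'd', 'e']
bad_ixs = sorted([1, 3], reverse=True)  # pop index 3 before index 1
for i in bad_ixs:
    mylist.pop(i)
print(mylist)  # ['a', 'c', 'e']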
rev_seq = seq[::-1] rev_comp_seq = ''.join([base_pairing[s] for s in rev_seq]) return rev_comp_seq
def rev_comp(seq)
Get reverse complement of sequence. rev_comp will maintain the case of the sequence. Parameters ---------- seq : str nucleotide sequence. valid {a, c, t, g, n} Returns ------- rev_comp_seq : str reverse complement of sequence
2.872111
4.67189
0.614764
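A self-contained sketch of rev_comp above, with an explicit lookup table standing in for the module-level base_pairing dict (the entries shown here are illustrative):

base_pairing = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C', 'N': 'N',
                'a': 't', 't': 'a', 'c': 'g', 'g': 'c', 'n': 'n'}

def rev_comp(seq):
    # reverse the sequence, then complement each base, preserving case
    return ''.join(base_pairing[s] for s in reversed(seq))

print(rev_comp('ACCGTn'))  # 'nACGGT'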
with open(bed_path) as handle: bed_reader = csv.reader(handle, delimiter='\t') for line in bed_reader: yield BedLine(line)
def bed_generator(bed_path)
Iterates through a BED file yielding parsed BED lines. Parameters ---------- bed_path : str path to BED file Yields ------ BedLine(line) : BedLine A BedLine object which has parsed the individual line in a BED file.
2.44282
2.55916
0.95454
# read in entire bed file into a dict with keys as chromsomes bed_dict = OrderedDict() for bed_row in bed_generator(file_path): is_restrict_flag = restricted_genes is None or bed_row.gene_name in restricted_genes if is_restrict_flag: bed_dict.setdefault(bed_row.chrom, []) bed_dict[bed_row.chrom].append(bed_row) sort_chroms = sorted(bed_dict.keys(), key=lambda x: len(bed_dict[x]), reverse=True) bed_dict = OrderedDict((chrom, bed_dict[chrom]) for chrom in sort_chroms) return bed_dict
def read_bed(file_path, restricted_genes=None)
Reads BED file and populates a dictionary separating genes by chromosome. Parameters ---------- file_path : str path to BED file restricted_genes : list or None list of gene names to restrict the output to (None keeps all genes) Returns ------- bed_dict: dict dictionary mapping chromosome keys to a list of BED lines
2.478613
2.711614
0.914073
# only keep allowed mutation types orig_len = len(mutation_df) # number of mutations before filtering mutation_df = mutation_df[mutation_df.Variant_Classification.isin(variant_snv)] # only keep SNV type_len = len(mutation_df) # number of mutations after filtering based on mut type # log the number of dropped mutations log_msg = ('Dropped {num_dropped} mutations after only keeping ' '{mut_types}. Indels are processed separately.'.format(num_dropped=orig_len-type_len, mut_types=', '.join(variant_snv))) logger.info(log_msg) # check if mutations are valid SNVs valid_nuc_flag = (mutation_df['Reference_Allele'].apply(is_valid_nuc) & \ mutation_df['Tumor_Allele'].apply(is_valid_nuc)) mutation_df = mutation_df[valid_nuc_flag] # filter bad lines mutation_df = mutation_df[mutation_df['Tumor_Allele'].apply(lambda x: len(x)==1)] mutation_df = mutation_df[mutation_df['Reference_Allele'].apply(lambda x: len(x)==1)] valid_len = len(mutation_df) # log the number of dropped mutations log_msg = ('Dropped {num_dropped} mutations after only keeping ' 'valid SNVs'.format(num_dropped=type_len-valid_len)) logger.info(log_msg) # drop duplicate mutations if only_unique: dup_cols = ['Tumor_Sample', 'Chromosome', 'Start_Position', 'End_Position', 'Reference_Allele', 'Tumor_Allele'] mutation_df = mutation_df.drop_duplicates(subset=dup_cols) # log results of de-duplication dedup_len = len(mutation_df) log_msg = ('Dropped {num_dropped} mutations when removing ' 'duplicates'.format(num_dropped=valid_len-dedup_len)) logger.info(log_msg) # add dummy Protein_Change or Tumor_Type columns if not provided # in file if 'Tumor_Type' not in mutation_df.columns: mutation_df['Tumor_Type'] = '' if 'Protein_Change' not in mutation_df.columns: mutation_df['Protein_Change'] = '' # correct for 1-based coordinates mutation_df['Start_Position'] = mutation_df['Start_Position'].astype(int) - 1 return mutation_df
def _fix_mutation_df(mutation_df, only_unique=False)
Drops invalid mutations and corrects for 1-based coordinates. TODO: Be smarter about what coordinate system is put in the provided mutations. Parameters ---------- mutation_df : pd.DataFrame user provided mutations only_unique : bool flag indicating whether only unique mutations for each tumor sample should be kept. This avoids issues when the same mutation has duplicate reportings. Returns ------- mutation_df : pd.DataFrame mutations filtered for being valid and correct mutation type. Also converted 1-base coordinates to 0-based.
2.556937
2.51576
1.016368
pos_ctr, pos_sum = {}, {w: {} for w in window} num_pos = len(aa_mut_pos) # figure out the missense mutations for i in range(num_pos): pos = aa_mut_pos[i] # make sure mutation is missense if germ_aa[i] and somatic_aa[i] and germ_aa[i] != '*' and \ somatic_aa[i] != '*' and germ_aa[i] != somatic_aa[i]: # should have a position, but if not skip it if pos is not None: pos_ctr.setdefault(pos, 0) pos_ctr[pos] += 1 # calculate windowed sum pos_list = sorted(pos_ctr.keys()) max_window = max(window) for ix, pos in enumerate(pos_list): tmp_sum = {w: 0 for w in window} # go through the same and lower positions for k in reversed(range(ix+1)): pos2 = pos_list[k] if pos2 < pos-max_window: break for w in window: if pos-w <= pos2: tmp_sum[w] += pos_ctr[pos2] # go though the higher positions for l in range(ix+1, len(pos_list)): pos2 = pos_list[l] if pos2 > pos+max_window: break for w in window: if pos2 <= pos+w: tmp_sum[w] += pos_ctr[pos2] # iterate through all other positions #for pos2 in pos_list: #for w in window: #if pos-w <= pos2 <= pos+w: #tmp_sum[w] += pos_ctr[pos2] # update windowed counts for w in window: pos_sum[w][pos] = tmp_sum[w] return pos_ctr, pos_sum
def calc_windowed_sum(aa_mut_pos, germ_aa, somatic_aa, window=[3])
Calculate the sum of mutations within a window around a particular mutated codon. Parameters ---------- aa_mut_pos : list list of mutated amino acid positions germ_aa : list Reference amino acid somatic_aa : list Somatic amino acid (if missense) window : list List of windows to calculate for Returns ------- pos_ctr : dict dictionary of mutated positions (key) with associated counts (value) pos_sum : dict of dict Window size as first key points to dictionary of mutated positions (key) with associated mutation count within the window size (value)
2.48371
2.494027
0.995863
if context_num == 0: return ['None'] elif context_num == 1: return ['A', 'C', 'T', 'G'] elif context_num == 1.5: return ['C*pG', 'CpG*', 'TpC*', 'G*pA', 'A', 'C', 'T', 'G'] elif context_num == 2: dinucs = list(set( [d1+d2 for d1 in 'ACTG' for d2 in 'ACTG'] )) return dinucs elif context_num == 3: trinucs = list(set( [t1+t2+t3 for t1 in 'ACTG' for t2 in 'ACTG' for t3 in 'ACTG'] )) return trinucs
def get_all_context_names(context_num)
Based on the nucleotide base context number, return a list of strings representing each context. Parameters ---------- context_num : int number representing the amount of nucleotide base context to use. Returns ------- a list of strings containing the names of the base contexts
2.754045
2.810209
0.980015
# check if string is correct length if len(tri_nuc) != 3: raise ValueError('Chasm context requires a three nucleotide string ' '(Provided: "{0}")'.format(tri_nuc)) # try dinuc context if found if tri_nuc[1:] == 'CG': return 'C*pG' elif tri_nuc[:2] == 'CG': return 'CpG*' elif tri_nuc[:2] == 'TC': return 'TpC*' elif tri_nuc[1:] == 'GA': return 'G*pA' else: # just return single nuc context return tri_nuc[1]
def get_chasm_context(tri_nuc)
Returns the mutation context according to CHASM. For more information about CHASM's mutation context, look at http://wiki.chasmsoftware.org/index.php/CHASM_Overview. Essentially CHASM uses a few specified di-nucleotide contexts followed by single nucleotide context. Parameters ---------- tri_nuc : str three nucleotide string with mutated base in the middle. Returns ------- chasm context : str a string representing the context used in CHASM
4.083389
3.890767
1.049508
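A few example trinucleotides run through the CHASM context rules implemented by get_chasm_context above (chasm_context is an illustrative rename; checks are applied in the same order as in the function):

def chasm_context(tri_nuc):
    if tri_nuc[1:] == 'CG':
        return 'C*pG'
    elif tri_nuc[:2] == 'CG':
        return 'CpG*'
    elif tri_nuc[:2] == 'TC':
        return 'TpC*'
    elif tri_nuc[1:] == 'GA':
        return 'G*pA'
    return tri_nuc[1]  # fall back to the single-nucleotide context

print([chasm_context(t) for t in ['ACG', 'CGA', 'TCA', 'AGA', 'ATA']])
# ['C*pG', 'CpG*', 'TpC*', 'G*pA', 'T']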
# if no mutations return empty result if not somatic_base: aa_info = {'Reference Codon': [], 'Somatic Codon': [], 'Codon Pos': [], 'Reference Nuc': [], 'Reference AA': [], 'Somatic AA': []} return aa_info # get codon information into three lists ref_codon, codon_pos, pos_in_codon, ref_nuc = zip(*[cutils.pos_to_codon(gene_seq, p) for p in coding_pos]) ref_codon, codon_pos, pos_in_codon, ref_nuc = list(ref_codon), list(codon_pos), list(pos_in_codon), list(ref_nuc) # construct codons for mutations mut_codon = [(list(x) if x != 'Splice_Site' else []) for x in ref_codon] for i in range(len(mut_codon)): # splice site mutations are not in a codon, so skip such mutations to # prevent an error if pos_in_codon[i] is not None: pc = pos_in_codon[i] mut_codon[i][pc] = somatic_base[i] mut_codon = [(''.join(x) if x else 'Splice_Site') for x in mut_codon] # output resulting info aa_info = {'Reference Codon': ref_codon, 'Somatic Codon': mut_codon, 'Codon Pos': codon_pos, 'Reference Nuc': ref_nuc, 'Reference AA': [(utils.codon_table[r] if (r in utils.codon_table) else None) for r in ref_codon], 'Somatic AA': [(utils.codon_table[s] if (s in utils.codon_table) else None) for s in mut_codon]} return aa_info
def get_aa_mut_info(coding_pos, somatic_base, gene_seq)
Retrieves relevant information about the effect of a somatic SNV on the amino acid of a gene. Information includes the germline codon, somatic codon, codon position, germline AA, and somatic AA. Parameters ---------- coding_pos : iterable of ints Contains the base position (0-based) of the mutations somatic_base : list of str Contains the somatic nucleotide for the mutations gene_seq : GeneSequence gene sequence Returns ------- aa_info : dict information about the somatic mutation effect on AA's
2.627751
2.588983
1.014974
permutation_df = pd.DataFrame(sorted(permutation_result, key=lambda x: x[2] if x[2] is not None else 1.1), columns=['gene', 'inactivating count', 'inactivating p-value', 'Total SNV Mutations', 'SNVs Unmapped to Ref Tx']) permutation_df['inactivating p-value'] = permutation_df['inactivating p-value'].astype('float') tmp_df = permutation_df[permutation_df['inactivating p-value'].notnull()] # get benjamani hochberg adjusted p-values permutation_df['inactivating BH q-value'] = np.nan permutation_df.loc[tmp_df.index, 'inactivating BH q-value'] = mypval.bh_fdr(tmp_df['inactivating p-value']) # sort output by p-value. due to no option to specify NaN order in # sort, the df needs to sorted descendingly and then flipped permutation_df = permutation_df.sort_values(by='inactivating p-value', ascending=False) permutation_df = permutation_df.reindex(index=permutation_df.index[::-1]) # order result permutation_df = permutation_df.set_index('gene', drop=False) col_order = ['gene', 'Total SNV Mutations', 'SNVs Unmapped to Ref Tx', #'Total Frameshift Mutations', 'Frameshifts Unmapped to Ref Tx', 'inactivating count', 'inactivating p-value', 'inactivating BH q-value'] return permutation_df[col_order]
def handle_tsg_results(permutation_result)
Formats results from the TSG (tumor suppressor gene) test. Takes in output from the multiprocess_permutation function and converts it to a better formatted dataframe. Parameters ---------- permutation_result : list output from multiprocess_permutation Returns ------- permutation_df : pd.DataFrame formatted output suitable to save
3.791451
3.91062
0.969527
mycols = ['gene', 'num recurrent', 'position entropy', 'mean vest score', 'entropy p-value', 'vest p-value', 'Total Mutations', 'Unmapped to Ref Tx'] permutation_df = pd.DataFrame(permutation_result, columns=mycols) # get benjamani hochberg adjusted p-values permutation_df['entropy BH q-value'] = mypval.bh_fdr(permutation_df['entropy p-value']) permutation_df['vest BH q-value'] = mypval.bh_fdr(permutation_df['vest p-value']) # combine p-values permutation_df['tmp entropy p-value'] = permutation_df['entropy p-value'] permutation_df['tmp vest p-value'] = permutation_df['vest p-value'] permutation_df.loc[permutation_df['entropy p-value']==0, 'tmp entropy p-value'] = 1. / num_permutations permutation_df.loc[permutation_df['vest p-value']==0, 'tmp vest p-value'] = 1. / num_permutations permutation_df['combined p-value'] = permutation_df[['entropy p-value', 'vest p-value']].apply(mypval.fishers_method, axis=1) permutation_df['combined BH q-value'] = mypval.bh_fdr(permutation_df['combined p-value']) del permutation_df['tmp vest p-value'] del permutation_df['tmp entropy p-value'] # order output permutation_df = permutation_df.set_index('gene', drop=False) # make sure genes are indices permutation_df['num recurrent'] = permutation_df['num recurrent'].fillna(-1).astype(int) # fix dtype isssue col_order = ['gene', 'Total Mutations', 'Unmapped to Ref Tx', 'num recurrent', 'position entropy', 'mean vest score', 'entropy p-value', 'vest p-value', 'combined p-value', 'entropy BH q-value', 'vest BH q-value', 'combined BH q-value'] permutation_df = permutation_df.sort_values(by=['combined p-value']) return permutation_df[col_order]
def handle_oncogene_results(permutation_result, num_permutations)
Takes in output from multiprocess_permutation function and converts to a better formatted dataframe. Parameters ---------- permutation_result : list output from multiprocess_permutation num_permutations : int number of permutations used in the permutation test Returns ------- permutation_df : pd.DataFrame formatted output suitable to save
2.696193
2.760003
0.976881
if len(permutation_result[0]) == 6: mycols = ['gene', 'window length', 'codon position', 'mutation count', 'windowed sum', 'p-value'] else: mycols = ['gene', 'window length', 'codon position', 'index', 'mutation count', 'windowed sum', 'p-value'] permutation_df = pd.DataFrame(permutation_result, columns=mycols) # get benjamani hochberg adjusted p-values permutation_df['q-value'] = 1 for w in permutation_df['window length'].unique(): is_window = permutation_df['window length'] == w permutation_df.loc[is_window, 'q-value'] = mypval.bh_fdr(permutation_df.loc[is_window, 'p-value']) #permutation_df['q-value'] = mypval.bh_fdr(permutation_df['p-value']) # order output #permutation_df = permutation_df.set_index('gene', drop=False) # make sure genes are indices col_order = mycols + ['q-value'] permutation_df = permutation_df.sort_values(by=['window length', 'p-value']) return permutation_df[col_order]
def handle_hotmaps_results(permutation_result)
Takes in output from multiprocess_permutation function and converts to a better formatted dataframe. Parameters ---------- permutation_result : list output from multiprocess_permutation Returns ------- permutation_df : pd.DataFrame formatted output suitable to save
3.32995
3.406957
0.977397
mycols = ['gene', 'num recurrent', 'normalized graph-smoothed position entropy', 'normalized graph-smoothed position entropy p-value', 'Total Mutations', 'Unmapped to Ref Tx'] permutation_df = pd.DataFrame(permutation_result, columns=mycols) # get benjamani hochberg adjusted p-values permutation_df['normalized graph-smoothed position entropy BH q-value'] = mypval.bh_fdr(permutation_df['normalized graph-smoothed position entropy p-value']) # order output permutation_df = permutation_df.set_index('gene', drop=False) # make sure genes are indices col_order = ['gene', 'Total Mutations', 'Unmapped to Ref Tx', 'num recurrent', 'normalized graph-smoothed position entropy', 'normalized graph-smoothed position entropy p-value', 'normalized graph-smoothed position entropy BH q-value'] permutation_df = permutation_df.sort_values(by=['normalized graph-smoothed position entropy p-value']) return permutation_df[col_order]
def handle_protein_results(permutation_result)
Takes in output from multiprocess_permutation function and converts to a better formatted dataframe. Parameters ---------- permutation_result : list output from multiprocess_permutation Returns ------- permutation_df : pd.DataFrame formatted output suitable to save
4.213128
4.38256
0.961339
mycols = ['gene', 'num recurrent', 'num inactivating', 'entropy-on-effect', 'entropy-on-effect p-value', 'Total Mutations', 'Unmapped to Ref Tx'] permutation_df = pd.DataFrame(sorted(permutation_result, key=lambda x: x[4] if x[4] is not None else 1.1), columns=mycols) # get benjamani hochberg adjusted p-values permutation_df['entropy-on-effect BH q-value'] = mypval.bh_fdr(permutation_df['entropy-on-effect p-value']) # order output permutation_df = permutation_df.set_index('gene', drop=False) # make sure genes are indices permutation_df['num recurrent'] = permutation_df['num recurrent'].fillna(-1).astype(int) # fix dtype isssue col_order = ['gene', 'Total Mutations', 'Unmapped to Ref Tx', 'num recurrent', 'num inactivating', 'entropy-on-effect', 'entropy-on-effect p-value', 'entropy-on-effect BH q-value'] return permutation_df[col_order]
def handle_effect_results(permutation_result)
Takes in output from multiprocess_permutation function and converts to a better formatted dataframe. Parameters ---------- permutation_result : list output from multiprocess_permutation Returns ------- permutation_df : pd.DataFrame formatted output suitable to save
4.723446
4.809519
0.982104
fs_df = compute_indel_length(fs_df) # count the number INDELs with length non-dividable by 3 num_indels = [] indel_len = [] num_categories = 0 i = 1 while(num_categories<bins): # not inframe length indel if i%3: if num_categories != bins-1: tmp_num = len(fs_df[fs_df['indel len']==i]) else: tmp_num = len(fs_df[(fs_df['indel len']>=i) & ((fs_df['indel len']%3)>0)]) num_indels.append(tmp_num) indel_len.append(i) num_categories += 1 i += 1 return indel_len, num_indels
def get_frameshift_info(fs_df, bins)
Counts frameshifts stratified by a given length. Parameters ---------- fs_df : pd.DataFrame indel mutations from non-coding portion bins : int number of different length categories for frameshifts Returns ------- indel_len : list length of specific frameshift length category num_indels : list number of frameshifts matching indel_len
4.063182
3.628702
1.119734
if mut_type: # user specifies a mutation type self.mutation_type = mut_type else: # mutation type is taken from object attributes if not self.is_valid: # does not correctly fall into a category self.mutation_type = 'not valid' elif self.unknown_effect: self.mutation_type = 'unknown effect' elif self.is_no_protein: self.mutation_type = 'no protein' elif self.is_missing_info: # mutation has a ? self.mutation_type = 'missing' else: # valid mutation type to be counted if self.is_lost_stop: self.mutation_type = 'Nonstop_Mutation' elif self.is_lost_start: self.mutation_type = 'Translation_Start_Site' elif self.is_synonymous: # synonymous must go before missense since mutations # can be categorized as synonymous and missense. Although # in reality such cases are actually synonymous and not # missense mutations. self.mutation_type = 'Silent' elif self.is_missense: self.mutation_type = 'Missense_Mutation' elif self.is_indel: self.mutation_type = 'In_Frame_Indel' elif self.is_nonsense_mutation: self.mutation_type = 'Nonsense_Mutation' elif self.is_frame_shift: self.mutation_type = 'Frame_Shift_Indel'
def set_mutation_type(self, mut_type='')
Sets the mutation type attribute to a single label based on attribute flags. Kwargs: mut_type (str): value to set self.mutation_type to
3.447447
3.518338
0.979851
aa = aa.upper() # make sure it is upper case aa = aa[2:] if aa.startswith('P.') else aa # strip "p." self.__set_mutation_status() # set flags detailing the type of mutation self.__parse_hgvs_syntax(aa)
def set_amino_acid(self, aa)
Set amino acid change and position.
9.267467
8.335623
1.111791
self.__set_lost_stop_status(hgvs_string) self.__set_lost_start_status(hgvs_string) self.__set_missense_status(hgvs_string) # missense mutations self.__set_indel_status() # indel mutations self.__set_frame_shift_status() # check for fs self.__set_premature_stop_codon_status(hgvs_string)
def __set_mutation_type(self, hgvs_string)
Interpret the mutation type (missense, etc.) and set appropriate flags. Args: hgvs_string (str): hgvs syntax with "p." removed
3.462771
3.70113
0.935598
# set missense status if re.search('^[A-Z?]\d+[A-Z?]$', hgvs_string): self.is_missense = True self.is_non_silent = True else: self.is_missense = False
def __set_missense_status(self, hgvs_string)
Sets the self.is_missense flag.
3.742715
3.450045
1.084831
# set is lost start status mymatch = re.search('^([A-Z?])(\d+)([A-Z?])$', hgvs_string) if mymatch: grps = mymatch.groups() if int(grps[1]) == 1 and grps[0] != grps[2]: self.is_lost_start = True self.is_non_silent = True else: self.is_lost_start = False else: self.is_lost_start = False
def __set_lost_start_status(self, hgvs_string)
Sets the self.is_lost_start flag.
2.901336
2.722131
1.065833
if 'fs' in self.hgvs_original: self.is_frame_shift = True self.is_non_silent = True elif re.search('[A-Z]\d+[A-Z]+\*', self.hgvs_original): # it looks like some mutations don't follow the convention # of using 'fs' to indicate frame shift self.is_frame_shift = True self.is_non_silent = True else: self.is_frame_shift = False
def __set_frame_shift_status(self)
Check for frame shift and set the self.is_frame_shift flag.
4.432447
4.021534
1.102178
lost_stop_pattern = '^\*\d+[A-Z?]+\*?$' if re.search(lost_stop_pattern, hgvs_string): self.is_lost_stop = True self.is_non_silent = True else: self.is_lost_stop = False
def __set_lost_stop_status(self, hgvs_string)
Check if the stop codon was mutated to something other than a stop codon.
3.556805
3.598703
0.988357
if re.search('.+\*(\d+)?$', hgvs_string): self.is_premature_stop_codon = True self.is_non_silent = True # check if it is also a nonsense mutation if hgvs_string.endswith('*'): self.is_nonsense_mutation = True else: self.is_nonsense_mutation = False else: self.is_premature_stop_codon = False self.is_nonsense_mutation = False
def __set_premature_stop_codon_status(self, hgvs_string)
Set whether there is a premature stop codon.
2.678462
2.564105
1.044599
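To make the flag-setting regexes above concrete, a small standalone sketch (not part of the original class) showing how a few HGVS protein strings match the missense and lost-stop patterns:

    import re

    missense_pat = r'^[A-Z?]\d+[A-Z?]$'
    lost_stop_pat = r'^\*\d+[A-Z?]+\*?$'

    for hgvs in ['A123T', 'M1V', '*350K', 'R97*', 'E55Kfs*11']:
        print(hgvs,
              bool(re.search(missense_pat, hgvs)),   # True for A123T and M1V
              bool(re.search(lost_stop_pat, hgvs)))  # True only for *350K
    # R97* ends with '*' so it is flagged as a nonsense/premature stop,
    # and E55Kfs*11 contains 'fs' so it is flagged as a frame shift.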
# set indel status if "ins" in self.hgvs_original: # mutation is insertion self.is_insertion = True self.is_deletion = False self.is_indel = True self.is_non_silent = True elif "del" in self.hgvs_original: # mutation is deletion self.is_deletion = True self.is_insertion = False self.is_indel = True self.is_non_silent = True else: # not an indel self.is_deletion = False self.is_insertion = False self.is_indel = False
def __set_indel_status(self)
Sets flags related to the mutation being an indel.
2.01202
1.923238
1.046163
# Standard use by HGVS of indicating unknown effect.
    unknown_effect_list = ['?', '(=)', '=']  # unknown effect symbols
    if hgvs_string in unknown_effect_list:
        self.unknown_effect = True
    elif "(" in hgvs_string:
        # parentheses in HGVS indicate expected (predicted) outcomes
        self.unknown_effect = True
    else:
        self.unknown_effect = False

    # detect if there is missing information. Commonly COSMIC will
    # have insertions with p.?_?ins? or deletions with ?del indicating
    # missing information.
    if "?" in hgvs_string:
        self.is_missing_info = True
    else:
        self.is_missing_info = False
def __set_unkown_effect(self, hgvs_string)
Sets a flag for unknown effect according to HGVS syntax. The COSMIC
        database also uses unconventional question marks to denote missing
        information.

        Args:
            hgvs_string (str): hgvs syntax with "p." removed
7.450627
7.086362
1.051404
no_protein_list = ['0', '0?'] # no protein symbols if hgvs_string in no_protein_list: self.is_no_protein = True self.is_non_silent = True else: self.is_no_protein = False
def __set_no_protein(self, hgvs_string)
Set a flag for no protein expected. ("p.0" or "p.0?") Args: hgvs_string (str): hgvs syntax with "p." removed
3.71568
4.028877
0.922262
mycontexts = context_counts.index.tolist()
    somatic_base = [base
                    for one_context in mycontexts
                    for base in context_to_mut[one_context]]

    # calculate the # of batches for simulations
    max_batch = min(num_permutations, max_batch)
    num_batches = num_permutations // max_batch
    remainder = num_permutations % max_batch
    batch_sizes = [max_batch] * num_batches
    if remainder:
        batch_sizes += [remainder]

    num_sim = 0
    null_del_ct = 0
    for j, batch_size in enumerate(batch_sizes):
        # stop iterations if reached sufficient precision
        if null_del_ct >= stop_criteria:
            break

        # get random positions determined by sequence context
        tmp_contxt_pos = seq_context.random_pos(context_counts.iteritems(),
                                                batch_size)
        tmp_mut_pos = np.hstack(pos_array for base, pos_array in tmp_contxt_pos)

        # determine result of random positions
        for i, row in enumerate(tmp_mut_pos):
            # get info about mutations
            tmp_mut_info = mc.get_aa_mut_info(row, somatic_base, gene_seq)

            # calc deleterious mutation info
            tmp_del_count = cutils.calc_deleterious_info(tmp_mut_info['Reference AA'],
                                                         tmp_mut_info['Somatic AA'],
                                                         tmp_mut_info['Codon Pos'])

            # update empirical null distribution
            if tmp_del_count >= obs_del:
                null_del_ct += 1

            # stop if reached sufficient precision on p-value
            if null_del_ct >= stop_criteria:
                break

        # update number of simulations
        num_sim += i + 1

    del_pval = float(null_del_ct) / num_sim
    return del_pval
def deleterious_permutation(obs_del, context_counts, context_to_mut, seq_context, gene_seq, num_permutations=10000, stop_criteria=100, pseudo_count=0, max_batch=25000)
Performs null-permutations for deleterious mutation statistics in a single gene.

    Parameters
    ----------
    obs_del : int
        observed number of deleterious mutations, used to evaluate the
        empirical null distribution
    context_counts : pd.Series
        number of mutations for each context
    context_to_mut : dict
        dictionary mapping nucleotide context to a list of observed
        somatic base changes.
    seq_context : SequenceContext
        Sequence context for the entire gene sequence (regardless
        of where mutations occur). The nucleotide contexts are
        identified at positions along the gene.
    gene_seq : GeneSequence
        Sequence of gene of interest
    num_permutations : int, default: 10000
        number of permutations to create for null
    stop_criteria : int, default: 100
        stop the permutations early once this many null samples are at
        least as extreme as the observed count
    pseudo_count : int, default: 0
        Pseudo-count for number of deleterious mutations for each
        permutation of the null distribution. Increasing pseudo_count
        makes the statistical test more stringent.
    max_batch : int, default: 25000
        maximum number of permutations evaluated per batch

    Returns
    -------
    del_pval : float
        empirical p-value for the observed deleterious mutation count
4.389572
4.41595
0.994027
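The batch bookkeeping at the top of the function is easier to see with concrete numbers; a short sketch using hypothetical values and the same arithmetic:

    num_permutations, max_batch = 10000, 3000

    max_batch = min(num_permutations, max_batch)   # 3000
    num_batches = num_permutations // max_batch    # 3 full batches
    remainder = num_permutations % max_batch       # 1000 permutations left over
    batch_sizes = [max_batch] * num_batches + ([remainder] if remainder else [])
    print(batch_sizes)  # [3000, 3000, 3000, 1000]
    # the loop then stops early once null_del_ct reaches stop_criteria,
    # so the p-value is estimated from the num_sim simulations actually run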
mycontexts = context_counts.index.tolist() somatic_base = [base for one_context in mycontexts for base in context_to_mut[one_context]] # get random positions determined by sequence context tmp_contxt_pos = seq_context.random_pos(context_counts.iteritems(), num_permutations) tmp_mut_pos = np.hstack(pos_array for base, pos_array in tmp_contxt_pos) # calculate position-based statistics as a result of random positions effect_entropy_list, recur_list, inactivating_list = [], [], [] for row in tmp_mut_pos: # get info about mutations tmp_mut_info = mc.get_aa_mut_info(row, somatic_base, gene_seq) # calculate position info tmp_entropy, tmp_recur, tmp_inactivating = cutils.calc_effect_info(tmp_mut_info['Codon Pos'], tmp_mut_info['Reference AA'], tmp_mut_info['Somatic AA'], pseudo_count=pseudo_count, is_obs=0) effect_entropy_list.append(tmp_entropy) recur_list.append(tmp_recur) inactivating_list.append(tmp_inactivating) return effect_entropy_list, recur_list, inactivating_list
def effect_permutation(context_counts, context_to_mut, seq_context, gene_seq, num_permutations=10000, pseudo_count=0)
Performs null-permutations for effect-based mutation statistics in a single gene. Parameters ---------- context_counts : pd.Series number of mutations for each context context_to_mut : dict dictionary mapping nucleotide context to a list of observed somatic base changes. seq_context : SequenceContext Sequence context for the entire gene sequence (regardless of where mutations occur). The nucleotide contexts are identified at positions along the gene. gene_seq : GeneSequence Sequence of gene of interest num_permutations : int, default: 10000 number of permutations to create for null pseudo_count : int, default: 0 Pseudo-count for number of recurrent missense mutations for each permutation for the null distribution. Increasing pseudo_count makes the statistical test more stringent. Returns ------- effect_entropy_list : list list of entropy of effect values under the null recur_list : list number of recurrent missense mutations inactivating_list : list number of inactivating mutations
4.344762
3.690628
1.177242
mycontexts = context_counts.index.tolist() somatic_base = [base for one_context in mycontexts for base in context_to_mut[one_context]] # get random positions determined by sequence context tmp_contxt_pos = seq_context.random_pos(context_counts.iteritems(), num_permutations) tmp_mut_pos = np.hstack(pos_array for base, pos_array in tmp_contxt_pos) # determine result of random positions non_silent_count_list = [] for row in tmp_mut_pos: # get info about mutations tmp_mut_info = mc.get_aa_mut_info(row, somatic_base, gene_seq) # calc deleterious mutation info tmp_non_silent = cutils.calc_non_silent_info(tmp_mut_info['Reference AA'], tmp_mut_info['Somatic AA'], tmp_mut_info['Codon Pos']) non_silent_count_list.append(tmp_non_silent) return non_silent_count_list
def non_silent_ratio_permutation(context_counts, context_to_mut, seq_context, gene_seq, num_permutations=10000)
Performs null-permutations for non-silent ratio across all genes. Parameters ---------- context_counts : pd.Series number of mutations for each context context_to_mut : dict dictionary mapping nucleotide context to a list of observed somatic base changes. seq_context : SequenceContext Sequence context for the entire gene sequence (regardless of where mutations occur). The nucleotide contexts are identified at positions along the gene. gene_seq : GeneSequence Sequence of gene of interest num_permutations : int, default: 10000 number of permutations to create for null Returns ------- non_silent_count_list : list of tuples list of non-silent and silent mutation counts under the null
4.758523
4.565446
1.042291
mycontexts = context_counts.index.tolist() somatic_base = [base for one_context in mycontexts for base in context_to_mut[one_context]] # get random positions determined by sequence context tmp_contxt_pos = seq_context.random_pos(context_counts.iteritems(), num_permutations) tmp_mut_pos = np.hstack(pos_array for base, pos_array in tmp_contxt_pos) # determine result of random positions gene_name = gene_seq.bed.gene_name gene_len = gene_seq.bed.cds_len summary_info_list = [] for i, row in enumerate(tmp_mut_pos): # get info about mutations tmp_mut_info = mc.get_aa_mut_info(row, somatic_base, gene_seq) # Get all metrics summarizing each gene tmp_summary = cutils.calc_summary_info(tmp_mut_info['Reference AA'], tmp_mut_info['Somatic AA'], tmp_mut_info['Codon Pos'], gene_name, score_dir, min_frac=min_frac, min_recur=min_recur) # drop silent if needed if drop_silent: # silent mutation count is index 1 tmp_summary[1] = 0 # limit the precision of floats #pos_ent = tmp_summary[-1] #tmp_summary[-1] = '{0:.5f}'.format(pos_ent) summary_info_list.append([gene_name, i+1, gene_len]+tmp_summary) return summary_info_list
def summary_permutation(context_counts, context_to_mut, seq_context, gene_seq, score_dir, num_permutations=10000, min_frac=0.0, min_recur=2, drop_silent=False)
Performs null-permutations and summarizes the results as features over the gene.

    Parameters
    ----------
    context_counts : pd.Series
        number of mutations for each context
    context_to_mut : dict
        dictionary mapping nucleotide context to a list of observed
        somatic base changes.
    seq_context : SequenceContext
        Sequence context for the entire gene sequence (regardless of
        where mutations occur). The nucleotide contexts are identified at
        positions along the gene.
    gene_seq : GeneSequence
        Sequence of gene of interest
    num_permutations : int, default: 10000
        number of permutations to create for null
    drop_silent : bool, default=False
        Flag on whether to drop all silent mutations. Some data sources
        do not report silent mutations, and the simulations should match this.

    Returns
    -------
    summary_info_list : list of lists
        list of non-silent and silent mutation counts under the null along
        with information on recurrent missense counts and missense
        positional entropy.
4.910311
4.486738
1.094406
mycontexts = context_counts.index.tolist() somatic_base, base_context = zip(*[(base, one_context) for one_context in mycontexts for base in context_to_mut[one_context]]) # get random positions determined by sequence context tmp_contxt_pos = seq_context.random_pos(context_counts.iteritems(), num_permutations) tmp_mut_pos = np.hstack(pos_array for base, pos_array in tmp_contxt_pos) # info about gene gene_name = gene_seq.bed.gene_name strand = gene_seq.bed.strand chrom = gene_seq.bed.chrom gene_seq.bed.init_genome_coordinates() # map seq pos to genome # determine result of random positions maf_list = [] for row in tmp_mut_pos: # get genome coordinate pos2genome = np.vectorize(lambda x: gene_seq.bed.seqpos2genome[x]+1) genome_coord = pos2genome(row) # get info about mutations tmp_mut_info = mc.get_aa_mut_info(row, somatic_base, gene_seq) # get string describing variant var_class = cutils.get_variant_classification(tmp_mut_info['Reference AA'], tmp_mut_info['Somatic AA'], tmp_mut_info['Codon Pos']) # prepare output for k, mysomatic_base in enumerate(somatic_base): # format DNA change ref_nuc = tmp_mut_info['Reference Nuc'][k] nuc_pos = row[k] dna_change = 'c.{0}{1}>{2}'.format(ref_nuc, nuc_pos, mysomatic_base) # format protein change ref_aa = tmp_mut_info['Reference AA'][k] somatic_aa = tmp_mut_info['Somatic AA'][k] codon_pos = tmp_mut_info['Codon Pos'][k] protein_change = 'p.{0}{1}{2}'.format(ref_aa, codon_pos, somatic_aa) # reverse complement if on negative strand if strand == '-': ref_nuc = utils.rev_comp(ref_nuc) mysomatic_base = utils.rev_comp(mysomatic_base) # append results if drop_silent and var_class[k].decode() == 'Silent': continue maf_line = [gene_name, strand, chrom, genome_coord[k], genome_coord[k], ref_nuc, mysomatic_base, base_context[k], dna_change, protein_change, var_class[k].decode()] maf_list.append(maf_line) return maf_list
def maf_permutation(context_counts, context_to_mut, seq_context, gene_seq, num_permutations=10000, drop_silent=False)
Performs null-permutations across all genes and records the results in
    a format like a MAF file. This could be useful for examining the null
    permutations because the alternative approaches always summarize the
    results. With the simulated null-permutations, novel metrics can be
    applied to create an empirical null-distribution.

    Parameters
    ----------
    context_counts : pd.Series
        number of mutations for each context
    context_to_mut : dict
        dictionary mapping nucleotide context to a list of observed
        somatic base changes.
    seq_context : SequenceContext
        Sequence context for the entire gene sequence (regardless of
        where mutations occur). The nucleotide contexts are identified at
        positions along the gene.
    gene_seq : GeneSequence
        Sequence of gene of interest
    num_permutations : int, default: 10000
        number of permutations to create for null
    drop_silent : bool, default=False
        Flag on whether to drop all silent mutations. Some data sources
        do not report silent mutations, and the simulations should match this.

    Returns
    -------
    maf_list : list of tuples
        list of null mutations with mutation info in a MAF-like format
3.492712
3.418955
1.021573
return mark_safe(markdown_module.markdown( force_text(value), extensions=extensions, extension_configs=extension_configs, safe_mode=safe))
def markdown(value, extensions=settings.MARKDOWN_EXTENSIONS, extension_configs=settings.MARKDOWN_EXTENSION_CONFIGS, safe=False)
Render markdown over a given value, optionally using various extensions.

    Default extensions can be defined with the MARKDOWN_EXTENSIONS setting.

    :returns: A rendered markdown
2.80089
6.308524
0.443985
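A minimal usage sketch for the helper above, assuming a configured Django settings module and the python-markdown package; the input string is purely illustrative:

    # renders to a safe string such as '<p><strong>bold</strong> text</p>'
    html = markdown('**bold** text', extensions=['extra'])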
init_template = loader.get_template( settings.MARKDOWN_EDITOR_INIT_TEMPLATE) options = dict( previewParserPath=reverse('django_markdown_preview'), **settings.MARKDOWN_EDITOR_SETTINGS) options.update(extra_settings) ctx = dict( selector=selector, extra_settings=simplejson.dumps(options) ) return init_template.render(ctx)
def editor_js_initialization(selector, **extra_settings)
Return script tag with initialization code.
4.108282
3.834177
1.07149
if settings.MARKDOWN_PROTECT_PREVIEW: user = getattr(request, 'user', None) if not user or not user.is_staff: from django.contrib.auth.views import redirect_to_login return redirect_to_login(request.get_full_path()) return render( request, settings.MARKDOWN_PREVIEW_TEMPLATE, dict( content=request.POST.get('data', 'No content posted'), css=settings.MARKDOWN_STYLE ))
def preview(request)
Render preview page. :returns: A rendered preview
3.523036
3.740073
0.94197
admin.site.unregister(FlatPage) admin.site.register(FlatPage, LocalFlatPageAdmin)
def register()
Register markdown for flatpages.
5.785335
3.943417
1.467087
program_name = __programm_name__
    program_version = "v%s" % __version__
    program_description = __programm_description__

    try:
        # Setup argument parser
        parser = ArgumentParser(prog=program_name, description=program_description)
        parser.add_argument("-v", "--verbose", dest="verbose", action="count",
                            help="run in verbose mode (-vvv for more, -vvvv to enable connection debugging)")
        parser.add_argument("-s", "--sudo", action="store_true",
                            help="run supervisorctl actions with sudo (nopasswd)")
        parser.add_argument("-V", "--version", action="version", version=program_version)
        parser.add_argument("host-pattern",
                            help="A host-pattern usually refers to a group of hosts. For more details, see Ansible documentation about Patterns.")
        subparsers = parser.add_subparsers(help="One of the available supervisorctl actions.",
                                           dest="supervisorctl-action")
        subparsers.add_parser("status", help="Get status info of all processes.")
        subparsers.add_parser("reread", help="Reread the configuration files of supervisord")
        subparsers.add_parser("reload", help="Restart remote supervisord")
        subparsers.add_parser("update", help="Reload the configuration files of supervisord and add/remove processes as necessary")
        start_subparser = subparsers.add_parser("start", help="Start a process by name")
        start_subparser.add_argument("process-name", help="Name of the process")
        stop_subparser = subparsers.add_parser("stop", help="Stop a process by name")
        stop_subparser.add_argument("process-name", help="Name of the process")
        restart_subparser = subparsers.add_parser("restart", help="Restart a process by name")
        restart_subparser.add_argument("process-name", help="Name of the process")
        remove_subparser = subparsers.add_parser("remove", help="Remove a process by name")
        remove_subparser.add_argument("process-name", help="Name of the process")

        # Process arguments
        args = parser.parse_args(argv)

        verbose = args.verbose
        host_pattern = getattr(args, "host-pattern")
        supervisorctl_action = getattr(args, "supervisorctl-action")
        sudo = args.sudo

        ansible_executable = "ansible"
        supervisorctl_executable = "supervisorctl"
        ansible_action_option = "-a"

        if sudo:
            supervisorctl_command = "sudo " + supervisorctl_executable + " " + supervisorctl_action
        else:
            supervisorctl_command = supervisorctl_executable + " " + supervisorctl_action

        if supervisorctl_action not in ['status', 'reread', 'update', 'reload']:
            supervisorctl_argument = getattr(args, "process-name")
            supervisorctl_command = supervisorctl_command + ' ' + supervisorctl_argument

        if verbose:
            # args.verbose defaults to None when -v is not given
            verbose_level = "-" + "v" * verbose
            print("Verbose mode on: " + verbose_level)
            print("Parsed arguments:")
            print(args)
            retcode = call([ansible_executable, host_pattern, verbose_level,
                            ansible_action_option, supervisorctl_command])
        else:
            retcode = call([ansible_executable, host_pattern,
                            ansible_action_option, supervisorctl_command])
        return retcode
    except KeyboardInterrupt:
        ### handle keyboard interrupt ###
        return 0
def main(argv=None)
Command line options.
2.660912
2.654294
1.002493
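A hypothetical invocation of the parser above; 'webservers' and 'myapp' are placeholder values, and the ansible call built internally is traced in the comment:

    # equivalent shell command issued under the hood:
    #   ansible webservers -a "sudo supervisorctl restart myapp"
    exit_code = main(['--sudo', 'webservers', 'restart', 'myapp'])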
extensions = (arg and arg.split(',')) or settings.MARKDOWN_EXTENSIONS return _markdown(value, extensions=extensions, safe=False)
def markdown(value, arg=None)
Render markdown over a given value, optionally using various extensions.

    Default extensions can be defined with the MARKDOWN_EXTENSIONS setting.

    Syntax: ::

        {{value|markdown}}
        {{value|markdown:"tables,codehilite"}}

    :returns: A rendered markdown
6.52211
9.773273
0.667341
extensions = (arg and arg.split(',')) or settings.MARKDOWN_EXTENSIONS return _markdown(value, extensions=extensions, safe=True)
def markdown_safe(value, arg=None)
Render markdown over a given value, optionally using various extensions.

    Default extensions can be defined with the MARKDOWN_EXTENSIONS setting.

    Enables safe mode, which strips raw HTML and only returns HTML
    generated by markdown.

    :returns: A rendered markdown.
6.178503
8.16984
0.756257
return dict( selector=selector, extra_settings=mark_safe(simplejson.dumps( dict(previewParserPath=reverse('django_markdown_preview')))))
def markdown_editor(selector)
Enable markdown editor for given textarea. :returns: Editor template context.
10.5602
13.980116
0.755373
return dict( CSS_SET=posixpath.join( settings.MARKDOWN_SET_PATH, settings.MARKDOWN_SET_NAME, 'style.css' ), CSS_SKIN=posixpath.join( 'django_markdown', 'skins', settings.MARKDOWN_EDITOR_SKIN, 'style.css' ) )
def markdown_media_css()
Add css requirements to HTML. :returns: Editor template context.
4.382306
4.984385
0.879207
return _convert( _read_file, _process_file, source, to, format, extra_args, encoding=encoding)
def convert(source, to, format=None, extra_args=(), encoding='utf-8')
Convert given `source` from `format` `to` another.

    `source` may be either a file path or a string to be converted.

    It's possible to pass `extra_args` if needed. In case `format` is not
    provided, it will try to infer the format based on the given `source`.

    Raises OSError if pandoc is not found! Make sure it has been installed
    and is available on the PATH.
6.121706
9.42179
0.649739
try: p = subprocess.Popen( ['pandoc', '-h'], stdin=subprocess.PIPE, stdout=subprocess.PIPE) except OSError: raise OSError("You probably do not have pandoc installed.") help_text = p.communicate()[0].decode().splitlines(False) txt = ' '.join(help_text[1:help_text.index('Options:')]) aux = txt.split('Output formats: ') in_ = aux[0].split('Input formats: ')[1].split(',') out = aux[1].split(',') return [f.strip() for f in in_], [f.strip() for f in out]
def get_pandoc_formats()
Dynamically query pandoc for its supported formats.

    Returns two lists: "from_formats" (input) and "to_formats" (output).
3.449136
3.425832
1.006802
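A usage sketch for the format query above; note that it shells out to `pandoc -h` and relies on older pandoc releases printing "Input formats:" / "Output formats:" lines, so the result depends on the installed version:

    from_formats, to_formats = get_pandoc_formats()
    print('markdown' in from_formats, 'html' in to_formats)  # typically True True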
html = super(MarkdownWidget, self).render(name, value, attrs, renderer) attrs = self.build_attrs(attrs) html += editor_js_initialization("#%s" % attrs['id']) return mark_safe(html)
def render(self, name, value, attrs=None, renderer=None)
Render widget. :returns: A rendered HTML
3.994495
5.154797
0.774908
md.registerExtension(self) md.preprocessors.add('graphviz_block', InlineGraphvizPreprocessor(md), "_begin")
def extendMarkdown(self, md, md_globals)
Add InlineGraphvizPreprocessor to the Markdown instance.
5.605569
3.338277
1.67918
text = "\n".join(lines) while 1: m = BLOCK_RE.search(text) if m: command = m.group('command') # Whitelist command, prevent command injection. if command not in SUPPORTED_COMMAMDS: raise Exception('Command not supported: %s' % command) filename = m.group('filename') content = m.group('content') filetype = filename[filename.rfind('.')+1:] args = [command, '-T'+filetype] try: proc = subprocess.Popen( args, stdin=subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE) proc.stdin.write(content.encode('utf-8')) output, err = proc.communicate() if filetype == 'svg': data_url_filetype = 'svg+xml' encoding = 'utf-8' img = output.decode(encoding) if filetype == 'png': data_url_filetype = 'png' encoding = 'base64' output = base64.b64encode(output) data_path = "data:image/%s;%s,%s" % ( data_url_filetype, encoding, output) img = "![" + filename + "](" + data_path + ")" text = '%s\n%s\n%s' % ( text[:m.start()], img, text[m.end():]) except Exception as e: err = str(e) + ' : ' + str(args) return ( '<pre>Error : ' + err + '</pre>' '<pre>' + content + '</pre>').split('\n') else: break return text.split("\n")
def run(self, lines)
Match and generate dot code blocks.
3.223029
3.084988
1.044746
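The image embedding at the end of the PNG branch is just a base64 data URI; a stripped-down sketch, assuming Graphviz's `dot` is on the PATH (unlike the original it decodes the base64 bytes, so the URI stays a plain string under Python 3):

    import base64
    import subprocess

    dot_source = 'digraph G { a -> b; }'
    proc = subprocess.run(['dot', '-Tpng'], input=dot_source.encode('utf-8'),
                          stdout=subprocess.PIPE, check=True)
    encoded = base64.b64encode(proc.stdout).decode('ascii')
    img_markdown = '![graph.png](data:image/png;base64,%s)' % encoded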
now = calendar.timegm(datetime.datetime.now().timetuple()) if now > self.expiration: auth = self.__open("/oauth/token", data=self.oauth) self.__sethead(auth['access_token']) return self.__open("%s%s" % (self.api, command), headers=self.head, data=data)
def post(self, command, data=None)
Post data to API.
5.462879
5.055112
1.080664
self.access_token = access_token now = calendar.timegm(datetime.datetime.now().timetuple()) self.expiration = now + 1800 self.head = {"Authorization": "Bearer %s" % access_token, "User-Agent": self.user_agent }
def __sethead(self, access_token)
Set HTTP header.
2.961597
2.955369
1.002107
headers = headers or {} if not baseurl: baseurl = self.baseurl req = Request("%s%s" % (baseurl, url), headers=headers) _LOGGER.debug(url) try: req.data = urlencode(data).encode('utf-8') except TypeError: pass opener = build_opener() try: resp = opener.open(req) charset = resp.info().get('charset', 'utf-8') data = json.loads(resp.read().decode(charset)) opener.close() _LOGGER.debug(json.dumps(data)) return data except HTTPError as exception_: if exception_.code == 408: _LOGGER.debug("%s", exception_) return False raise TeslaException(exception_.code)
def __open(self, url, headers=None, data=None, baseurl="")
Use raw urlopen command.
2.773096
2.685262
1.03271
self._controller.update(self._id, wake_if_asleep=False) data = self._controller.get_drive_params(self._id) if data: if not data['shift_state'] or data['shift_state'] == 'P': self.__state = True else: self.__state = False
def update(self)
Update the parking brake sensor.
6.351341
5.139165
1.23587
self._controller.update(self._id, wake_if_asleep=False) data = self._controller.get_climate_params(self._id) if data: if time.time() - self.__manual_update_time > 60: self.__is_auto_conditioning_on = (data ['is_auto_conditioning_on']) self.__is_climate_on = data['is_climate_on'] self.__driver_temp_setting = (data['driver_temp_setting'] if data['driver_temp_setting'] else self.__driver_temp_setting) self.__passenger_temp_setting = (data['passenger_temp_setting'] if data['passenger_temp_setting'] else self.__passenger_temp_setting) self.__inside_temp = (data['inside_temp'] if data['inside_temp'] else self.__inside_temp) self.__outside_temp = (data['outside_temp'] if data['outside_temp'] else self.__outside_temp) self.__fan_status = data['fan_status']
def update(self)
Update the HVAC state.
2.690922
2.460218
1.093774
temp = round(temp, 1) self.__manual_update_time = time.time() data = self._controller.command(self._id, 'set_temps', {"driver_temp": temp, "passenger_temp": temp}, wake_if_asleep=True) if data['response']['result']: self.__driver_temp_setting = temp self.__passenger_temp_setting = temp
def set_temperature(self, temp)
Set both the driver and passenger temperature to temp.
5.422731
4.638
1.169196
self.__manual_update_time = time.time() if enabled: data = self._controller.command(self._id, 'auto_conditioning_start', wake_if_asleep=True) if data['response']['result']: self.__is_auto_conditioning_on = True self.__is_climate_on = True else: data = self._controller.command(self._id, 'auto_conditioning_stop', wake_if_asleep=True) if data['response']['result']: self.__is_auto_conditioning_on = False self.__is_climate_on = False self.update()
def set_status(self, enabled)
Enable or disable the HVAC.
3.207923
2.919683
1.098723
self._controller.update(self._id, wake_if_asleep=False) data = self._controller.get_climate_params(self._id) if data: self.__inside_temp = (data['inside_temp'] if data['inside_temp'] else self.__inside_temp) self.__outside_temp = (data['outside_temp'] if data['outside_temp'] else self.__outside_temp)
def update(self)
Update the temperature.
3.760788
3.424766
1.098115
self._controller.update(self._id, wake_if_asleep=False) data = self._controller.get_charging_params(self._id) if data and (time.time() - self.__manual_update_time > 60): if data['charging_state'] != "Charging": self.__charger_state = False else: self.__charger_state = True
def update(self)
Update the charging state of the Tesla Vehicle.
5.303608
4.180938
1.268521
if not self.__charger_state: data = self._controller.command(self._id, 'charge_start', wake_if_asleep=True) if data and data['response']['result']: self.__charger_state = True self.__manual_update_time = time.time()
def start_charge(self)
Start charging the Tesla Vehicle.
7.802256
6.896783
1.131289
if self.__charger_state: data = self._controller.command(self._id, 'charge_stop', wake_if_asleep=True) if data and data['response']['result']: self.__charger_state = False self.__manual_update_time = time.time()
def stop_charge(self)
Stop charging the Tesla Vehicle.
7.964667
7.063333
1.127608
self._controller.update(self._id, wake_if_asleep=False) data = self._controller.get_charging_params(self._id) if data and (time.time() - self.__manual_update_time > 60): self.__maxrange_state = data['charge_to_max_range']
def update(self)
Update the status of the range setting.
8.32191
7.4862
1.111633
if not self.__maxrange_state: data = self._controller.command(self._id, 'charge_max_range', wake_if_asleep=True) if data['response']['result']: self.__maxrange_state = True self.__manual_update_time = time.time()
def set_max(self)
Set the charger to max range for trips.
10.756185
8.385867
1.282656
if self.__maxrange_state: data = self._controller.command(self._id, 'charge_standard', wake_if_asleep=True) if data and data['response']['result']: self.__maxrange_state = False self.__manual_update_time = time.time()
def set_standard(self)
Set the charger to standard range for daily commute.
11.729804
9.326102
1.257739
if self.__lock_state: data = self._controller.command(self._id, 'door_unlock', wake_if_asleep=True) if data['response']['result']: self.__lock_state = False self.__manual_update_time = time.time()
def unlock(self)
Unlock the doors and extend handles where applicable.
8.631649
7.802948
1.106204
if not self.__lock_state: data = self._controller.command(self._id, 'charge_port_door_close', wake_if_asleep=True) if data['response']['result']: self.__lock_state = True self.__manual_update_time = time.time()
def lock(self)
Close the charger door.
10.424943
8.296243
1.256586
# pylint: disable=no-self-argument # issue is use of wraps on classmethods which should be replaced: # https://hynek.me/articles/decorators/ @wraps(func) def wrapped(*args, **kwargs): # pylint: disable=too-many-branches,protected-access, not-callable def valid_result(result): try: return (result is not None and result is not False and (result is True or (isinstance(result, dict) and isinstance(result['response'], dict) and ('result' in result['response'] and result['response']['result'] is True) or ('reason' in result['response'] and result['response']['reason'] != 'could_not_wake_buses') or ('result' not in result['response'])))) except TypeError as exception: _LOGGER.error("Result: %s, %s", result, exception) retries = 0 sleep_delay = 2 inst = args[0] vehicle_id = args[1] result = None if (vehicle_id is not None and vehicle_id in inst.car_online and inst.car_online[vehicle_id]): try: result = func(*args, **kwargs) except TeslaException: pass if valid_result(result): return result _LOGGER.debug("wake_up needed for %s -> %s \n" "Info: args:%s, kwargs:%s, " "vehicle_id:%s, car_online:%s", func.__name__, # pylint: disable=no-member result, args, kwargs, vehicle_id, inst.car_online) inst.car_online[vehicle_id] = False while ('wake_if_asleep' in kwargs and kwargs['wake_if_asleep'] and # Check online state (vehicle_id is None or (vehicle_id is not None and vehicle_id in inst.car_online and not inst.car_online[vehicle_id]))): result = inst._wake_up(vehicle_id) _LOGGER.debug("%s(%s): Wake Attempt(%s): %s", func.__name__, # pylint: disable=no-member, vehicle_id, retries, result) if not result: if retries < 5: time.sleep(sleep_delay**(retries+2)) retries += 1 continue else: inst.car_online[vehicle_id] = False raise RetryLimitError else: break # try function five more times retries = 0 while True: try: result = func(*args, **kwargs) _LOGGER.debug("%s(%s): Retry Attempt(%s): %s", func.__name__, # pylint: disable=no-member, vehicle_id, retries, result) except TeslaException: pass finally: retries += 1 time.sleep(sleep_delay**(retries+1)) if valid_result(result): return result if retries >= 5: raise RetryLimitError return wrapped
def wake_up(func)
Wrap an API function f so it will attempt to wake the vehicle if asleep.

    The command f is run once if the vehicle_id was last reported online.
    Assuming f returns None and wake_if_asleep is True, 5 attempts will be
    made to wake the vehicle and reissue the command. In addition, if there
    is a `could_not_wake_buses` error, it will retry the command.

    Args:
        inst (Controller): The instance of a controller
        vehicle_id (string): The vehicle to attempt to wake.
            TODO: This currently requires a vehicle_id, but update() does
            not; this should also be updated to allow that case
        wake_if_asleep (bool): Keyword arg to force a vehicle awake. Must
            be set in the wrapped function f

    Throws:
        RetryLimitError
3.217613
2.883716
1.115787
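The retry delays in the decorator grow exponentially with the attempt count; a tiny sketch of the resulting waits (sleep_delay is 2 in the code above):

    sleep_delay = 2

    # wake-up loop sleeps sleep_delay ** (retries + 2) for retries 0..4
    wake_waits = [sleep_delay ** (r + 2) for r in range(5)]
    print(wake_waits)  # [4, 8, 16, 32, 64] seconds before giving up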
# pylint: disable=unused-argument data = data or {} return self.__connection.post('vehicles/%i/%s' % (vehicle_id, command), data)
def post(self, vehicle_id, command, data=None, wake_if_asleep=True)
Send post command to the vehicle_id.

        This function is wrapped by wake_up.

        Parameters
        ----------
        vehicle_id : string
            Identifier for the car on the owner-api endpoint. Confusingly
            it is not the vehicle_id field for identifying the car across
            different endpoints.
            https://tesla-api.timdorr.com/api-basics/vehicles#vehicle_id-vs-id
        command : string
            Tesla API command. https://tesla-api.timdorr.com/vehicle/commands
        data : dict
            Optional parameters.
        wake_if_asleep : bool
            Function for wake_up decorator indicating whether a failed
            response should wake up the vehicle or retry.

        Returns
        -------
        dict
            Tesla json object.
6.471847
11.120853
0.581956
# pylint: disable=unused-argument return self.__connection.get('vehicles/%i/%s' % (vehicle_id, command))
def get(self, vehicle_id, command, wake_if_asleep=False)
Send get command to the vehicle_id.

        This function is wrapped by wake_up.

        Parameters
        ----------
        vehicle_id : string
            Identifier for the car on the owner-api endpoint. Confusingly
            it is not the vehicle_id field for identifying the car across
            different endpoints.
            https://tesla-api.timdorr.com/api-basics/vehicles#vehicle_id-vs-id
        command : string
            Tesla API command. https://tesla-api.timdorr.com/vehicle/commands
        wake_if_asleep : bool
            Function for wake_up decorator indicating whether a failed
            response should wake up the vehicle or retry.

        Returns
        -------
        dict
            Tesla json object.
7.308344
12.984923
0.562833
return self.get(vehicle_id, 'vehicle_data/%s' % name, wake_if_asleep=wake_if_asleep)['response']
def data_request(self, vehicle_id, name, wake_if_asleep=False)
Get requested data from vehicle_id.

        Parameters
        ----------
        vehicle_id : string
            Identifier for the car on the owner-api endpoint. Confusingly
            it is not the vehicle_id field for identifying the car across
            different endpoints.
            https://tesla-api.timdorr.com/api-basics/vehicles#vehicle_id-vs-id
        name: string
            Name of data to be requested from the data_request endpoint,
            which rolls up all data plus vehicle configuration.
            https://tesla-api.timdorr.com/vehicle/state/data
        wake_if_asleep : bool
            Passed to the underlying api call to indicate whether a failed
            response should wake up the vehicle or retry.

        Returns
        -------
        dict
            Tesla json object.
4.140959
5.549803
0.746145
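To show how the wrappers fit together, a hypothetical call sequence; `api` stands for an instance of this class, `vehicle_id` for a valid owner-api id, and the endpoint names are illustrative, following the unofficial documentation linked above:

    # read rolled-up state for one vehicle
    climate = api.data_request(vehicle_id, 'climate_state', wake_if_asleep=True)

    # issue a command through the generic post wrapper
    resp = api.post(vehicle_id, 'command/set_temps',
                    data={'driver_temp': 21.0, 'passenger_temp': 21.0})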