# (dataset-viewer table header removed during cleanup; not part of the original source)
# covmat_stops = covmat[np.ix_(elongating_orfs.values, elongating_orfs.values)]
# orf_strs_stops = orf_strs[elongating_orfs.values]
# stop_strength_df['stop_strength_nohistop'] = gstop_grps['orf_strength'].aggregate(np.sum)
# stop_strength_df['W_stop_nohistop'] = pd.Series({gstop:orf_strs_stops[rownums].dot(np.linalg.inv(covmat_stops[np.ix_(rownums,rownums)]))
#                                                  .dot(orf_strs_stops[rownums]) for (gstop, rownums) in gstop_grps.indices.iteritems()})
return orf_strength_df, start_strength_df, stop_strength_df |
else: |
return orf_strength_df, start_strength_df |
def _regress_chrom(chrom_to_do):
    """Applies _regress_tfam() to all of the transcript families on a chromosome"""
    # Pull every ORF on this chromosome from the HDF5 store. The tcoord > 0
    # restriction removes ORFs whose first codon is an NTG, avoiding an indexing
    # error; those ORFs could never be called anyway since they cannot have any
    # reads at their start codon.
    orfs_on_chrom = pd.read_hdf(
        opts.orfstore, 'all_orfs', mode='r',
        where="chrom == %r and tstop > 0 and tcoord > 0" % chrom_to_do,
        columns=['orfname', 'tfam', 'tid', 'tcoord', 'tstop', 'AAlen', 'chrom', 'gcoord', 'gstop', 'strand',
                 'codon', 'orftype', 'annot_start', 'annot_stop'])
    if opts.exclude:
        # Drop any transcript families the user asked to exclude
        orfs_on_chrom = orfs_on_chrom[~orfs_on_chrom['tfam'].isin(opts.exclude)]
    if restrictbystartfilenames:
        # Build the union of qualifying start sites across all restriction files
        restrictedstarts = pd.DataFrame()
        for (restrictbystart, minw) in zip(restrictbystartfilenames, opts.minwstart):
            # NOTE: 'minw' inside the where string is resolved from this local scope
            # by the pandas/PyTables query engine -- do not rename it
            restrictedstarts = restrictedstarts.append(
                pd.read_hdf(restrictbystart, 'start_strengths', mode='r',
                            where="(chrom == %r) & (W_start > minw)" % chrom_to_do,
                            columns=['tfam', 'chrom', 'gcoord', 'strand']),
                ignore_index=True).drop_duplicates()
        orfs_on_chrom = orfs_on_chrom.merge(restrictedstarts)  # inner merge acts as a filter
    if orfs_on_chrom.empty:
        if opts.verbose > 1:
            with log_lock:
                logprint('No ORFs found on %s' % chrom_to_do)
        return failure_return
    # Open the BAM files and wrap them in a genome array for read lookups
    bamhandles = [pysam.Samfile(infile, 'rb') for infile in opts.bamfiles]
    genome_array = HashedReadBAMGenomeArray(bamhandles, ReadKeyMapFactory(Pdict, read_length_nmis))
    # Regress each transcript family independently, then concatenate the
    # per-family result DataFrames column-group by column-group
    per_tfam_results = [_regress_tfam(tfam_set, genome_array)
                        for (tfam, tfam_set) in orfs_on_chrom.groupby('tfam')]
    res = tuple([pd.concat(res_dfs) for res_dfs in zip(*per_tfam_results)])
    for bamhandle in bamhandles:
        bamhandle.close()
    if opts.verbose > 1:
        with log_lock:
            logprint('%s complete' % chrom_to_do)
    return res
# Obtain the complete chromosome list from the ORF store. The 'chrom' column was
# saved as a pandas categorical, so its category-metadata table enumerates every
# chromosome without scanning the full table.
with pd.HDFStore(opts.orfstore, mode='r') as orfstore:
    chroms = orfstore.select('all_orfs/meta/chrom/meta').values # because saved as categorical, this is the list of all chromosomes
# Reuse a previously computed metagene profile unless the user forced a recompute.
if os.path.isfile(metafilename) and not opts.force:
    if opts.verbose:
        logprint('Loading metagene')
    # Saved metagene is a TSV indexed by (region, position), one column per read length
    metagene = pd.read_csv(metafilename, sep='\t').set_index(['region', 'position'])
    metagene.columns = metagene.columns.astype(int) # they are read lengths
    assert (metagene.columns == rdlens).all()
    startprof = metagene.loc['START']
    cdsprof = metagene.loc['CDS']
    stopprof = metagene.loc['STOP']
    # Recover the half-open position ranges [min, max+1) from the saved index
    startnt = (startprof.index.min(), startprof.index.max()+1)
    assert len(cdsprof) == 3  # the CDS profile covers exactly one codon (3 frame positions)
    stopnt = (stopprof.index.min(), stopprof.index.max()+1)
    # Transpose to (rdlen, position) arrays, matching the freshly computed case below
    startprof = startprof.values.T
    cdsprof = cdsprof.values.T
    stopprof = stopprof.values.T
else:
    if opts.verbose:
        logprint('Calculating metagene')
    startnt = (-abs(opts.startrange[0])*3, abs(opts.startrange[1])*3) # force <=0 and >= 0 for the bounds
    stopnt = (-abs(opts.stoprange[0])*3, abs(opts.stoprange[1])*3)
    if stopnt[0] >= -6:
        raise ValueError('STOPRANGE must encompass at least 3 codons prior to the stop')
    # NOTE(review): under Python 3 this '/' yields a float; this looks inherited
    # from Python 2 integer division -- confirm downstream comparisons tolerate it
    min_AAlen = (startnt[1]-stopnt[0])/3 # actually should be longer than this to ensure at least one codon in the body
    startlen = startnt[1]-startnt[0]
    stoplen = stopnt[1]-stopnt[0]
    # Count reads around annotated CDSes per chromosome in parallel, then sum the
    # per-chromosome (startprof, cdsprof, stopprof, num_cds_incl) tuples elementwise
    workers = mp.Pool(opts.numproc)
    (startprof, cdsprof, stopprof, num_cds_incl) = [sum(x) for x in zip(*workers.map(_get_annotated_counts_by_chrom, chroms))]
    workers.close()
    startprof /= num_cds_incl # technically not necessary, but helps for consistency of units across samples
    cdsprof /= num_cds_incl
    stopprof /= num_cds_incl
    # Persist the metagene as a TSV indexed by (region, position) with one column
    # per read length, so later runs can reload it instead of recomputing
    pd.concat((pd.DataFrame(data=startprof.T,
                            index=pd.MultiIndex.from_tuples([('START', x) for x in range(*startnt)], names=['region', 'position']),
                            columns=pd.Index(rdlens, name='rdlen')),
               pd.DataFrame(data=cdsprof.T,
                            index=pd.MultiIndex.from_tuples([('CDS', x) for x in range(3)], names=['region', 'position']),
                            columns=pd.Index(rdlens, name='rdlen')),
               pd.DataFrame(data=stopprof.T,
                            index=pd.MultiIndex.from_tuples([('STOP', x) for x in range(*stopnt)], names=['region', 'position']),
                            columns=pd.Index(rdlens, name='rdlen')))) \
        .to_csv(metafilename, sep='\t')
# (dataset-viewer footer boilerplate removed during cleanup; not part of the original source)