    gnd = HashedReadBAMGenomeArray(inbams, ReadKeyMapFactory(Pdict, read_length_nmis))
    for (tid, tcoord, tstop) in found_cds[['tid', 'tcoord', 'tstop']].itertuples(False):
        curr_trans = SegmentChain.from_bed(bedlinedict[tid])
        tlen = curr_trans.get_length()
        if tlen >= tstop + stopnt[1]:  # need to guarantee that the 3' UTR is sufficiently long
            curr_hashed_counts = get_hashed_counts(curr_trans, gnd)
            cdslen = tstop+stopnt[1]-tcoord-startnt[0]  # CDS length, plus the extra requested bases on either side
            curr_counts = np.zeros((len(rdlens), cdslen))
            for (i, rdlen) in enumerate(rdlens):
                for nmis in range(opts.max5mis+1):
                    curr_counts[i, :] += curr_hashed_counts[(rdlen, nmis)][tcoord+startnt[0]:tstop+stopnt[1]]
            # curr_counts is limited to the CDS plus any extra requested nucleotides on either side
            if curr_counts.sum() >= opts.mincdsreads:
                curr_counts /= curr_counts.mean()  # normalize by the mean count across all read lengths and positions within the CDS
                startprof += curr_counts[:, :startlen]
                cdsprof += curr_counts[:, startlen:cdslen-stoplen].reshape((len(rdlens), -1, 3)).mean(1)
                stopprof += curr_counts[:, cdslen-stoplen:cdslen]
                num_cds_incl += 1
    for inbam in inbams:
        inbam.close()
    return startprof, cdsprof, stopprof, num_cds_incl
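
# Illustrative sketch (not part of the pipeline): the reshape(...).mean(1) step
# above collapses the CDS body into one averaged codon per read length while
# preserving the three sub-codon positions. _demo_codon_average is a
# hypothetical, self-contained numpy-only demonstration of that trick.
def _demo_codon_average():
    body = np.arange(24, dtype=float).reshape((2, 12))  # 2 read lengths x 4 codons of 3 nt
    codon_avg = body.reshape((2, -1, 3)).mean(1)  # average over the 4 codons -> shape (2, 3)
    # codon_avg[i, j] is the mean count at sub-codon position j for read length i
    return codon_avg
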
def _orf_profile(orflen):
    """Generate a profile for an ORF based on the metagene profile

    Parameters
    ----------
    orflen : int
        Number of nucleotides in the ORF, including the start and stop codons

    Returns
    -------
    np.ndarray<float>
        The expected profile for the ORF. Number of rows will match the number of rows in the metagene profile.
        Number of columns will be orflen + stopnt[1] - startnt[0]
    """
    assert orflen % 3 == 0
    assert orflen > 0
    short_stop = 9
    if orflen >= startnt[1]-stopnt[0]:  # long enough to include everything
        return np.hstack((startprof, np.tile(cdsprof, (orflen-startnt[1]+stopnt[0])//3), stopprof))
    elif orflen >= startnt[1]+short_stop:
        return np.hstack((startprof, stopprof[:, startnt[1]-orflen-stopnt[1]:]))
    elif orflen >= short_stop:
        return np.hstack((startprof[:, :orflen-short_stop-startnt[0]], stopprof[:, -short_stop-stopnt[1]:]))
    else:  # very short!
        return np.hstack((startprof[:, :3-startnt[0]], stopprof[:, 3-orflen-stopnt[0]:]))
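
# Worked example (hypothetical window sizes, for illustration only): if
# startnt = (-18, 15) and stopnt = (-9, 18), startprof has 33 columns and
# stopprof has 27. For orflen = 60 the first branch applies (60 >= 15-(-9) = 24):
# cdsprof (3 columns) is tiled (60-15+(-9))//3 = 12 times for 36 columns, and the
# result has 33+36+27 = 96 = orflen + stopnt[1] - startnt[0] columns, matching
# the docstring.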
if opts.startonly:
    failure_return = (pd.DataFrame(), pd.DataFrame())
else:
    failure_return = (pd.DataFrame(), pd.DataFrame(), pd.DataFrame())
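
# Note: the arity of failure_return mirrors _regress_tfam's successful return
# value: presumably ORF-level and start-codon-level results, plus stop-codon-level
# results when opts.startonly is False (cf. the _regress_tfam docstring below).
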
def _regress_tfam(orf_set, gnd):
    """Performs non-negative least squares regression on all of the ORFs in a transcript family, using profiles constructed via _orf_profile()

    Also calculates Wald statistics for each ORF and start codon, and for each stop codon if opts.startonly is False"""
    tfam = orf_set['tfam'].iat[0]
    strand = orf_set['strand'].iat[0]
    chrom = orf_set['chrom'].iat[0]
    tids = orf_set['tid'].drop_duplicates().tolist()
    all_tfam_genpos = set()
    tid_genpos = {}
    tlens = {}
    for (i, tid) in enumerate(tids):
        currtrans = SegmentChain.from_bed(bedlinedict[tid])
        curr_pos_set = currtrans.get_position_set()
        tlens[tid] = len(curr_pos_set)
        tid_genpos[tid] = curr_pos_set
        all_tfam_genpos.update(curr_pos_set)
    tfam_segs = SegmentChain(*positionlist_to_segments(chrom, strand, list(all_tfam_genpos)))
    all_tfam_genpos = np.array(sorted(all_tfam_genpos))
    if strand == '-':
        all_tfam_genpos = all_tfam_genpos[::-1]
    nnt = len(all_tfam_genpos)
    tid_indices = {tid: np.flatnonzero(np.in1d(all_tfam_genpos, list(curr_tid_genpos), assume_unique=True))
                   for (tid, curr_tid_genpos) in tid_genpos.items()}
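    # tid_indices[tid][j] is the row of all_tfam_genpos (ordered 5' to 3') holding
    # transcript position j of tid, letting transcript coordinates be translated
    # into columns of the family-level count matrix assembled below.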
    hashed_counts = get_hashed_counts(tfam_segs, gnd)
    counts = np.zeros((len(rdlens), nnt), dtype=np.float64)  # even though they are integer-valued, will need to do float arithmetic
    for (i, rdlen) in enumerate(rdlens):
        for nmis in range(1+opts.max5mis):
            counts[i, :] += hashed_counts[(rdlen, nmis)]
    counts = counts.ravel()
    if opts.startcount:
        # Only include ORFs for which there are at least opts.startcount reads within one nucleotide of the start codon
        offsetmat = np.tile(nnt*np.arange(len(rdlens)), 3)  # offsets into the raveled counts; three positions to check per read length
        orf_set = orf_set[[(counts[(start_idxes.repeat(len(rdlens))+offsetmat)].sum() >= opts.startcount) for start_idxes in
                           [tid_indices[tid][tcoord-1:tcoord+2] for (tid, tcoord, tstop) in orf_set[['tid', 'tcoord', 'tstop']].itertuples(False)]]]
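        # Indexing note: counts was raveled from shape (len(rdlens), nnt), so the
        # count at family position p for read length i sits at flat index i*nnt + p.
        # Repeating the three start-flanking positions len(rdlens) times and adding
        # offsetmat therefore sums those positions across every read length.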
        if orf_set.empty:
            return failure_return

    orf_strength_df = orf_set.sort_values('tcoord', ascending=False).drop_duplicates('orfname').reset_index(drop=True)

    abort_set = orf_set.drop_duplicates('gcoord').copy()
    abort_set['gstop'] = abort_set['gcoord']  # should maybe be +/- 3 nt, but then we would need to worry about splicing - and this is an easy flag