abort_set['tstop'] = abort_set['tcoord'] + 3  # stop after the first codon
abort_set['orfname'] = abort_set['gcoord'].apply(lambda x: '%s_%d_abort' % (tfam, x))
orf_strength_df = pd.concat((orf_strength_df, abort_set), ignore_index=True)
if not opts.startonly:  # if marking full ORFs, include histop model
    stop_set = orf_set.drop_duplicates('gstop').copy()
    stop_set['gcoord'] = stop_set['gstop']  # this is an easy flag
    stop_set['tcoord'] = stop_set['tstop']  # should probably be -3 nt, but this is another easy flag that distinguishes from abinit
    stop_set['orfname'] = stop_set['gstop'].apply(lambda x: '%s_%d_stop' % (tfam, x))
    orf_strength_df = pd.concat((orf_strength_df, stop_set), ignore_index=True)
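
# At this point orf_strength_df holds one row per model to fit: the real ORFs,
# an 'abort' model per start codon (initiation without elongation past the first
# codon), and, unless opts.startonly is set, a 'stop' ("histop") model per stop
# codon, flagged by tcoord == tstop, presumably to absorb signal from ribosomes
# dwelling at the stop codon.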
orf_profs = []
indices = []
for (tid, tcoord, tstop) in orf_strength_df[['tid', 'tcoord', 'tstop']].itertuples(False):
    if tcoord != tstop:  # not a histop
        tlen = tlens[tid]
        if tcoord + startnt[0] < 0:
            startadj = -startnt[0] - tcoord  # number of nts to remove from the start due to a short 5' UTR; guaranteed > 0
        else:
            startadj = 0
        if tstop + stopnt[1] > tlen:
            stopadj = tstop + stopnt[1] - tlen  # number of nts to remove from the end due to a short 3' UTR; guaranteed > 0
        else:
            stopadj = 0
        curr_indices = tid_indices[tid][tcoord + startnt[0] + startadj:tstop + stopnt[1] - stopadj]
        orf_profs.append(_orf_profile(tstop - tcoord)[:, startadj:tstop - tcoord + stopnt[1] - startnt[0] - stopadj].ravel())
    else:  # histop
        curr_indices = tid_indices[tid][tstop - 6:tstop]
        orf_profs.append(stopprof[:, -6:].ravel())
    # tile the indices for each read length
    indices.append(np.concatenate([nnt * i + curr_indices for i in xrange(len(rdlens))]))
    if len(indices[-1]) != len(orf_profs[-1]):
        raise AssertionError('ORF length does not match index length')
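
# Assemble the design matrix: one column per model, one row per (read length,
# transcript position) pair. Each column holds that model's expected profile at
# the positions it covers; the NNLS fit below estimates all model strengths at once.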
orf_matrix = scipy.sparse.csc_matrix((np.concatenate(orf_profs),
                                      np.concatenate(indices),
                                      np.cumsum([0] + [len(curr_indices) for curr_indices in indices])),
                                     shape=(nnt * len(rdlens), len(orf_strength_df)))
# better to make it a sparse matrix, even though nnls requires a dense matrix, because of linear algebra to come
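# For reference: csc_matrix((data, indices, indptr)) is scipy's compressed sparse
# column constructor. np.concatenate(orf_profs) supplies the nonzero values,
# np.concatenate(indices) their row positions, and the cumulative sum marks where
# each model's column starts and ends within those two arrays.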
nonzero_orfs = np.flatnonzero(orf_matrix.T.dot(counts) > 0)
if len(nonzero_orfs) == 0:  # no possibility of anything coming up
    return failure_return
orf_matrix = orf_matrix[:, nonzero_orfs]
orf_strength_df = orf_strength_df.iloc[nonzero_orfs]  # don't bother fitting ORFs with zero reads throughout their entire length
(orf_strs, resid) = nnls(orf_matrix.toarray(), counts)
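# nnls solves min ||A.dot(x) - b|| subject to x >= 0; orf_strs is the vector of
# fitted non-negative model strengths and resid is the 2-norm of the residual,
# reused below to estimate the error variance.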
min_str = 1e-6  # allow for machine rounding error
usable_orfs = orf_strs > min_str
if not usable_orfs.any():
    return failure_return
orf_strength_df = orf_strength_df[usable_orfs]
orf_matrix = orf_matrix[:, usable_orfs]  # remove entries for zero-strength ORFs or transcripts
orf_strs = orf_strs[usable_orfs]
orf_strength_df['orf_strength'] = orf_strs
covmat = resid * resid * np.linalg.inv(orf_matrix.T.dot(orf_matrix).toarray()) / (nnt * len(rdlens) - len(orf_strength_df))
# homoscedastic version (assume equal variance at all positions)
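# i.e. the standard OLS covariance Cov(beta) = sigma^2 * inv(X'X), where sigma^2
# is estimated as resid**2 (nnls returns the residual 2-norm, so this is the
# residual sum of squares) divided by the degrees of freedom n - p: the number of
# rows in the design matrix minus the number of fitted models.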
# heteroscedastic version (Eicker-Huber-White robust estimator):
# resids = counts - orf_matrix.dot(orf_strs)
# simple_covmat = np.linalg.inv(orf_matrix.T.dot(orf_matrix).toarray())
# covmat = simple_covmat.dot(orf_matrix.T.dot(scipy.sparse.dia_matrix((resids*resids, 0), (len(resids), len(resids))))
#                            .dot(orf_matrix).dot(simple_covmat))
orf_strength_df['W_orf'] = orf_strength_df['orf_strength'] * orf_strength_df['orf_strength'] / np.diag(covmat)
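# W_orf is a per-model Wald statistic, beta**2 / Var(beta): the number of squared
# standard errors by which the fitted strength exceeds zero.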
orf_strength_df.set_index('orfname', inplace=True)
elongating_orfs = ~(orf_strength_df['gstop'] == orf_strength_df['gcoord'])
if opts.startonly:  # count abortive initiation events towards start strength in this case
    include_starts = (orf_strength_df['tcoord'] != orf_strength_df['tstop'])
    if not include_starts.any():
        return failure_return  # no need to keep going if there weren't any useful starts
    # even if we are willing to count abinit towards start strength, we certainly shouldn't count histop
    gcoord_grps = orf_strength_df[include_starts].groupby('gcoord')
    covmat_starts = covmat[np.ix_(include_starts.values, include_starts.values)]
    orf_strs_starts = orf_strs[include_starts.values]
else:
    if not elongating_orfs.any():
        return failure_return
    gcoord_grps = orf_strength_df[elongating_orfs].groupby('gcoord')
    covmat_starts = covmat[np.ix_(elongating_orfs.values, elongating_orfs.values)]
    orf_strs_starts = orf_strs[elongating_orfs.values]
start_strength_df = pd.DataFrame.from_items([('tfam', tfam),
                                             ('chrom', orf_set['chrom'].iloc[0]),
                                             ('strand', orf_set['strand'].iloc[0]),
                                             ('codon', gcoord_grps['codon'].first()),
                                             ('start_strength', gcoord_grps['orf_strength'].aggregate(np.sum))])
start_strength_df['W_start'] = pd.Series({gcoord: orf_strs_starts[rownums].dot(np.linalg.inv(covmat_starts[np.ix_(rownums, rownums)]))
                                                                          .dot(orf_strs_starts[rownums])
                                          for (gcoord, rownums) in gcoord_grps.indices.iteritems()})
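# W_start generalizes W_orf to the group of models sharing a start codon: it is
# the multivariate Wald statistic beta.T.dot(inv(Sigma)).dot(beta), computed from
# the fitted strengths and covariance submatrix of each gcoord group.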
if not opts.startonly:
    # count histop towards the stop codon - but still exclude abinit
    include_stops = (elongating_orfs | (orf_strength_df['tcoord'] == orf_strength_df['tstop']))
    gstop_grps = orf_strength_df[include_stops].groupby('gstop')
    covmat_stops = covmat[np.ix_(include_stops.values, include_stops.values)]
    orf_strs_stops = orf_strs[include_stops.values]
    stop_strength_df = pd.DataFrame.from_items([('tfam', tfam),
                                                ('chrom', orf_set['chrom'].iloc[0]),
                                                ('strand', orf_set['strand'].iloc[0]),
                                                ('stop_strength', gstop_grps['orf_strength'].aggregate(np.sum))])
    stop_strength_df['W_stop'] = pd.Series({gstop: orf_strs_stops[rownums].dot(np.linalg.inv(covmat_stops[np.ix_(rownums, rownums)]))
                                                                          .dot(orf_strs_stops[rownums])
                                            for (gstop, rownums) in gstop_grps.indices.iteritems()})
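    # W_stop is the same grouped Wald statistic as W_start, computed per stop
    # codon over the elongating ORFs and histop models that share it.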
    # # nohistop version (would group by stop codon over elongating ORFs only):
    # gstop_grps = orf_strength_df[elongating_orfs].groupby('gstop')