text
stringlengths 1
93.6k
|
|---|
'in that directory. (Default: offsets.txt)')
|
# Command-line options controlling read trimming, output destinations, the
# metagene modeling windows, and general run behavior.
# NOTE: fixed a missing space in the --max5mis help text, which previously
# rendered as "...excluded.(Default: 1)".
parser.add_argument('--max5mis', type=int, default=1,
                    help='Maximum 5\' mismatches to trim. Reads with more than this number will be excluded. '
                         '(Default: 1)')
parser.add_argument('--regressfile', default='regression.h5',
                    help='Filename to which to output the table of regression scores for each ORF. Formatted as pandas HDF (tables generated include '
                         '"start_strengths", "orf_strengths", and "stop_strengths"). If SUBDIR is set, this file will be placed in that directory. '
                         '(Default: regression.h5)')
parser.add_argument('--startonly', action='store_true',
                    help='Toggle for datasets collected in the presence of initiation inhibitor (e.g. HARR, '
                         'LTM). If selected, "stop_strengths" will not be calculated or saved.')
parser.add_argument('--startrange', type=int, nargs=2, default=[1, 50],
                    help='Region around start codon (in codons) to model explicitly. Ignored if reading metagene from file (Default: 1 50, meaning '
                         'one full codon before the start is modeled, as are the start codon and the 49 codons following it).')
parser.add_argument('--stoprange', type=int, nargs=2, default=[7, 0],
                    help='Region around stop codon (in codons) to model explicitly. Ignored if reading metagene from file (Default: 7 0, meaning '
                         'seven full codons before and including the stop are modeled, but none after).')
parser.add_argument('--mincdsreads', type=int, default=64,
                    help='Minimum number of reads required within the body of the CDS (and any surrounding nucleotides indicated by STARTRANGE or '
                         'STOPRANGE) for it to be included in the metagene. Ignored if reading metagene from file (Default: 64).')
parser.add_argument('--startcount', type=int, default=0,
                    help='Minimum reads at putative translation initiation codon. Useful to reduce computational burden by only considering ORFs '
                         'with e.g. at least 1 read at the start. (Default: 0)')
parser.add_argument('--metagenefile', default='metagene.txt',
                    help='File to save metagene profile, OR if the file already exists, it will be used as the input metagene. Formatted as '
                         'tab-delimited text, with position, readlength, value, and type ("START", "CDS", or "STOP"). If SUBDIR is set, this file '
                         'will be placed in that directory. (Default: metagene.txt)')
parser.add_argument('--noregress', action='store_true', help='Only generate a metagene (i.e. do not perform any regressions)')
parser.add_argument('--exclude', nargs='+',
                    help='Names of transcript families (tfams) to exclude from analysis due to excessive computational time '
                         'or memory footprint (e.g. TTN can be so large that the regression never finishes).')
parser.add_argument('-v', '--verbose', action='count', help='Output a log of progress and timing (to stdout). Repeat for higher verbosity level.')
parser.add_argument('-p', '--numproc', type=int, default=1, help='Number of processes to run. Defaults to 1 but more recommended if available.')
parser.add_argument('-f', '--force', action='store_true',
                    help='Force file overwrite. This will overwrite both METAGENEFILE and REGRESSFILE, if they exist. To overwrite only REGRESSFILE '
                         '(and not the METAGENEFILE), do not invoke this option but simply delete REGRESSFILE.')
|
# Parse the command line and resolve every input/output path relative to
# SUBDIR (os.path.join leaves absolute filenames untouched).
opts = parser.parse_args()
offsetfilename = os.path.join(opts.subdir, opts.offsetfile)
metafilename = os.path.join(opts.subdir, opts.metagenefile)
regressfilename = os.path.join(opts.subdir, opts.regressfile)

# Refuse to clobber an existing regression output unless --force was given;
# mention the metagene file too when --force would also recalculate it.
if not opts.force and os.path.exists(regressfilename):
    if os.path.exists(metafilename):
        raise IOError('%s exists; use --force to overwrite (will also recalculate metagene and overwrite %s)' % (regressfilename, metafilename))
    raise IOError('%s exists; use --force to overwrite' % regressfilename)
|
# Resolve each --restrictbystarts argument to a concrete regression file:
# the argument may name a file directly, or a directory expected to contain
# a file named like REGRESSFILE.
restrictbystartfilenames = []
if opts.restrictbystarts:
    # A single --minwstart value is broadcast across all --restrictbystarts
    # entries; otherwise the counts must match exactly.
    if len(opts.restrictbystarts) > 1 and len(opts.minwstart) == 1:
        opts.minwstart *= len(opts.restrictbystarts)
    if len(opts.minwstart) != len(opts.restrictbystarts):
        raise ValueError('--minwstart must be given same number of values as --restrictbystarts, or one value for all')
    for curr_restrict in opts.restrictbystarts:
        if os.path.isfile(curr_restrict):
            restrictbystartfilenames.append(curr_restrict)
        else:
            candidate = os.path.join(curr_restrict, opts.regressfile)
            if os.path.isdir(curr_restrict) and os.path.isfile(candidate):
                restrictbystartfilenames.append(candidate)
            else:
                raise IOError('Regression file/directory %s not found' % curr_restrict)
|
if opts.verbose:
    # Echo the exact invocation so the log is self-documenting.
    sys.stdout.write(' '.join(sys.argv) + '\n')

    def logprint(nextstr):
        """Write a timestamped progress line to stdout and flush immediately."""
        timestamp = strftime('%Y-%m-%d %H:%M:%S')
        sys.stdout.write('[%s] %s\n' % (timestamp, nextstr))
        sys.stdout.flush()

    # presumably serializes logprint output across worker processes — acquired elsewhere
    log_lock = mp.Lock()
|
# Load the P-site offset table: whitespace-delimited lines of
# "readlength offset". For each read length, also register entries for reads
# carrying up to --max5mis 5' mismatches, shifting the offset as though the
# mismatched bases were absent from the read entirely.
rdlens = []
Pdict = {}
# NOTE: was open(offsetfilename, 'rU'); the 'U' mode flag is deprecated since
# Python 3.4 and removed in 3.11 — universal newlines are the default in text mode.
with open(offsetfilename, 'r') as infile:
    for line in infile:
        ls = line.strip().split()
        rdlen = int(ls[0])
        for nmis in range(opts.max5mis + 1):
            # e.g. if nmis == 1, offset as though the read were missing that base entirely
            Pdict[(rdlen, nmis)] = int(ls[1]) + nmis
        rdlens.append(rdlen)
rdlens.sort()
|
# Hash transcript BED lines by ID (BED column 4) for easy reference later.
# NOTE: was open(opts.inbed, 'rU'); the 'U' mode flag is deprecated since
# Python 3.4 and removed in 3.11 — plain text mode already handles universal newlines.
with open(opts.inbed, 'r') as inbed:
    bedlinedict = {line.split()[3]: line for line in inbed}
|
def _get_annotated_counts_by_chrom(chrom_to_do):
|
"""Accumulate counts from annotated CDSs into a metagene profile. Only the longest CDS in each transcript family will be included, and only if it
|
meets the minimum number-of-reads requirement. Reads are normalized by gene, so every gene included contributes equally to the final metagene."""
|
found_cds = pd.read_hdf(opts.orfstore, 'all_orfs', mode='r',
|
where="chrom == '%s' and orftype == 'annotated' and tstop > 0 and tcoord > %d and AAlen > %d"
|
% (chrom_to_do, -startnt[0], min_AAlen),
|
columns=['orfname', 'tfam', 'tid', 'tcoord', 'tstop', 'AAlen']) \
|
.sort_values('AAlen', ascending=False).drop_duplicates('tfam') # use the longest annotated CDS in each transcript family
|
num_cds_incl = 0 # number of CDSs included from this chromosome
|
startprof = np.zeros((len(rdlens), startlen))
|
cdsprof = np.zeros((len(rdlens), 3))
|
stopprof = np.zeros((len(rdlens), stoplen))
|
inbams = [pysam.Samfile(infile, 'rb') for infile in opts.bamfiles]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.