code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
new_reads = defaultdict(realign) for r in reads: world = {} sc = 0 for p in reads[r].precursors: world[p] = reads[r].precursors[p].get_score(len(reads[r].sequence)) if sc < world[p]: sc = world[p] new_reads[r] = reads[r] for p in world: logger.debug("score %s %s %s" % (r, p, world[p])) if sc != world[p]: logger.debug("remove %s %s %s" % (r, p, world[p])) new_reads[r].remove_precursor(p) return new_reads
def _clean_hits(reads)
Select only best matches
2.89736
2.763884
1.048293
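The best-hit filtering above can be sketched with plain dicts; the project's realign/isomir classes are not reproduced here, and the read and precursor names below are invented.

# Minimal sketch of the best-hit filter in _clean_hits, using plain dicts
# instead of the project's realign/isomir objects (hypothetical data shapes).
def clean_hits(scores_by_read):
    """scores_by_read: {read_id: {precursor_id: score}} -> keep only best-scoring precursors."""
    cleaned = {}
    for read, scores in scores_by_read.items():
        best = max(scores.values())
        cleaned[read] = {p: s for p, s in scores.items() if s == best}
    return cleaned

print(clean_hits({"seq_1": {"mir-21": 40, "mir-22": 18}}))
# {'seq_1': {'mir-21': 40}}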
mode = "r" if bam_fn.endswith("sam") else "rb" handle = pysam.Samfile(bam_fn, mode) reads = defaultdict(realign) for line in handle: chrom = handle.getrname(line.reference_id) # print("%s %s %s %s" % (line.query_name, line.reference_start, line.query_sequence, chrom)) query_name = line.query_name if query_name not in reads: reads[query_name].sequence = line.query_sequence iso = isomir() iso.align = line iso.start = line.reference_start iso.subs, iso.add = _realign(reads[query_name].sequence, precursors[chrom], line.reference_start) reads[query_name].set_precursor(chrom, iso) reads = _clean_hits(reads) return reads
def _read_bam(bam_fn, precursors)
read bam file and perform realignment of hits
3.5546
3.420289
1.039269
args = argparse.Namespace() args.fastq = in_fn args.minimum = 1 args.out = op.dirname(in_fn) return collapse_fastq(args)
def _collapse_fastq(in_fn)
collapse reads into unique sequences
4.7848
4.241566
1.128074
with open(fn) as handle: reads = defaultdict(realign) for line in handle: query_name, seq, chrom, reference_start, end, mism, add = line.split() reference_start = int(reference_start) # chrom = handle.getrname(cols[1]) # print("%s %s %s %s" % (line.query_name, line.reference_start, line.query_sequence, chrom)) if query_name not in reads: reads[query_name].sequence = seq iso = isomir() iso.align = line iso.start = reference_start iso.subs, iso.add = _realign(reads[query_name].sequence, precursors[chrom], reference_start) logger.debug("%s %s %s %s %s" % (query_name, reference_start, chrom, iso.subs, iso.add)) if len(iso.subs) > 1: continue reads[query_name].set_precursor(chrom, iso) reads = _clean_hits(reads) return reads
def _read_pyMatch(fn, precursors)
read pyMatch file and perform realignment of hits
4.537289
4.394373
1.032522
if subs!="0": subs = [[subs.replace(subs[-2:], ""),subs[-2], subs[-1]]] return subs
def _parse_mut(subs)
Parse mutation tag from miraligner output
16.65823
16.861938
0.987919
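A hedged walk-through of this tag parsing, assuming the miraligner mutation tag has the form <position><ref_nt><obs_nt> (e.g. "12TC") and "0" means no mutation.

# Hypothetical miraligner-style mutation tags pushed through the parsing above.
def parse_mut(subs):
    if subs != "0":
        subs = [[subs.replace(subs[-2:], ""), subs[-2], subs[-1]]]
    return subs

print(parse_mut("12TC"))  # [['12', 'T', 'C']]
print(parse_mut("0"))     # "0" is returned untouched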
reads = defaultdict(realign) with open(fn) as in_handle: in_handle.next() for line in in_handle: cols = line.strip().split("\t") iso = isomir() query_name, seq = cols[1], cols[0] chrom, reference_start = cols[-2], cols[3] iso.mirna = cols[3] subs, add, iso.t5, iso.t3 = cols[6:10] if query_name not in reads: reads[query_name].sequence = seq iso.align = line iso.start = reference_start iso.subs, iso.add = _parse_mut(subs), add logger.debug("%s %s %s %s %s" % (query_name, reference_start, chrom, iso.subs, iso.add)) reads[query_name].set_precursor(chrom, iso) return reads
def _read_miraligner(fn)
Read output of miraligner and create compatible output.
4.540689
4.530695
1.002206
tool = _get_miraligner() path_db = op.dirname(op.abspath(hairpin)) cmd = "{tool} -freq -i {fn} -o {out_file} -s {species} -db {path_db} -sub 1 -trim 3 -add 3" if not file_exists(out_file): logger.info("Running miraligner with %s" % fn) do.run(cmd.format(**locals()), "miraligner with %s" % fn) shutil.move(out_file + ".mirna", out_file) return out_file
def _cmd_miraligner(fn, out_file, species, hairpin, out)
Run miraligner for miRNA annotation
4.033125
3.987603
1.011416
args = argparse.Namespace() args.hairpin = hairpin args.sps = species args.gtf = gff3 args.add_extra = True args.files = out_files args.format = "seqbuster" args.out_format = "gff" args.out = out reader(args)
def _mirtop(out_files, hairpin, gff3, species, out)
Convert miraligner to mirtop format
4.979403
4.967652
1.002365
df = pd.concat(dts) ma = df.pivot(index='isomir', columns='sample', values='counts') ma_mirna = ma ma = ma.fillna(0) ma_mirna['mirna'] = [m.split(":")[0] for m in ma.index.values] ma_mirna = ma_mirna.groupby(['mirna']).sum() ma_mirna = ma_mirna.fillna(0) return ma, ma_mirna
def _merge(dts)
merge multiple samples in one matrix
3.701982
3.477748
1.064477
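A tiny illustration of the pivot/groupby step in _merge; only the column names (isomir, sample, counts) come from the code above, the labels and values are invented.

# Build the isomiR count matrix and collapse it to one row per miRNA.
import pandas as pd

dts = [pd.DataFrame({"isomir": ["mir-21:ref", "mir-21:3TC"], "sample": ["A", "A"], "counts": [10, 2]}),
       pd.DataFrame({"isomir": ["mir-21:ref"], "sample": ["B"], "counts": [7]})]
df = pd.concat(dts)
ma = df.pivot(index="isomir", columns="sample", values="counts").fillna(0)
ma_mirna = ma.copy()
ma_mirna["mirna"] = [m.split(":")[0] for m in ma.index.values]
print(ma_mirna.groupby("mirna").sum())  # one row per miRNA: A=12, B=7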
ma, ma_mirna = _merge(out_dts) out_ma = op.join(out_dir, "counts.tsv") out_ma_mirna = op.join(out_dir, "counts_mirna.tsv") ma.to_csv(out_ma, sep="\t") ma_mirna.to_csv(out_ma_mirna, sep="\t") return out_ma_mirna, out_ma
def _create_counts(out_dts, out_dir)
Summarize results into single files.
2.403386
2.306927
1.041813
hairpin, mirna = _download_mirbase(args) precursors = _read_precursor(args.hairpin, args.sps) matures = _read_mature(args.mirna, args.sps) gtf = _read_gtf(args.gtf) out_dts = [] out_files = [] for bam_fn in args.files: sample = op.splitext(op.basename(bam_fn))[0] logger.info("Reading %s" % bam_fn) if bam_fn.endswith("bam") or bam_fn.endswith("sam"): bam_fn = _sam_to_bam(bam_fn) bam_sort_by_n = op.splitext(bam_fn)[0] + "_sort" pysam.sort("-n", bam_fn, bam_sort_by_n) reads = _read_bam(bam_sort_by_n + ".bam", precursors) elif bam_fn.endswith("fasta") or bam_fn.endswith("fa") or \ bam_fn.endswith("fastq"): if args.collapse: bam_fn = _collapse_fastq(bam_fn) out_file = op.join(args.out, sample + ".premirna") bam_fn = _filter_seqs(bam_fn) if args.miraligner: _cmd_miraligner(bam_fn, out_file, args.sps, args.hairpin, args.out) reads = _read_miraligner(out_file) out_files.append(out_file) else: raise ValueError("Format not recognized.") if args.miraligner: _mirtop(out_files, args.hairpin, args.gtf, args.sps, args.out) if not args.miraligner: reads = _annotate(reads, matures, precursors) out_file = op.join(args.out, sample + ".mirna") out_file, dt, dt_pre = _tab_output(reads, out_file, sample) try: vcf_file = op.join(args.out, sample + ".vcf") if not file_exists(vcf_file): # if True: create_vcf(dt_pre, matures, gtf, vcf_file) try: import vcf vcf.Reader(filename=vcf_file) except Exception as e: logger.warning(e.__doc__) logger.warning(e.message) except Exception as e: # traceback.print_exc() logger.warning(e.__doc__) logger.warning(e.message) if isinstance(dt, pd.DataFrame): out_dts.append(dt) if out_dts: _create_counts(out_dts, args.out) else: print("No files analyzed!")
def miraligner(args)
Realign BAM hits to miRBase to get better accuracy and annotation
2.98252
2.960218
1.007534
cur_dir = os.getcwd() _mkdir(new_dir) os.chdir(new_dir) try: yield finally: os.chdir(cur_dir)
def chdir(new_dir)
stolen from bcbio. Context manager to temporarily change to a new directory. http://lucentbeing.com/blog/context-managers-and-the-with-statement-in-python/
2.348625
2.607107
0.900855
target = op.join("seqcluster", "flavor") url = "https://github.com/lpantano/seqcluster.git" if not os.path.exists(target): # shutil.rmtree("seqcluster") subprocess.check_call(["git", "clone","-b", "flavor", "--single-branch", url]) return op.abspath(target)
def _get_flavor()
Download flavor from github
5.683962
5.040513
1.127656
try: from bcbio import install as bcb except: raise ImportError("It needs bcbio to do the quick installation.") path_flavor = _get_flavor() s = {"fabricrc_overrides": {"system_install": path, "local_install": os.path.join(path, "local_install"), "use_sudo": "false", "edition": "minimal"}} s = {"flavor": path_flavor, # "target": "[brew, conda]", "vm_provider": "novm", "hostname": "localhost", "fabricrc_overrides": {"edition": "minimal", "use_sudo": "false", "keep_isolated": "true", "conda_cmd": bcb._get_conda_bin(), "distribution": "__auto__", "dist_name": "__auto__"}} s["actions"] = ["install_biolinux"] s["fabricrc_overrides"]["system_install"] = path s["fabricrc_overrides"]["local_install"] = os.path.join(path, "local_install") cbl = bcb.get_cloudbiolinux(bcb.REMOTES) sys.path.insert(0, cbl["dir"]) cbl_deploy = __import__("cloudbio.deploy", fromlist=["deploy"]) cbl_deploy.deploy(s)
def _install(path, args)
Small helper for installation when running outside bcbio
6.625754
6.442813
1.028395
try: from bcbio import install as bcb except: raise ImportError("It needs bcbio to do the quick installation.") bio_data = op.join(path_flavor, "../biodata.yaml") s = {"flavor": path_flavor, # "target": "[brew, conda]", "vm_provider": "novm", "hostname": "localhost", "fabricrc_overrides": {"edition": "minimal", "use_sudo": "false", "keep_isolated": "true", "conda_cmd": bcb._get_conda_bin(), "distribution": "__auto__", "dist_name": "__auto__"}} s["actions"] = ["setup_biodata"] s["fabricrc_overrides"]["data_files"] = data_dir s["fabricrc_overrides"]["galaxy_home"] = os.path.join(data_dir, "galaxy") cbl = bcb.get_cloudbiolinux(bcb.REMOTES) s["genomes"] = _get_biodata(bio_data, args) sys.path.insert(0, cbl["dir"]) cbl_deploy = __import__("cloudbio.deploy", fromlist=["deploy"]) cbl_deploy.deploy(s)
def _install_data(data_dir, path_flavor, args)
Upgrade required genome data files in place.
7.701666
7.556688
1.019185
logger.info(args) logger.info("reading sequences") out_file = os.path.abspath(os.path.splitext(args.json)[0] + "_prediction.json") data = load_data(args.json) out_dir = os.path.abspath(safe_dirs(os.path.join(args.out, "predictions"))) logger.info("make predictions") data = is_tRNA(data, out_dir, args) if args.coral: logger.info("make CoRaL predictions") run_coral(data, out_dir, args) write_data(data[0], out_file) logger.info("Done")
def predictions(args)
Create predictions of clusters
4.584297
4.580088
1.000919
# Original Py 2.7 code #data_loci = map(lambda (x): [x, loci[x].chr, int(loci[x].start), int(loci[x].end), loci[x].strand, len(c.loci2seq[x])], c.loci2seq.keys()) # 2to3 suggested Py 3 rewrite data_loci = [[x, loci[x].chr, int(loci[x].start), int(loci[x].end), loci[x].strand, len(c.loci2seq[x])] for x in list(c.loci2seq.keys())] data_loci = sorted(data_loci, key=itemgetter(5), reverse=True) return data_loci
def sort_precursor(c, loci)
Sort loci according to number of sequences mapped there.
2.790012
2.710421
1.029365
data_loci = sort_precursor(clus, loci) current_size = data_loci[0][5] best = 0 for item, locus in enumerate(data_loci): if locus[3] - locus[2] > 70: if locus[5] > current_size * 0.8: best = item break best_loci = data_loci[best] del data_loci[best] data_loci.insert(0, best_loci) return data_loci
def best_precursor(clus, loci)
Select the best precursor, assuming a size of around 100 nt
3.029814
2.822393
1.073491
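The selection rule reads as: take the first locus, in descending order of mapped sequences, that is longer than 70 nt and holds more than 80% of the sequences of the biggest locus, then move it to the front. A small sketch with invented loci rows [name, chrom, start, end, strand, n_seqs]:

# Invented loci rows; only the 70-nt and 80% thresholds come from the code above.
data_loci = [["l1", "chr1", 100, 150, "+", 50],   # biggest, but only 50 nt long
             ["l2", "chr1", 300, 400, "+", 45]]   # >70 nt and >80% of the biggest
current_size = data_loci[0][5]
best = 0
for item, locus in enumerate(data_loci):
    if locus[3] - locus[2] > 70 and locus[5] > current_size * 0.8:
        best = item
        break
best_loci = data_loci.pop(best)
data_loci.insert(0, best_loci)
print(data_loci[0][0])  # 'l2' is promoted to the front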
_, ext = os.path.splitext(in_file) if ext == ".gz": return gzip.open(in_file, 'rb') if ext in [".fastq", ".fq"]: return open(in_file, 'r') # default to just opening it return open(in_file, "r")
def _open_file(in_file)
From bcbio code
2.548833
2.278496
1.118647
with open(out, 'w') as out_handle: print(_create_header(mirna, snp, out), file=out_handle, end="") snp_in_mirna = pybedtools.BedTool(snp).intersect(pybedtools.BedTool(mirna), wo=True) for single in snp_in_mirna: if single[10] == "miRNA" and len(single[3]) + len(single[4]) == 2: line = [] rel_p = _lift_positions(single) line.append(_get_mirna_name(single[16])) line.append(str(rel_p)) line.append(single[2]) line.append(_complement(single[3], single[14])) line.append(_complement(single[4], single[14])) line.append(single[5]) line.append(single[6]) line.append(single[7]) print("\t".join(line), file=out_handle, end="") return out
def select_snps(mirna, snp, out)
Use bedtools to intersect coordinates
2.834905
2.773764
1.022043
if 1.0 * x/s >= p: return True elif stat.binom_test(x, s, p) > 0.01: return True return False
def up_threshold(x, s, p)
Decide whether a similarity value passes the cutoff
4.52296
5.382501
0.840308
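A quick check of this rule, assuming `stat` in the source refers to scipy.stats (binom_test is its legacy two-sided binomial test; recent SciPy exposes it as stats.binomtest(x, s, p).pvalue).

# Reproduction of the up_threshold rule with small numeric examples.
from scipy import stats as stat

def up_threshold(x, s, p):
    if 1.0 * x / s >= p:
        return True
    elif stat.binom_test(x, s, p) > 0.01:
        return True
    return False

print(up_threshold(8, 10, 0.7))  # True: 0.8 >= 0.7
print(up_threshold(5, 10, 0.7))  # True: ratio is low, but the binomial p-value is well above 0.01
print(up_threshold(1, 10, 0.7))  # False: 1/10 is very unlikely under p=0.7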
scores = [] for start in range(0, len(positions) - 17, 5): end = start + 17 scores.append(_enrichment(positions[start:end], positions[:start], positions[end:])) return scores
def _scan(positions)
Get the region inside the vector with the highest expression
7.15579
6.882535
1.039703
args = _check_args(args) read_stats_file = op.join(args.dir_out, "read_stats.tsv") if file_exists(read_stats_file): os.remove(read_stats_file) bam_file, seq_obj = _clean_alignment(args) logger.info("Parsing matrix file") seqL, y, l = parse_ma_file(seq_obj, args.ffile) # y, l = _total_counts(seqL.keys(), seqL) logger.info("counts after: %s" % sum(y.values())) logger.info("# sequences after: %s" % l) dt = pd.DataFrame({'sample': y.keys(), 'counts': y.values()}) dt['step'] = 'aligned' dt.to_csv(read_stats_file, sep="\t", index=False, header=False, mode='a') if len(seqL.keys()) < 10: logger.error("It seems you have low coverage. Please check your fastq files have enough sequences.") raise ValueError("So few sequences.") logger.info("Cleaning bam file") y, l = _total_counts(seqL.keys(), seqL) logger.info("counts after: %s" % sum(y.values())) logger.info("# sequences after: %s" % l) dt = pd.DataFrame({'sample': y.keys(), 'counts': y.values()}) dt['step'] = 'cleaned' dt.to_csv(read_stats_file, sep="\t", index=False, header=False, mode='a') clusL = _create_clusters(seqL, bam_file, args) y, l = _total_counts(clusL.seq.keys(), clusL.seq, aligned=True) logger.info("counts after: %s" % sum(y.values())) logger.info("# sequences after: %s" % l) dt = pd.DataFrame({'sample': y.keys(), 'counts': y.values()}) dt['step'] = 'clusters' dt.to_csv(read_stats_file, sep="\t", index=False, header=False, mode='a') logger.info("Solving multi-mapping events in the network of clusters") clusLred = _cleaning(clusL, args.dir_out) y, l = _total_counts(clusLred.clus, seqL) logger.info("counts after: %s" % sum(y.values())) logger.info("# sequences after: %s" % l) dt = pd.DataFrame({'sample': y.keys(), 'counts': y.values()}) dt['step'] = 'meta-cluster' dt.to_csv(read_stats_file, sep="\t", index=False, header=False, mode='a') logger.info("Clusters up to %s" % (len(clusLred.clus.keys()))) if args.show: logger.info("Creating sequences alignment to precursor") clusLred = show_seq(clusLred, args.index) clusLred = peak_calling(clusLred) clusLred = _annotate(args, clusLred) logger.info("Creating json and count matrix") json_file = _create_json(clusLred, args) logger.info("Output file in: %s" % args.dir_out) if args.db: name = args.db + ".db" logger.info("Create database: database/" + name) data = load_data(json_file) out_dir = op.join(args.dir_out, "database") make_database(data, name, out_dir) logger.info("Finished")
def cluster(args)
Creating clusters
2.921937
2.915419
1.002236
logger.info("Checking parameters and files") args.dir_out = args.out args.samplename = "pro" global decision_cluster global similar if not os.path.isdir(args.out): logger.warning("the output folder doens't exists") os.mkdirs(args.out) if args.bed and args.gtf: logger.error("cannot provide -b and -g at the same time") raise SyntaxError if args.debug: logger.info("DEBUG messages will be showed in file.") if args.bed: args.list_files = args.bed args.type_ann = "bed" if args.gtf: args.list_files = args.gtf args.type_ann = "gtf" logger.info("Output dir will be: %s" % args.dir_out) if not all([file_exists(args.ffile), file_exists(args.afile)]): logger.error("I/O error: Seqs.ma or Seqs.bam. ") raise IOError("Seqs.ma or/and Seqs.bam doesn't exists.") if hasattr(args, 'list_files'): beds = args.list_files.split(",") for filebed in beds: if not file_exists(filebed): logger.error("I/O error: {0}".format(filebed)) raise IOError("%s annotation files doesn't exist" % filebed) param.decision_cluster = args.method if args.similar: param.similar = float(args.similar) if args.min_seqs: param.min_seqs = int(args.min_seqs) return args
def _check_args(args)
check arguments before starting analysis.
4.517556
4.407618
1.024943
total = Counter() if isinstance(seqs, list): if not aligned: l = len([total.update(seqL[s].freq) for s in seqs]) else: l = len([total.update(seqL[s].freq) for s in seqs if seqL[s].align > 0]) elif isinstance(seqs, dict): [total.update(seqs[s].get_freq(seqL)) for s in seqs] l = sum(len(seqs[s].idmembers) for s in seqs) return total, l
def _total_counts(seqs, seqL, aligned=False)
Counts total seqs after each step
3.219875
3.181666
1.012009
data_ann_temp = {} data_ann = [] counts = Counter() for lid in c.loci2seq: # original Py 2.7 code #for dbi in loci[lid].db_ann.keys(): # data_ann_temp[dbi] = {dbi: map(lambda (x): loci[lid].db_ann[dbi].ann[x].name, loci[lid].db_ann[dbi].ann.keys())} # suggestion by 2to3 for dbi in list(loci[lid].db_ann.keys()): data_ann_temp[dbi] = {dbi: [loci[lid].db_ann[dbi].ann[x].name for x in list(loci[lid].db_ann[dbi].ann.keys())]} logger.debug("_json_: data_ann_temp %s %s" % (dbi, data_ann_temp[dbi])) counts[dbi] += 1 # original Py 2.7 code #data_ann = data_ann + map(lambda (x): data_ann_temp[x], data_ann_temp.keys()) # suggestion by 2to3 data_ann = data_ann + [data_ann_temp[x] for x in list(data_ann_temp.keys())] logger.debug("_json_: data_ann %s" % data_ann) counts = {k: v for k, v in counts.iteritems()} total_loci = sum([counts[db] for db in counts]) valid_ann = [k for k, v in counts.iteritems() if up_threshold(v, total_loci, 0.7)] return data_ann, valid_ann
def _get_annotation(c, loci)
get annotation of transcriptional units
2.869937
2.845091
1.008733
n = len(seqs_freq[seqs_freq.keys()[0]].freq.keys()) y = np.array([0] * n) for s in seqs_freq: x = seqs_freq[s].freq exp = [seqs_freq[s].freq[sam] for sam in samples_order] y = list(np.array(exp) + y) return y
def _sum_by_samples(seqs_freq, samples_order)
Sum sequences of a metacluster by samples.
3.409944
3.342698
1.020117
logger.info("Creating bed file") bedfile = generate_position_bed(setclus) a = pybedtools.BedTool(bedfile, from_string=True) beds = [] logger.info("Annotating clusters") if hasattr(args, 'list_files'): beds = args.list_files.split(",") for filebed in beds: logger.info("Using %s " % filebed) db = os.path.basename(filebed) b = pybedtools.BedTool(filebed) c = a.intersect(b, wo=True) setclus = anncluster(c, setclus, db, args.type_ann, args.feature_id) return setclus
def _annotate(args, setclus)
annotate transcriptional units with gtf/bed files provided by -b/g option
4.443424
4.562885
0.973819
logger.info("Clean bam file with highly repetitive reads with low counts. sum(counts)/n_hits > 1%") bam_file, seq_obj = clean_bam_file(args.afile, args.mask) logger.info("Using %s file" % bam_file) detect_complexity(bam_file, args.ref, args.out) return bam_file, seq_obj
def _clean_alignment(args)
Prepare alignment for cluster detection.
9.340625
9.165415
1.019116
clus_obj = [] cluster_file = op.join(args.out, "cluster.bed") if not os.path.exists(op.join(args.out, 'list_obj.pk')): if not file_exists(cluster_file): logger.info("Parsing aligned file") logger.info("Merging sequences") bedtools = os.path.join(os.path.dirname(sys.executable), "bedtools") bedtools = bedtools if os.path.exists(bedtools) else "bedtools" parse_cmd = "awk '{i=i+1;print $1\"\\t\"$2\"\\t\"$3\"\\t\"$4\"\\t\"i\"\\t\"$6}'" cmd = "{bedtools} bamtobed -i {bam_file} | {parse_cmd} | {bedtools} cluster -s -d 20 -i - > {cluster_file}" do.run(cmd.format(**locals())) c = pybedtools.BedTool(cluster_file) logger.info("Creating clusters") clus_obj = detect_clusters(c, seqL, args.min_seqs, args.non_un_gl) with open(op.join(args.out, 'list_obj.pk'), 'wb') as output: pickle.dump(clus_obj, output, pickle.HIGHEST_PROTOCOL) else: logger.info("Loading previous clusters") with open(op.join(args.out, 'list_obj.pk'), 'rb') as input: clus_obj = pickle.load(input) # bedfile = pybedtools.BedTool(generate_position_bed(clus_obj), from_string=True) # seqs_2_loci = bedfile.intersect(pybedtools.BedTool(aligned_bed, from_string=True), wo=True, s=True) # seqs_2_position = add_seqs_position_to_loci(seqs_2_loci, seqL) logger.info("%s clusters found" % (len(clus_obj.clusid))) return clus_obj
def _create_clusters(seqL, bam_file, args)
Cluster sequences and create metaclusters with multi-mappers.
3.050104
3.082741
0.989413
backup = op.join(path, "list_obj_red.pk") if not op.exists(backup): clus_obj = reduceloci(clusL, path) with open(backup, 'wb') as output: pickle.dump(clus_obj, output, pickle.HIGHEST_PROTOCOL) return clus_obj else: logger.info("Loading previous reduced clusters") with open(backup, 'rb') as in_handle: clus_obj = pickle.load(in_handle) return clus_obj
def _cleaning(clusL, path)
Load saved cluster and jump to next step
3.39257
3.291238
1.030788
logger.info("reading sequeces") data = load_data(args.json) logger.info("get sequences from json") #get_sequences_from_cluster() c1, c2 = args.names.split(",") seqs, names = get_sequences_from_cluster(c1, c2, data[0]) loci = get_precursors_from_cluster(c1, c2, data[0]) logger.info("map all sequences to all loci") print("%s" % (loci)) map_to_precursors(seqs, names, loci, os.path.join(args.out, "map.tsv"), args) #map_sequences_w_bowtie(sequences, precursors) logger.info("plot sequences on loci") #get_matrix_position() #plot_sequences() logger.info("Done")
def explore(args)
Create mapping of sequences of two clusters
5.527276
5.127839
1.077896
try: f = open(args.config, 'r') seq_out = open(op.join(args.out, "seqs.fastq"), 'w') ma_out = open(op.join(args.out, "seqs.ma"), 'w') except IOError as e: traceback.print_exc() raise IOError("Cannot create output files: %s, %s or read %s" % (op.join(args.out, "seqs.ma"), op.join(args.out, "seqs.fastq"), args.config)) logger.info("Reading sequences") seq_l, sample_l = _read_fastq_files(f, args) logger.info("Creating matrix with unique sequences") logger.info("Filtering: min counts %s, min size %s, max size %s, min shared %s" % (args.minc, args.minl, args.maxl, args.min_shared)) _create_matrix_uniq_seq(sample_l, seq_l, ma_out, seq_out, args.min_shared) logger.info("Finished preprocessing. Get a sorted BAM file of seqs.fa and run seqcluster cluster.")
def prepare(args)
Read all seq.fa files and create a matrix and unique fasta files. :param args: options parsed from the command line :param con: logging messages going to the console :param log: logging messages going to the console and file :returns: files - matrix and fasta files that should be used with an aligner (such as bowtie) before running `seqcluster cluster`
4.464558
4.058951
1.099929
seq_l = {} sample_l = [] idx = 1 for line1 in f: line1 = line1.strip() cols = line1.split("\t") with open(cols[0], 'r') as fasta: sample_l.append(cols[1]) for line in fasta: if line.startswith(">"): idx += 1 counts = int(re.search("x([0-9]+)", line.strip()).group(1)) else: seq = line.strip() seq = seq[0:int(args.maxl)] if len(seq) > int(args.maxl) else seq if counts > int(args.minc) and len(seq) > int(args.minl): if seq not in seq_l: seq_l[seq] = sequence_unique(idx, seq) seq_l[seq].add_exp(cols[1], counts) return seq_l, sample_l
def _read_fasta_files(f, args)
read fasta files of each sample and generate a seq_obj with the information of each unique sequence in each sample :param f: file containing the path for each fasta file and the name of the sample. Two column format with `tab` as field separator :returns: * :code:`seq_l`: is a list of seq_obj objects, containing the information of each sequence * :code:`sample_l`: is a list with the name of the samples (column two of the config file)
3.023048
2.887936
1.046785
seq_l = {} sample_l = [] idx = 1 p = re.compile("^[ATCGNU]+$") with open(op.join(args.out, "stats_prepare.tsv"), 'w') as out_handle: for line1 in f: line1 = line1.strip() cols = line1.split("\t") # if not is_fastq(cols[0]): # raise ValueError("file is not fastq: %s" % cols[0]) with open_fastq(cols[0]) as handle: sample_l.append(cols[1]) total = added = 0 for line in handle: if line.startswith("@") or line.startswith(">"): seq = handle.next().strip() if not p.match(seq): continue idx += 1 total += 1 keep = {} counts = int(re.search("x([0-9]+)", line.strip()).group(1)) if is_fastq(cols[0]): handle.next().strip() qual = handle.next().strip() else: qual = "I" * len(seq) qual = qual[0:int(args.maxl)] if len(qual) > int(args.maxl) else qual seq = seq[0:int(args.maxl)] if len(seq) > int(args.maxl) else seq if counts > int(args.minc) and len(seq) > int(args.minl): added += 1 if seq in keep: keep[seq].update(qual) else: keep[seq] = quality(qual) if seq not in seq_l: seq_l[seq] = sequence_unique(idx, seq) seq_l[seq].add_exp(cols[1], counts) seq_l[seq].quality = keep[seq].get() print("total\t%s\t%s" % (idx, cols[1]), file=out_handle, end="") print("added\t%s\t%s" % (len(seq_l), cols[1]), file=out_handle, end="") logger.info("%s: Total read %s ; Total added %s" % (cols[1], idx, len(seq_l))) return seq_l, sample_l
def _read_fastq_files(f, args)
read fastq files of each sample and generate a seq_obj with the information of each unique sequence in each sample :param f: file containing the path for each fastq file and the name of the sample. Two column format with `tab` as field separator :returns: * :code:`seq_l`: is a list of seq_obj objects, containing the information of each sequence * :code:`sample_l`: is a list with the name of the samples (column two of the config file)
3.079938
2.980741
1.033279
skip = 0 if int(min_shared) > len(sample_l): min_shared = len(sample_l) maout.write("id\tseq") for g in sample_l: maout.write("\t%s" % g) for s in seq_l.keys(): seen = sum([1 for g in seq_l[s].group if seq_l[s].group[g] > 0]) if seen < int(min_shared): skip += 1 continue maout.write("\nseq_%s\t%s" % (seq_l[s].idx, seq_l[s].seq)) for g in sample_l: if g in seq_l[s].group: maout.write("\t%s" % seq_l[s].group[g]) else: maout.write("\t0") qual = "".join(seq_l[s].quality) out.write("@seq_%s\n%s\n+\n%s\n" % (seq_l[s].idx, seq_l[s].seq, qual)) out.close() maout.close() logger.info("Total skipped due to --min-shared parameter (%s) : %s" % (min_shared, skip))
def _create_matrix_uniq_seq(sample_l, seq_l, maout, out, min_shared)
create matrix counts for each different sequence in all the fasta files :param sample_l: :code:`list_s` is the output of :code:`_read_fasta_files` :param seq_l: :code:`seq_s` is the output of :code:`_read_fasta_files` :param maout: is a file handler to write the matrix count information :param out: is a file handle to write the fasta file with unique sequences :returns: Null
2.345613
2.433306
0.963962
if not args.bed: raise ValueError("This module needs the bed file output from cluster subcmd.") workdir = op.abspath(op.join(args.out, 'coral')) safe_dirs(workdir) bam_in = op.abspath(args.bam) bed_in = op.abspath(args.bed) reference = op.abspath(args.ref) with chdir(workdir): bam_clean = coral.prepare_bam(bam_in, bed_in) out_dir = op.join(workdir, "regions") safe_dirs(out_dir) prefix = "seqcluster" loci_file = coral.detect_regions(bam_clean, bed_in, out_dir, prefix) coral.create_features(bam_clean, loci_file, reference, out_dir)
def run_coral(clus_obj, out_dir, args)
Run some CoRaL modules to predict small RNA function
4.099126
4.001481
1.024402
ref = os.path.abspath(args.reference) utils.safe_dirs(out_dir) for nc in clus_obj[0]: c = clus_obj[0][nc] loci = c['loci'] out_fa = "cluster_" + nc if loci[0][3] - loci[0][2] < 500: with make_temp_directory() as tmpdir: os.chdir(tmpdir) get_loci_fasta({loci[0][0]: [loci[0][0:5]]}, out_fa, ref) summary_file, str_file = _run_tRNA_scan(out_fa) if "predictions" not in c: c['predictions'] = {} c['predictions']['tRNA'] = _read_tRNA_scan(summary_file) score = _read_tRNA_scan(summary_file) logger.debug(score) shutil.move(summary_file, op.join(out_dir, summary_file)) shutil.move(str_file, op.join(out_dir, str_file)) else: c['errors'].add("precursor too long") clus_obj[0][nc] = c return clus_obj
def is_tRNA(clus_obj, out_dir, args)
Iterates through cluster precursors to predict sRNA types
3.844319
3.759838
1.022469
score = 0 if os.path.getsize(summary_file) == 0: return 0 with open(summary_file) as in_handle: # header = in_handle.next().strip().split() for line in in_handle: if not line.startswith("--"): pre = line.strip().split() score = pre[-1] return score
def _read_tRNA_scan(summary_file)
Parse output from tRNAscan-SE
3.105137
2.953027
1.05151
out_file = fasta_file + "_trnascan" se_file = fasta_file + "_second_str" cmd = "tRNAscan-SE -q -o {out_file} -f {se_file} {fasta_file}" run(cmd.format(**locals())) return out_file, se_file
def _run_tRNA_scan(fasta_file)
Run tRNAscan-SE to predict tRNAs
4.262382
3.75412
1.135388
multiplier = 1 if mut.startswith("-"): mut = mut[1:] multiplier = -1 nt = mut.strip('0123456789') pos = int(mut[:-2]) * multiplier return nt, pos
def _parse_mut(mut)
Parse mutation field to get position and nts.
4.095821
3.195719
1.281659
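Hedged examples of this parser, assuming the mutation field has the form <offset><ref_nt><obs_nt> with an optional leading minus sign.

# The same parsing body applied to two illustrative mutation fields.
def parse_mut(mut):
    multiplier = 1
    if mut.startswith("-"):
        mut = mut[1:]
        multiplier = -1
    nt = mut.strip("0123456789")
    pos = int(mut[:-2]) * multiplier
    return nt, pos

print(parse_mut("3TC"))   # ('TC', 3)
print(parse_mut("-2AG"))  # ('AG', -2)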
mut = isomir.split(":")[1] if mut == "0": return mut nt, pos = _parse_mut(mut) trim5 = isomir.split(":")[-2] off = -1 * len(trim5) if trim5.islower(): off = len(trim5) if trim5 == "NA" or trim5 == "0": off = 0 # print(isomir) # print([mut, pos, off, nt]) return "%s%s" % (pos + off, nt)
def _get_reference_position(isomir)
Liftover from isomir to reference mature
5.054957
4.871625
1.037633
pass_pos = [] for isomir in isomirs.iterrows(): mir = isomir[1]["chrom"] mut = isomir[1]["sv"] mut_counts = isomir[1]["counts"] total = mirna.loc[mir, "counts"] * 1.0 - mut_counts mut_diff = isomir[1]["diff"] ratio = mut_counts / total if mut_counts > 10 and ratio > 0.4 and mut != "0" and mut_diff > 1: isomir[1]["ratio"] = ratio pass_pos.append(isomir[1]) return pass_pos
def _get_pct(isomirs, mirna)
Get pct of variants with respect to the reference using reads and distinct sequences
3.95014
3.913053
1.009478
print("##fileformat=VCFv4.2", file=STDOUT, end="") print("##source=seqbuster2.3", file=STDOUT, end="") print("##reference=mirbase", file=STDOUT, end="") for pos in data: print("##contig=<ID=%s>" % pos["chrom"], file=STDOUT, end="") print('##INFO=<ID=ID,Number=1,Type=String,Description="miRNA name">', file=STDOUT, end="") print('##FORMAT=<ID=GT,Number=1,Type=Integer,Description="Genotype">', file=STDOUT, end="") print('##FORMAT=<ID=NR,Number=A,Type=Integer,Description="Total reads supporting the variant">', file=STDOUT, end="") print('##FORMAT=<ID=NS,Number=A,Type=Float,Description="Total number of different sequences supporting the variant">', file=STDOUT, end="") print("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tSAMP001", file=STDOUT, end="")
def _print_header(data)
Create vcf header to make a valid vcf.
2.469331
2.365173
1.044038
id_name = "." qual = "." chrom = data['chrom'] pos = data['pre_pos'] nt_ref = data['nt'][1] nt_snp = data['nt'][0] flt = "PASS" info = "ID=%s" % data['mature'] frmt = "GT:NR:NS" gntp = "%s:%s:%s" % (_genotype(data), data["counts"], data["diff"]) print("\t".join(map(str, [chrom, pos, id_name, nt_ref, nt_snp, qual, flt, info, frmt, gntp])), file=STDOUT, end="")
def print_vcf(data)
Print vcf line following rules.
5.304715
5.167024
1.026648
fixed_pos = [] _print_header(pass_pos) for pos in pass_pos: mir = pos["mature"] db_pos = matures[pos["chrom"]] mut = _parse_mut(pos["sv"]) print([db_pos[mir], mut, pos["sv"]]) pos['pre_pos'] = db_pos[mir][0] + mut[1] - 1 pos['nt'] = list(mut[0]) fixed_pos.append(pos) print_vcf(pos) return fixed_pos
def liftover(pass_pos, matures)
Make position at precursor scale
5.674575
5.606883
1.012073
global STDOUT isomirs['sv'] = [_get_reference_position(m) for m in isomirs["isomir"]] mirna = isomirs.groupby(['chrom']).sum() sv = isomirs.groupby(['chrom', 'mature', 'sv'], as_index=False).sum() sv["diff"] = isomirs.groupby(['chrom', 'mature', 'sv'], as_index=False).size().reset_index().loc[:,0] pass_pos = _get_pct(sv, mirna) if vcf_file: with open(vcf_file, 'w') as out_handle: STDOUT = out_handle pass_pos = liftover(pass_pos, matures) if gtf: vcf_genome_file = vcf_file.replace(".vcf", "_genome.vcf") with open(vcf_genome_file, 'w') as out_handle: STDOUT = out_handle pass_pos = liftover_to_genome(pass_pos, gtf)
def create_vcf(isomirs, matures, gtf, vcf_file=None)
Create vcf file of changes for all samples. PASS will be ones with > 3 isomiRs supporting the position and > 30% of reads, otherwise LOW
3.633986
3.641596
0.99791
fixed_pos = [] for pos in pass_pos: if pos["chrom"] not in gtf: continue db_pos = gtf[pos["chrom"]][0] mut = _parse_mut(pos["sv"]) print([db_pos, pos]) if db_pos[3] == "+": pos['pre_pos'] = db_pos[1] + pos["pre_pos"] + 1 else: pos['pre_pos'] = db_pos[2] - (pos["pre_pos"] - 1) pos['chrom'] = db_pos[0] pos['nt'] = list(mut[0]) fixed_pos.append(pos) _print_header(fixed_pos) for pos in fixed_pos: print_vcf(pos)
def liftover_to_genome(pass_pos, gtf)
Liftover from precursor to genome
3.489021
3.528126
0.988916
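The strand arithmetic of the liftover, isolated with an invented precursor located at chr1:1000-1100 and db_pos assumed to be [chrom, start, end, strand] as read from the GTF dict.

# Precursor-relative position to genome position, following the formulas above.
def lift(db_pos, pre_pos):
    if db_pos[3] == "+":
        return db_pos[0], db_pos[1] + pre_pos + 1
    return db_pos[0], db_pos[2] - (pre_pos - 1)

print(lift(["chr1", 1000, 1100, "+"], 10))  # ('chr1', 1011)
print(lift(["chr1", 1000, 1100, "-"], 10))  # ('chr1', 1091)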
already_in = set() not_in = [] already_in = map(seen.get, seqs) # if isinstance(already_in, list): already_in = filter(None, already_in) not_in = set(seqs) - set(seen.keys()) # for s in seqs: # if s in seen: # already_in.add(seen[s]) # else: # not_in.append(s) return list(set(already_in)), list(not_in)
def _get_seqs_from_cluster(seqs, seen)
Returns the sequences that are already part of the cluster :param seqs: list of sequence ids :param seen: dict of sequence ids that are already part of a cluster :returns: * :code:`already_in`: list of cluster ids that contained some of the sequences * :code:`not_in`: list of sequences that don't belong to any cluster yet
2.933693
2.742152
1.069851
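The same set logic on plain data, where `seen` maps a sequence id to the cluster it already belongs to (ids below are invented).

# Split sequences into ones already clustered and ones still unassigned.
def get_seqs_from_cluster(seqs, seen):
    already_in = set(filter(None, map(seen.get, seqs)))
    not_in = set(seqs) - set(seen.keys())
    return list(already_in), list(not_in)

print(get_seqs_from_cluster(["s1", "s3"], {"s1": 1, "s2": 2}))
# ([1], ['s3'])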
filtered = {} n_cluster = 0 large = 0 current = clus_obj.clusid logger.info("Number of loci: %s" % len(clus_obj.loci.keys())) bar = ProgressBar(maxval=len(current)) bar.start() bar.update(0) for itern, idmc in enumerate(current): bar.update(itern) logger.debug("_reduceloci: cluster %s" % idmc) c = copy.deepcopy(list(current[idmc])) n_loci = len(c) if n_loci < 1000: filtered, n_cluster = _iter_loci(c, clus_obj.clus, (clus_obj.loci, clus_obj.seq), filtered, n_cluster) else: large += 1 n_cluster += 1 _write_cluster(c, clus_obj.clus, clus_obj.loci, n_cluster, path) filtered[n_cluster] = _add_complete_cluster(n_cluster, c, clus_obj.clus) clus_obj.clus = filtered seqs = 0 for idc in filtered: seqs += len(filtered[idc].idmembers) logger.info("seqs in clusters %s" % (seqs)) logger.info("Clusters too long to be analized: %s" % large) logger.info("Number of clusters removed because low number of reads: %s" % REMOVED) logger.info("Number of clusters with conflicts: %s" % CONFLICT) return clus_obj
def reduceloci(clus_obj, path)
reduce number of loci a cluster has :param clus_obj: cluster object :param path: output path
4.217055
4.225632
0.99797
out_file = op.join(path, 'log', str(idx) + '.bed') with utils.safe_run(out_file): with open(out_file, 'w') as out_handle: for idc in metacluster: for idl in cluster[idc].loci2seq: pos = loci[idl].list() print("\t".join(pos[:4] + [str(len(cluster[idc].loci2seq[idl]))] + [pos[-1]]), file=out_handle, end="")
def _write_cluster(metacluster, cluster, loci, idx, path)
For complex meta-clusters, write all the loci for further debugging
4.600345
4.473479
1.02836
global CONFLICT loci = dict(zip(meta, [clusters[idc] for idc in meta])) n_loci = len(meta) n_loci_prev = n_loci + 1 cicle = 0 # [logger.note("BEFORE %s %s %s" % (c.id, idl, len(c.loci2seq[idl]))) for idl in c.loci2seq] internal_cluster = {} if n_loci == 1: n_cluster += 1 filtered[n_cluster] = clusters[meta[0]] filtered[n_cluster].update(id=n_cluster) filtered[n_cluster].set_freq(s2p[1]) while n_loci < n_loci_prev and n_loci != 1: n_loci_prev = n_loci cicle += 1 if (cicle % 1) == 0: logger.debug("_iter_loci:number of cicle: %s with n_loci %s" % (cicle, n_loci)) loci_similarity = _calculate_similarity(loci) internal_cluster = _merge_similar(loci, loci_similarity) n_loci = len(internal_cluster) loci = internal_cluster logger.debug("_iter_loci: n_loci %s" % n_loci) if n_loci > 1: n_internal_cluster = sorted(internal_cluster.keys(), reverse=True)[0] CONFLICT += 1 internal_cluster = _solve_conflict(internal_cluster, s2p, n_internal_cluster) internal_cluster = _clean_cluster(internal_cluster) for idc in internal_cluster: n_cluster += 1 logger.debug("_iter_loci: add to filtered %s" % n_cluster) filtered[n_cluster] = internal_cluster[idc] filtered[n_cluster].id = n_cluster filtered[n_cluster].update(id=n_cluster) filtered[n_cluster].set_freq(s2p[1]) logger.debug("_iter_loci: filtered %s" % filtered.keys()) # for new_c in internal_cluster.values(): # [logger.note("%s %s %s %s" % (meta, new_c.id, idl, len(new_c.loci2seq[idl]))) for idl in new_c.loci2seq] return filtered, n_cluster
def _iter_loci(meta, clusters, s2p, filtered, n_cluster)
Go through all loci and decide if they are part of the same TU or not. :param meta: list of cluster ids in the meta-cluster :param s2p: dict with [loci].coverage[start] = # of sequences there :param filtered: dict with cluster objects :param n_cluster: int cluster id :return: * filtered: dict of cluster objects * n_cluster: int cluster id
2.848755
2.845489
1.001148
new_dict = {} n_cluster = 0 logger.debug("_convert_to_cluster: loci %s" % c.loci2seq.keys()) for idl in c.loci2seq: n_cluster += 1 new_c = cluster(n_cluster) #new_c.id_prev = c.id new_c.loci2seq[idl] = c.loci2seq[idl] new_dict[n_cluster] = new_c logger.debug("_convert_to_cluster: new ids %s" % new_dict.keys()) return new_dict
def _convert_to_clusters(c)
Return 1 cluster per locus
3.348631
3.003382
1.114953
ma = {} for idc in c: set1 = _get_seqs(c[idc]) [ma.update({(idc, idc2): _common(set1, _get_seqs(c[idc2]), idc, idc2)}) for idc2 in c if idc != idc2 and (idc2, idc) not in ma] # logger.debug("_calculate_similarity_ %s" % ma) return ma
def _calculate_similarity(c)
Get a similarity matrix of % of shared sequence :param c: cluster object :return ma: similarity matrix
4.336503
4.141891
1.046986
seqs = set() for idl in list_idl.loci2seq: # logger.debug("_get_seqs_: loci %s" % idl) [seqs.add(s) for s in list_idl.loci2seq[idl]] # logger.debug("_get_seqs_: %s" % len(seqs)) return seqs
def _get_seqs(list_idl)
Get all sequences in a cluster given its loci
4.043425
3.363977
1.201978
c = len(set(s1).intersection(s2)) t = min(len(s1), len(s2)) pct = 1.0 * c / t * t is_gt = up_threshold(pct, t * 1.0, parameters.similar) logger.debug("_common: pct %s of clusters:%s %s = %s" % (1.0 * c / t, i1, i2, is_gt)) if pct < parameters.similar and is_gt and pct > 0: pct = parameters.similar return pct / t
def _common(s1, s2, i1, i2)
Calculate the percentage of shared sequences
6.024745
5.970942
1.009011
all_true1 = all([all([common and loci_similarity[(p, c)] > parameters.similar for p in pairs if (p, c) in loci_similarity]) for c in clus_seen]) all_true2 = all([all([common and loci_similarity[(c, p)] > parameters.similar for p in pairs if (c, p) in loci_similarity]) for c in clus_seen]) return all_true1 * all_true2
def _is_consistent(pairs, common, clus_seen, loci_similarity)
Check whether the shared loci are consistent with all clusters seen so far.
2.944128
2.795281
1.053249
n_cluster = 0 internal_cluster = {} clus_seen = {} loci_sorted = sorted(loci_similarity.iteritems(), key=operator.itemgetter(1), reverse=True) for pairs, sim in loci_sorted: common = sim > parameters.similar n_cluster += 1 logger.debug("_merge_similar:try new cluster %s" % n_cluster) new_c = cluster(n_cluster) p_seen, p_unseen = [], [] size = min(len(_get_seqs(loci[pairs[0]])), len(_get_seqs(loci[pairs[1]]))) if common: consistent = _is_consistent(pairs, common, clus_seen, loci_similarity) logger.debug("_merge_similar: clusters seen: %s" % clus_seen) logger.debug("_merge_similar: id %s common %s|%s total %s consistent %s" % (pairs, sim, common, size, consistent)) if not consistent: continue if pairs[0] in clus_seen: p_seen.append(pairs[0]) p_unseen.append(pairs[1]) if pairs[1] in clus_seen: p_seen.append(pairs[1]) p_unseen.append(pairs[0]) if len(p_seen) == 0: new_c = _merge_cluster(loci[pairs[0]], new_c) new_c = _merge_cluster(loci[pairs[1]], new_c) [clus_seen.update({p: n_cluster}) for p in pairs] internal_cluster[n_cluster] = new_c if len(p_seen) == 1: idc_seen = clus_seen[p_seen[0]] internal_cluster[idc_seen] = _merge_cluster(loci[p_unseen[0]], internal_cluster[idc_seen]) clus_seen[p_unseen[0]] = idc_seen else: logger.debug("_merge_similar: id %s %s are different" % pairs) continue internal_cluster.update(_add_unseen(loci, clus_seen, n_cluster)) logger.debug("_merge_similar: total clus %s" % len(internal_cluster.keys())) return internal_cluster
def _merge_similar(loci, loci_similarity)
Internal function to reduce loci complexity :param loci: dict of cluster objects :param loci_similarity: similarity matrix with the % of shared sequences between loci pairs :return: dict with the merged clusters
2.839397
2.820416
1.00673
logger.debug("_merge_cluster: %s to %s" % (old.id, new.id)) logger.debug("_merge_cluster: add idls %s" % old.loci2seq.keys()) for idl in old.loci2seq: # if idl in new.loci2seq: # new.loci2seq[idl] = list(set(new.loci2seq[idl] + old.loci2seq[idl])) # new.loci2seq[idl] = old.loci2seq[idl] new.add_id_member(old.loci2seq[idl], idl) return new
def _merge_cluster(old, new)
Merge one cluster into another
2.849162
2.784822
1.023104
logger.debug("_solve_conflict: count once") if parameters.decision_cluster == "bayes": return decide_by_bayes(list_c, s2p) loci_similarity = _calculate_similarity(list_c) loci_similarity = sorted(loci_similarity.iteritems(), key=operator.itemgetter(1), reverse=True) common = sum([score for p, score in loci_similarity]) while common > 0: n_cluster += 1 logger.debug("_solve_conflict: ma %s" % loci_similarity) pairs = loci_similarity[0][0] score = loci_similarity[0][1] logger.debug("_solve_conflict: common %s, new %s" % (score, n_cluster)) if parameters.decision_cluster.startswith("most-voted"): list_c = _split_cluster_by_most_vote(list_c, pairs) else: list_c = _split_cluster(list_c, pairs, n_cluster) list_c = {k: v for k, v in list_c.iteritems() if len(v.loci2seq) > 0} loci_similarity = _calculate_similarity(list_c) loci_similarity = sorted(loci_similarity.iteritems(), key=operator.itemgetter(1), reverse=True) #logger.note("%s %s" % (pairs, loci_similarity[0][1])) common = sum([score for p, score in loci_similarity]) logger.debug("_solve_conflict: solved clusters %s" % len(list_c.keys())) return list_c
def _solve_conflict(list_c, s2p, n_cluster)
Make sure sequences are counted once. Resolve by most-voted or exclusion :param list_c: dict of cluster objects :param s2p: dict of [loci].coverage = # of seqs :param n_cluster: number of clusters :return: dict with the new set of clusters
3.157825
2.922246
1.080616
old = c[p[0]] new = c[p[1]] new_c = cluster(n) common = set(_get_seqs(old)).intersection(_get_seqs(new)) for idl in old.loci2seq: in_common = list(set(common).intersection(old.loci2seq[idl])) if len(in_common) > 0: logger.debug("_split_cluster: in_common %s with pair 1" % (len(in_common))) new_c.add_id_member(in_common, idl) old.loci2seq[idl] = list(set(old.loci2seq[idl]) - set(common)) logger.debug("_split_cluster: len old %s with pair 1" % (len(old.loci2seq))) for idl in new.loci2seq: in_common = list(set(common).intersection(new.loci2seq[idl])) if len(in_common) > 0: logger.debug("_split_cluster: in_common %s with pair 2" % (len(in_common))) new_c.add_id_member(in_common, idl) new.loci2seq[idl] = list(set(new.loci2seq[idl]) - set(common)) logger.debug("_split_cluster: len old %s with pair 2" % (len(new.loci2seq))) old.update() new.update() old.loci2seq = {k: v for k, v in old.loci2seq.iteritems() if len(v) > 0} new.loci2seq = {k: v for k, v in new.loci2seq.iteritems() if len(v) > 0} c[n] = new c[p[0]] = old c[p[1]] = new return c
def _split_cluster(c, pairs, n)
Split cluster by exclusion
1.988591
1.975989
1.006377
old, new = c[p[0]], c[p[1]] old_size = _get_seqs(old) new_size = _get_seqs(new) logger.debug("_most_vote: size of %s with %s - %s with %s" % (old.id, len(old_size), new.id, len(new_size))) if len(old_size) > len(new_size): keep, remove = old, new else: keep, remove = new, old common = list(set(old_size).intersection(new_size)) logger.debug("_most_vote: keep %s remove %s with common %s" % (keep.id, remove.id, len(common))) for idl in remove.loci2seq: if len(common) > 0: remove.loci2seq[idl] = list(set(remove.loci2seq[idl]) - set(common)) keep.loci2seq = {k: v for k, v in keep.loci2seq.iteritems() if len(v) > 0} remove.loci2seq = {k: v for k, v in remove.loci2seq.iteritems() if len(v) > 0} keep.update() remove.update() c[keep.id] = keep c[remove.id] = remove return c
def _split_cluster_by_most_vote(c, p)
split cluster by most-vote strategy
2.358468
2.338674
1.008464
global REMOVED init = len(list_c) list_c = {k: v for k, v in list_c.iteritems() if len(_get_seqs(v)) > parameters.min_seqs} logger.debug("_clean_cluster: number of clusters %s " % len(list_c.keys())) list_c = {k: _select_loci(v) for k, v in list_c.iteritems()} end = len(list_c) REMOVED += init - end return list_c
def _clean_cluster(list_c)
Remove clusters with fewer than the minimum number of sequences and loci smaller than 80% of the biggest locus
3.970388
3.564779
1.113782
loci_len = {k: len(v) for k, v in c.loci2seq.iteritems()} logger.debug("_select_loci: number of loci %s" % len(c.loci2seq.keys())) loci_len_sort = sorted(loci_len.iteritems(), key=operator.itemgetter(1), reverse=True) max_size = loci_len_sort[0][1] logger.debug("_select_loci: max size %s" % max_size) loci_clean = {locus: c.loci2seq[locus] for locus, size in loci_len_sort if size > 0.8 * max_size} c.loci2seq = loci_clean removed = list(set(c.idmembers.keys()) - set(_get_seqs(c))) c.add_id_member(removed, loci_len_sort[0][0]) logger.debug("_select_loci: number of loci %s after cleaning" % len(c.loci2seq.keys())) return c
def _select_loci(c)
Select only loci with most abundant sequences
2.639296
2.528623
1.043768
first_run = 0 seen_seqs = list() n_cluster += 1 logger.debug("_solve_loci:new cluster %s" % n_cluster) new_c = cluster(n_cluster) for idl, lenl in locilen_sorted: locus_seqs = c.loci2seq[idl] if first_run == 0: seen_seqs = locus_seqs first_run = 1 first_idl = idl intersect = list(set(seen_seqs).intersection(locus_seqs)) common = 0 if intersect: common = len(intersect)*1.0/min(len(seen_seqs), len(locus_seqs)) logger.debug("_sole_loci:id %s idl %s len %s max %s seen %s inter %s common %s " % (c.id, idl, lenl, maxseq, len(seen_seqs), len(intersect), common)) if common*1.0 >= 0.6: if lenl*1.0 >= 0.6*maxseq: c, new_c, seen_seqs = _merge_loci_in_cluster(c, new_c, idl, seen_seqs) else: c, new_c, seen_seqs = _merge_with_first_loci(c, new_c, first_idl, idl, seen_seqs) else: c = _remove_seqs_from_loci(c, idl, seen_seqs) filtered[n_cluster] = new_c return c, seen_seqs, filtered, n_cluster
def _solve_loci_deprecated(c, locilen_sorted, seen_seqs, filtered, maxseq, n_cluster)
internal function to reduce loci complexity The function will read all loci in a cluster of sequences and determine if all loci are part of the same transcriptional unit (TU) by the most-voted locus or by exclusion of common sequences that are the minority of two loci. :param c: class cluster :param locilen_sorted: list of loci sorted by size :param seen_seqs: list of seen sequences :param filtered: final TU list :param maxseq: bigger locus :param n_cluster: integer with index of different TUs :return: c: updated class cluster seen_seqs: updated list of sequences filtered: updated dict of TUs n_cluster: updated int with current index of TUs
2.963111
2.93397
1.009932
ann = set() if not string: return "This cluster is inter-genic." for item in string: for db in item: ann = ann.union(set(item[db])) return "annotated as: %s ..." % ",".join(list(ann)[:3])
def _get_description(string)
Parse annotation to get nice description
9.66166
8.889337
1.086882
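The function in action on an invented annotation structure (a list of {database: [names]} dicts, or an empty value for inter-genic clusters); the output order of names is arbitrary because a set is used.

# Same body as _get_description, exercised with made-up annotations.
def get_description(string):
    ann = set()
    if not string:
        return "This cluster is inter-genic."
    for item in string:
        for db in item:
            ann = ann.union(set(item[db]))
    return "annotated as: %s ..." % ",".join(list(ann)[:3])

print(get_description([{"mirbase": ["mir-21"]}, {"repeats": ["LINE"]}]))
print(get_description([]))  # This cluster is inter-genic.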
x = set() for sample in profile: x = x.union(set(profile[sample].keys())) if not x: return '' end, start = max(x), min(x) x = range(start, end, 4) scaled_profile = defaultdict(list) for pos in x: for sample in profile: y = _get_closer(profile[sample], pos) if y: scaled_profile[sample].append(profile[sample][y]) else: scaled_profile[sample].append(0) return {'x': list(x), 'y': scaled_profile, 'names': scaled_profile.keys()}
def _set_format(profile)
Prepare a dict of y-value lists that share the same x coordinates
3.457015
3.103814
1.113796
with con: cur = con.cursor() cur.execute("DROP TABLE IF EXISTS clusters;") cur.execute("CREATE TABLE clusters(Id INT, Description TEXT, Locus TEXT, Annotation TEXT, Sequences TEXT, Profile TXT, Precursor TXT)") for c in data[0]: locus = json.dumps(data[0][c]['loci']) annotation = json.dumps(data[0][c]['ann']) description = _get_description(data[0][c]['ann']) sequences = json.dumps(_get_sequences(data[0][c])) keys = data[0][c]['freq'][0].values()[0].keys() profile = "Not available." if 'profile' in data[0][c]: profile = json.dumps(_set_format(data[0][c]['profile'])) precursor = json.dumps(data[0][c].get('precursor')) cur.execute("INSERT INTO clusters VALUES(%s, '%s', '%s', '%s', '%s', '%s', '%s')" % (c, description, locus, annotation, sequences, profile, precursor))
def _insert_data(con, data)
insert line for each cluster
3.082698
2.938723
1.048992
loc_id = 1 bedfile_clusters = "" bamfile = pybedtools.BedTool(file_in) bed = pybedtools.BedTool.bam_to_bed(bamfile) for c, start, end, name, q, strand in bed: loc_id += 1 bedfile_clusters += "%s\t%s\t%s\t%s\t%s\t%s\n" % \ (c, start, end, name, loc_id, strand) return bedfile_clusters
def parse_align_file(file_in)
Parse sam files with aligned sequences
3.310392
3.213051
1.030295
name = "" index = 1 total = defaultdict(int) with open(in_file) as handle_in: line = handle_in.readline().strip() cols = line.split("\t") samples = cols[2:] for line in handle_in: line = line.strip() cols = line.split("\t") name = int(cols[0].replace("seq_", "")) seq = cols[1] exp = {} for i in range(len(samples)): exp[samples[i]] = int(cols[i+2]) total[samples[i]] += int(cols[i+2]) index = index+1 if name in seq_obj: seq_obj[name].set_freq(exp) seq_obj[name].set_seq(seq) # new_s = sequence(seq, exp, index) # seq_l[name] = new_s seq_obj = _normalize_seqs(seq_obj, total) return seq_obj, total, index
def parse_ma_file(seq_obj, in_file)
read seqs.ma file and create dict with sequence object
2.976148
3.044256
0.977627
field = field.lower() try: group = cols[2] attrs = cols[8].split(";") name = [attr.strip().split(" ")[1] for attr in attrs if attr.strip().split(" ")[0].lower().endswith(field)] if not name: name = [attr.strip().split(" ")[1] for attr in attrs if attr.strip().split(" ")[0].lower().endswith("gene_id")] if not name: name = ["None"] biotype = [attr.strip().split(" ")[1] for attr in attrs if attr.strip().split(" ")[0].lower().endswith("biotype")] if biotype: group = biotype[0] c = cols[0] s = int(cols[3]) e = int(cols[4]) st = cols[6] return [c, s, e, st, group, name[0]] except Exception as e: logger.error(cols) logger.error("File is not in the correct format") logger.error("Expect chr source feature start end . strand attributes") logger.error("Attributes are 'gene_name SNCA; gene_id ENSG; '") logger.error("The 3rd column is used as type of small RNA (like miRNA)") logger.error("at least should contain '; *name NAME; '") logger.error(e) raise
def read_gtf_line(cols, field="name")
parse gtf line to get class/name information
3.856556
3.817334
1.010275
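One invented GTF line pushed through the attribute parsing above; the expected attribute layout ('gene_name NAME; gene_id ID; gene_biotype TYPE') is taken from the error messages in the code.

# Extract [chrom, start, end, strand, group, name] from a single GTF line.
cols = ["chr1", "src", "miRNA", "100", "200", ".", "+", ".",
        "gene_name SNCA; gene_id ENSG00000145335; gene_biotype miRNA"]
attrs = cols[8].split(";")
name = [a.strip().split(" ")[1] for a in attrs if a.strip().split(" ")[0].lower().endswith("name")]
biotype = [a.strip().split(" ")[1] for a in attrs if a.strip().split(" ")[0].lower().endswith("biotype")]
group = biotype[0] if biotype else cols[2]
print([cols[0], int(cols[3]), int(cols[4]), cols[6], group, name[0]])
# ['chr1', 100, 200, '+', 'miRNA', 'SNCA']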
strd = "-" if pos_a[2] in pos_b[2]: strd = "+" if pos_a[2] in "+" and pos_b[2] in "+": lento5 = pos_a[0] - pos_b[1] + 1 lento3 = pos_a[1] - pos_b[1] + 1 if pos_a[2] in "+" and pos_b[2] in "-": lento5 = pos_a[1] - pos_b[0] + 1 lento3 = pos_a[0] - pos_b[1] + 1 if pos_a[2] in "-" and pos_b[2] in "+": lento5 = pos_a[0] - pos_b[1] + 1 lento3 = pos_a[1] - pos_b[0] + 1 if pos_a[2] in "-" and pos_b[2] in "-": lento3 = pos_a[0] - pos_b[0] + 1 lento5 = pos_a[1] - pos_b[1] + 1 else: lento5 = pos_a[0] - pos_b[0] + 1 lento3 = pos_a[1] - pos_b[1] + 1 return lento5, lento3, strd
def _position_in_feature(pos_a, pos_b)
return distance to 3' and 5' end of the feature
1.584588
1.537428
1.030674
id_sa, id_ea, id_id, id_idl, id_sta = 1, 2, 3, 4, 5 if type_ann == "bed": id_sb = 7 id_eb = 8 id_stb = 11 id_tag = 9 ida = 0 clus_id = clus_obj.clus loci_id = clus_obj.loci db = os.path.splitext(db)[0] logger.debug("Type:%s\n" % type_ann) for cols in c.features(): if type_ann == "gtf": cb, sb, eb, stb, db, tag = read_gtf_line(cols[6:], feature_id) else: sb = int(cols[id_sb]) eb = int(cols[id_eb]) stb = cols[id_stb] tag = cols[id_tag] id = int(cols[id_id]) idl = int(cols[id_idl]) if (id in clus_id): clus = clus_id[id] sa = int(cols[id_sa]) ea = int(cols[id_ea]) ida += 1 lento5, lento3, strd = _position_in_feature([sa, ea, cols[id_sta]], [sb, eb, stb]) if db in loci_id[idl].db_ann: ann = annotation(db, tag, strd, lento5, lento3) tdb = loci_id[idl].db_ann[db] tdb.add_db_ann(ida, ann) loci_id[idl].add_db(db, tdb) else: ann = annotation(db, tag, strd, lento5, lento3) tdb = dbannotation(1) tdb.add_db_ann(ida, ann) loci_id[idl].add_db(db, tdb) clus_id[id] = clus clus_obj.clus = clus_id clus_obj.loci = loci_id return clus_obj
def anncluster(c, clus_obj, db, type_ann, feature_id="name")
intersect transcription position with annotation files
3.12303
3.059709
1.020695
if not genome: logger.info("No genome given. skipping.") return None out_file = op.join(out, op.basename(bam_in) + "_cov.tsv") if file_exists(out_file): return None fai = genome + ".fai" cov = pybedtools.BedTool(bam_in).genome_coverage(g=fai, max=1) cov.saveas(out_file) total = 0 for region in cov: if region[0] == "genome" and int(region[1]) != 0: total += float(region[4]) logger.info("Total genome with sequences: %s " % total)
def detect_complexity(bam_in, genome, out)
genome coverage of small RNA
3.753753
3.697613
1.015183
seq_obj = defaultdict(int) if mask: mask_file = op.splitext(bam_in)[0] + "_mask.bam" if not file_exists(mask_file): pybedtools.BedTool(bam_file).intersect(b=mask, v=True).saveas(mask_file) bam_in = mask_file out_file = op.splitext(bam_in)[0] + "_rmlw.bam" # bam.index(bam_in, {'algorithm':{}}) run("samtools index %s" % bam_in) if not file_exists(bam_in + ".bai"): raise IOError("Failed to created bam index of %s. Try to do it manually" % bam_in) bam_handle = pysam.AlignmentFile(bam_in, "rb") with pysam.AlignmentFile(out_file, "wb", template=bam_handle) as out_handle: for read in bam_handle.fetch(): seq_name = int(read.query_name.replace('seq_', '')) match_size = [nts for oper, nts in read.cigartuples if oper == 0] subs_size = [nts for oper, nts in read.cigartuples if oper == 4] if match_size[0] < 17: continue if subs_size: if subs_size[0] > 3: continue try: nh = read.get_tag('NH') except KeyError: nh = 1 seq_obj[seq_name] = sequence(seq_name) seq_obj[seq_name].align = nh out_handle.write(read) return out_file, seq_obj
def clean_bam_file(bam_in, mask=None)
Remove from the alignment reads with low counts and a high number of hits
2.93658
2.885152
1.017825
current_loci = {} current_clus = {} # sequence2clusters = [set()] * (max(current_seq.keys()) + 2) sequence2clusters = defaultdict(set) lindex = 0 eindex = 0 previous_id = 0 for line in c.features(): c, start, end, name, score, strand, c_id = line name = int(name.replace('seq_', '')) pos = int(start) if strand == "+" else int(end) if name not in current_seq: continue if c.find('Un_gl') > -1 and non_un_gl: continue if c_id != previous_id: if previous_id > 0: if len(current_clus[eindex].idmembers) < MIN_SEQ: for s in current_clus[eindex].idmembers: sequence2clusters[s] = sequence2clusters[s] - set([eindex]) del current_clus[eindex] logger.debug("detect_cluster: %s %s %s" % (c_id, previous_id, name)) lindex += 1 eindex += 1 current_clus[eindex] = cluster(eindex) newpos = position(lindex, c, start, end, strand) current_loci[lindex] = newpos # update locus, sequences in each line current_loci[lindex].end = int(end) current_loci[lindex].coverage[pos] += 1 size = range(pos, pos + current_seq[name].len) current_loci[lindex].counts.update(dict(zip(size, [current_seq[name].total()] * current_seq[name].len))) current_clus[eindex].idmembers[name] = 1 current_clus[eindex].add_id_member([name], lindex) current_seq[name].add_pos(lindex, pos) # current_seq[name].align = 1 previous_id = c_id sequence2clusters[name].add(eindex) logger.info("%s Clusters read" % eindex) # merge cluster with shared sequences metacluster_obj, cluster_id = _find_metaclusters(current_clus, sequence2clusters, current_seq, MIN_SEQ) return cluster_info_obj(current_clus, metacluster_obj, current_loci, current_seq)
def detect_clusters(c, current_seq, MIN_SEQ, non_un_gl=False)
Parse the merged file of sequence positions to create clusters that will have all sequences that share any position on the genome :param c: file from bedtools with merged sequence positions :param current_seq: list of sequences :param MIN_SEQ: int cutoff to keep the cluster or not. 10 as default :return: object with information about: * cluster * dict with sequences (as keys) and cluster_id (as value) * sequences * loci
3.919739
3.780112
1.036937
seen = defaultdict(int) metacluster = defaultdict(set) c_index = len(sequence2clusters) logger.info("Creating meta-clusters based on shared sequences: %s" % c_index) meta_idx = 1 bar = ProgressBar(maxval=c_index) bar.start() bar.update() for itern, name in enumerate(sequence2clusters): clusters = sequence2clusters[name] if len(clusters) == 0: c_index -= 1 continue current_seq[name].align = 1 meta_idx += 1 bar.update(itern) already_in = _common(clusters, seen) _update(clusters, meta_idx, seen) metacluster[meta_idx] = metacluster[meta_idx].union(clusters) if already_in: for seen_metacluster in already_in: clusters2merge = metacluster[seen_metacluster] metacluster[meta_idx] = metacluster[meta_idx].union(clusters2merge) _update(clusters2merge, meta_idx, seen) # metacluster[seen_metacluster] = 0 del metacluster[seen_metacluster] logger.info("%s metaclusters from %s sequences" % (len(metacluster), c_index)) return metacluster, seen
def _find_metaclusters(clus_obj, sequence2clusters, current_seq, min_seqs)
Mask under the same id all clusters that share sequences

:param clus_obj: cluster object coming from detect_cluster
:param min_seqs: int cutoff to keep the cluster or not. 10 as default

:return: updated clus_obj and dict with seq_id: cluster_id
3.555979
3.736573
0.951668
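The merging idea is easier to see in a toy, pure-Python version that replaces the _common/_update helpers with explicit set and dict operations; the cluster ids and sequence-to-cluster mapping below are invented.

from collections import defaultdict

def find_metaclusters(sequence2clusters):
    # Group clusters into meta-clusters whenever they share a sequence.
    seen = {}                       # cluster id -> current metacluster id
    metacluster = defaultdict(set)  # metacluster id -> set of cluster ids
    meta_idx = 0
    for name, clusters in sequence2clusters.items():
        if not clusters:
            continue
        meta_idx += 1
        already_in = {seen[c] for c in clusters if c in seen}
        metacluster[meta_idx] |= set(clusters)
        for c in clusters:
            seen[c] = meta_idx
        for old in already_in:
            merged = metacluster.pop(old)
            metacluster[meta_idx] |= merged
            for c in merged:
                seen[c] = meta_idx
    return metacluster, seen

# seq_a links clusters 1 and 2, seq_b links 2 and 3, seq_c sits alone in 4,
# so clusters 1-3 collapse into one metacluster and 4 keeps its own.
toy = {"seq_a": {1, 2}, "seq_b": {2, 3}, "seq_c": {4}}
print(find_metaclusters(toy))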
logger.info("Creating meta-clusters based on shared sequences.")
    seen = defaultdict()
    metacluster = defaultdict(list)
    c_index = clus_obj.keys()
    meta_idx = 0
    with ProgressBar(maxval=len(c_index), redirect_stdout=True) as p:
        for itern, c in enumerate(c_index):
            p.update(itern)
            clus = clus_obj[c]
            if len(clus.idmembers.keys()) < min_seqs:
                del clus_obj[c]
                continue
            logger.debug("reading cluster %s" % c)
            logger.debug("loci2seq %s" % clus.loci2seq)
            already_in, not_in = _get_seqs_from_cluster(clus.idmembers.keys(), seen)
            logger.debug("seen %s news %s" % (already_in, not_in))
            meta_idx += 1
            metacluster[meta_idx].append(c)
            seen.update(dict(zip(not_in, [meta_idx] * len(not_in))))
            if len(already_in) > 0:
                logger.debug("seen in %s" % already_in)
                for eindex in already_in:
                    for cluster in metacluster[eindex]:
                        metacluster[meta_idx].append(cluster)
                        prev_clus = clus_obj[cluster]
                        logger.debug("_find_families: prev %s current %s" % (eindex, clus.id))
                        # add current seqs to seen cluster
                        seqs_in = prev_clus.idmembers.keys()
                        seen.update(dict(zip(seqs_in, [meta_idx] * len(seqs_in))))
                        # for s_in_clus in prev_clus.idmembers:
                        #     seen[s_in_clus] = meta_idx
                        #     clus.idmembers[s_in_clus] = 1
                        # add current locus to seen cluster
                        # for loci in prev_clus.loci2seq:
                        #     logger.debug("adding %s" % loci)
                        #     if not loci_old in current_clus[eindex].loci2seq:
                        #         clus.add_id_member(list(prev_clus.loci2seq[loci]), loci)
                        # logger.debug("loci %s" % clus.loci2seq.keys())
                    del metacluster[eindex]
            # clus_obj[c] = clus
            # logger.debug("num cluster %s" % len(clus_obj.keys()))
    logger.info("%s clusters merged" % len(metacluster))
    return metacluster, seen
def _find_families_deprecated(clus_obj, min_seqs)
Mask under the same id all clusters that share sequences

:param clus_obj: cluster object coming from detect_cluster
:param min_seqs: int cutoff to keep the cluster or not. 10 as default

:return: updated clus_obj and dict with seq_id: cluster_id
3.315736
3.299647
1.004876
new_cluster = {}
    for cid in clus_obj.clus:
        cluster = clus_obj.clus[cid]
        cluster.update()
        logger.debug("peak calling for %s" % cid)
        bigger = cluster.locimaxid
        if bigger in clus_obj.loci:
            s, e = min(clus_obj.loci[bigger].counts.keys()), max(clus_obj.loci[bigger].counts.keys())
            scale = s
            if clus_obj.loci[bigger].strand == "-":
                scale = e
            logger.debug("bigger %s at %s-%s" % (bigger, s, e))
            dt = np.array([0] * (abs(e - s) + 12))
            for pos in clus_obj.loci[bigger].counts:
                ss = abs(int(pos) - scale) + 5
                dt[ss] += clus_obj.loci[bigger].counts[pos]
            x = np.array(range(0, len(dt)))
            logger.debug("x %s and y %s" % (x, dt))
            # tab = pd.DataFrame({'x': x, 'y': dt})
            # tab.to_csv(str(cid) + "peaks.csv", mode='w', header=False, index=False)
            if len(x) > 35 + 12:
                peaks = list(np.array(pysen.pysenMMean(x, dt)) - 5)
                logger.debug(peaks)
            else:
                peaks = ['short']
            cluster.peaks = peaks
        new_cluster[cid] = cluster
    clus_obj.clus = new_cluster
    return clus_obj
def peak_calling(clus_obj)
Run peak calling inside each cluster
3.635493
3.61675
1.005182
if args.fasta:
        name = None
        seq = ""
        reads = dict()
        with open(args.fasta) as in_handle:
            for line in in_handle:
                if line.startswith(">"):
                    if name:
                        reads.update(_generate_reads(seq, name))
                        seq = ""
                    name = line[1:-1]
                else:
                    seq += line.strip()
            reads.update(_generate_reads(seq, name))
        _write_reads(reads, args.out)
def simulate(args)
Main function that manages simulation of small RNAs
2.915889
2.802606
1.04042
reads = dict()
    if len(seq) < 130 and len(seq) > 70:
        reads.update(_mature(seq[:40], 0, name))
        reads.update(_mature(seq[-40:], len(seq) - 40, name))
        reads.update(_noise(seq, name))
        reads.update(_noise(seq, name, 25))
    return reads
def _generate_reads(seq, name)
Main function that creates reads from precursors
3.415951
3.309884
1.032046
reads = dict()
    probs = [0.1, 0.2, 0.4, 0.2, 0.1]
    end = 5 + size
    error = [-2, -1, 0, 1, 2]
    for error5 in error:
        for error3 in error:
            s = 5 - error5
            e = end - error3
            seen = subseq[s:e]
            counts = int(probs[error5 + 2] * probs[error3 + 2] * total) + 1
            name = "seq_%s_%s_%s_x%s" % (c, s + absolute, e + absolute, counts)
            reads[name] = (seen, counts)
    return reads
def _mature(subseq, absolute, c, size=33, total=5000)
Create mature sequences around start/end
4.325837
4.258776
1.015747
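As a worked example of the count model in _mature with the defaults above: the unshifted 5'/3' combination gets int(0.4 * 0.4 * 5000) + 1 = 801 reads, while the most extreme ±2/±2 trimming gets int(0.1 * 0.1 * 5000) + 1 = 51. A tiny sketch that prints the whole grid:

# Expected read counts per 5'/3' trimming offset, using the same probability
# grid as _mature (probs indexed by offset + 2).
probs = [0.1, 0.2, 0.4, 0.2, 0.1]
total = 5000
for off5 in (-2, -1, 0, 1, 2):
    for off3 in (-2, -1, 0, 1, 2):
        counts = int(probs[off5 + 2] * probs[off3 + 2] * total) + 1
        print("5p%+d 3p%+d -> %d reads" % (off5, off3, counts))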
reads = dict()
    seen = 0
    while seen < total:
        s = random.randint(0, len(seq) - size)
        e = s + size + random.randint(-5, 5)
        p = random.uniform(0, 0.1)
        counts = int(p * total) + 1
        seen += counts
        name = "seq_%s_%s_%s_x%s" % (c, s, e, counts)
        reads[name] = (seq[s:e], counts)
    return reads
def _noise(seq, c, size=33, total=1000)
Create random noise reads sampled across the precursor sequence
3.458737
3.212781
1.076555
out_ma = prefix + ".ma"
    out_fasta = prefix + ".fasta"
    out_real = prefix + ".txt"
    with open(out_ma, 'w') as ma_handle:
        print("id\tseq\tsample", file=ma_handle, end="")
        with open(out_fasta, 'w') as fa_handle:
            with open(out_real, 'w') as read_handle:
                for idx, r in enumerate(reads):
                    info = r.split("_")
                    print("seq_%s\t%s\t%s" % (idx, reads[r][0], reads[r][1]), file=ma_handle, end="")
                    print(">seq_%s\n%s" % (idx, reads[r][0]), file=fa_handle, end="")
                    print("%s\t%s\t%s\t%s\t%s\t%s\t%s" % (idx, r, reads[r][0], reads[r][1], info[1], info[2], info[3]), file=read_handle, end="")
def _write_reads(reads, prefix)
Write fasta file, ma file and real position
2.244984
2.034471
1.103473
logger.info("Reading sequences")
    data = parse_ma_file(args.ma)
    logger.info("Get sequences from sam")
    is_align = _read_sam(args.sam)
    is_json, is_db = _read_json(args.json)
    res = _summarise_sam(data, is_align, is_json, is_db)
    _write_suma(res, os.path.join(args.out, "stats_align.dat"))
    logger.info("Done")
def stats(args)
Create stats from the analysis
5.980335
5.936868
1.007322
is_json = set()
    is_db = {}
    with open(fn_json) as handle:
        data = json.load(handle)
        # original Py 2.x code:
        # for item in data[0].values():
        #     seqs_name = map(lambda (x): x.keys(), item['seqs'])
        # rewritten by 2to3
        for item in list(data[0].values()):
            seqs_name = [list(x.keys()) for x in item['seqs']]
            db_name = item['valid'] if "valid" in item else None
            [is_json.add(name[0]) for name in seqs_name]
            if db_name:
                [is_db.update({name[0]: ",".join(db_name)}) for name in seqs_name]
    return is_json, is_db
def _read_json(fn_json)
read json information
4.67626
4.616518
1.012941
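For context, a hedged guess at the JSON shape _read_json walks through: the 'seqs' and 'valid' keys come from the code above, but the cluster names, sequence names and counts are invented.

# Invented example of the structure _read_json expects.
example = [
    {
        "cluster_1": {"seqs": [{"seq_1": 10}, {"seq_2": 4}], "valid": ["miRNA"]},
        "cluster_2": {"seqs": [{"seq_3": 7}]},
    }
]

is_json, is_db = set(), {}
for item in example[0].values():
    seqs_name = [list(x.keys()) for x in item["seqs"]]
    db_name = item.get("valid")
    for name in seqs_name:
        is_json.add(name[0])
        if db_name:
            is_db[name[0]] = ",".join(db_name)
print(is_json, is_db)   # seq_1..seq_3 seen; seq_1 and seq_2 annotated as miRNA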
try:
        logger.debug(" ".join(str(x) for x in cmd) if not isinstance(cmd, basestring) else cmd)
        _do_run(cmd, checks, log_stdout)
    except:
        if log_error:
            logger.info("error at command")
        raise
def run(cmd, data=None, checks=None, region=None, log_error=True, log_stdout=False)
Run the provided command, logging details and checking for errors.
4.851421
5.027627
0.964952
if isinstance(cmd, basestring):
        # check for standard or anonymous named pipes
        if cmd.find(" | ") > 0 or cmd.find(">(") or cmd.find("<("):
            return "set -o pipefail; " + cmd, True, find_bash()
        else:
            return cmd, True, None
    else:
        return [str(x) for x in cmd], False, None
def _normalize_cmd_args(cmd)
Normalize subprocess arguments to handle list commands, strings and pipes. Piped commands set pipefail and require use of bash to help with debugging intermediate errors.
6.017376
4.957343
1.213831
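A small hedged demo of the pipe handling: string commands containing a pipe get the 'set -o pipefail; ' prefix and run under bash, so a failure in any stage shows up in the exit code. The normalize helper and the /bin/bash path are illustrative stand-ins, not the module's own find_bash().

# Sketch of the normalization behaviour for string vs. list commands.
import subprocess

def normalize(cmd, bash="/bin/bash"):   # bash path is an assumption
    if isinstance(cmd, str):
        if " | " in cmd or ">(" in cmd or "<(" in cmd:
            return "set -o pipefail; " + cmd, True, bash
        return cmd, True, None
    return [str(x) for x in cmd], False, None

cmd, shell, executable = normalize("ls missing_dir | wc -l")
# Without pipefail the pipe would exit 0 (wc succeeds); with it, ls's failure propagates.
rc = subprocess.call(cmd, shell=shell, executable=executable)
print("exit code:", rc)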
cmd, shell_arg, executable_arg = _normalize_cmd_args(cmd)
    s = subprocess.Popen(cmd, shell=shell_arg, executable=executable_arg,
                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                         close_fds=True)
    debug_stdout = collections.deque(maxlen=100)
    while 1:
        line = s.stdout.readline()
        if line:
            debug_stdout.append(line)
            if log_stdout:
                logger.debug(line.rstrip())
            else:
                logger.debug(line.rstrip())
        exitcode = s.poll()
        if exitcode is not None:
            for line in s.stdout:
                debug_stdout.append(line)
            if exitcode is not None and exitcode != 0:
                error_msg = " ".join(cmd) if not isinstance(cmd, basestring) else cmd
                error_msg += "\n"
                error_msg += "".join(debug_stdout)
                s.communicate()
                s.stdout.close()
                raise subprocess.CalledProcessError(exitcode, error_msg)
            else:
                break
    s.communicate()
    s.stdout.close()
    # Check for problems not identified by shell return codes
    if checks:
        for check in checks:
            if not check():
                raise IOError("External command failed")
def _do_run(cmd, checks, log_stdout=False)
Perform running and check results, raising errors for issues.
2.649412
2.589422
1.023167
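The output-capture pattern in _do_run, keeping only the last 100 lines of output for error reporting, can be shown standalone; the shell command here is arbitrary.

# Keep a rolling tail of subprocess output for error messages.
import collections
import subprocess

proc = subprocess.Popen("seq 1 500; echo done", shell=True,
                        stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
tail = collections.deque(maxlen=100)
for raw in proc.stdout:
    tail.append(raw.decode())
proc.wait()
print("exit code:", proc.returncode)
print("last captured line:", tail[-1].strip())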
seqs = defaultdict(set)
    # n = len(list_c.keys())
    for c in list_c.values():
        for l in c.loci2seq:
            [seqs[s].add(c.id) for s in c.loci2seq[l]]
    common = [s for s in seqs if len(seqs[s]) > 1]
    seqs_in_c = defaultdict(float)
    for c in list_c.values():
        for l in c.loci2seq:
            # total = sum([v for v in loci_obj[l].coverage.values()])
            for s in c.loci2seq[l]:
                if s in common:
                    pos = seq_obj[s].pos[l]
                    # cov = 1.0 * loci_obj[l].coverage[pos] / total
                    cov = 1.0 * loci_obj[l].coverage[pos]
                    if seqs_in_c[(s, c.id)] < cov:
                        seqs_in_c[(s, c.id)] = cov
    seqs_in_c = _transform(seqs_in_c)
    return seqs_in_c
def _dict_seq_locus(list_c, loci_obj, seq_obj)
Return a dict mapping each sequence to the clusters it belongs to: seq -> [cluster1, cluster2, ...]
2.654959
2.652472
1.000938
for hypo in self.Values():
        like = self.Likelihood(data, hypo)
        self.Mult(hypo, like)
    self.Normalize()
def Update(self, data)
Updates the PMF with new data.

data: string cookie type
9.432084
9.152918
1.0305
mix = self.loci[hypo]
    like = mix[data]
    return like
def Likelihood(self, data, hypo)
The likelihood of the data under the hypothesis.

data: string cookie type
hypo: string bowl ID
18.405304
15.598875
1.179912
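Update and Likelihood follow the Think Bayes Pmf pattern; a self-contained toy version of the same posterior update, with invented bowl mixes, looks like this:

# Toy posterior update mirroring Update()/Likelihood() without the Pmf class.
mixes = {
    "bowl_1": {"vanilla": 0.75, "chocolate": 0.25},
    "bowl_2": {"vanilla": 0.50, "chocolate": 0.50},
}
prior = {"bowl_1": 0.5, "bowl_2": 0.5}

def update(pmf, data):
    for hypo in pmf:
        pmf[hypo] *= mixes[hypo][data]   # Likelihood(data, hypo)
    total = sum(pmf.values())
    for hypo in pmf:
        pmf[hypo] /= total               # Normalize()
    return pmf

print(update(dict(prior), "vanilla"))    # {'bowl_1': 0.6, 'bowl_2': 0.4}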
current = clus_obj.clus
    clus_seqt = clus_obj.seq
    clus_locit = clus_obj.loci
    itern = 0
    for idc in current.keys():
        itern += 1
        timestamp = str(idc)
        seqListTemp = ()
        f = open("/tmp/" + timestamp + ".fa", "w")
        for idl in current[idc].loci2seq.keys():
            seqListTemp = list(set(seqListTemp).union(current[idc].loci2seq[idl]))
        maxscore = 0
        for s in seqListTemp:
            score = calculate_size(clus_seqt[s].freq)
            maxscore = max(maxscore, score)
            clus_seqt[s].score = score
            seq = clus_seqt[s]
            f.write(">" + s + "\n" + seq.seq + "\n")
        f.close()
        locilen_sorted = sorted(current[idc].locilen.iteritems(), key=operator.itemgetter(1), reverse=True)
        lmax = clus_locit[locilen_sorted[0][0]]
        f = open("/tmp/" + timestamp + ".bed", "w")
        f.write("%s\t%s\t%s\t.\t.\t%s\n" % (lmax.chr, lmax.start, lmax.end, lmax.strand))
        f.close()
        os.system("bedtools getfasta -s -fi " + index + " -bed /tmp/" + timestamp + ".bed -fo /tmp/" + timestamp + ".pre.fa")
        os.system("bowtie2-build /tmp/" + timestamp + ".pre.fa /tmp/" + timestamp + ".pre.ind >/dev/null 2>&1")
        os.system("bowtie2 --rdg 7,3 --mp 4 --end-to-end --no-head --no-sq -D 20 -R 3 -N 0 -i S,1,0.8 -L 3 -f /tmp/" + timestamp + ".pre.ind /tmp/" + timestamp + ".fa -S /tmp/" + timestamp + ".map >>bowtie.log 2>&1")
        f = open("/tmp/" + timestamp + ".map", "r")
        seqpos = {}
        minv = 10000000
        for line in f:
            line = line.strip()
            cols = line.split("\t")
            seqpos[cols[0]] = int(cols[3])
            if minv > int(cols[3]):
                minv = int(cols[3])
        f.close()
        seqpos_sorted = sorted(seqpos.iteritems(), key=operator.itemgetter(1), reverse=False)
        showseq = ""
        showseq_plain = ""
        for (s, pos) in seqpos_sorted:
            ratio = (clus_seqt[s].score * 1.0 / maxscore * 100.0)
            realScore = (math.log(ratio, 2) * 2)
            if realScore < 0:
                realScore = 0
            # "score %s max %s ratio %s real %.0f" % (clus_seqt[s].score, maxscore, ratio, realScore)
            # calculate the mean expression of the sequence and change size letter
            showseq_plain += "<br>%s<a style = \"font-size:%.0fpx;\"href = javascript:loadSeq(\"%s\")>%s</a>" % ("".join("." for i in range(pos - 1)), realScore + 10, s, clus_seqt[s].seq)
            # showseq += seqviz.addseq(pos-1, clus_seqt[s].len, clus_seqt[s].seq)
        # current[idc].showseq = showseq
        current[idc].showseq_plain = showseq_plain
        os.system("rm /tmp/" + timestamp + "*")
    clus_obj.clus = current
    clus_obj.seq = clus_seqt
    return clus_obj
def show_seq(clus_obj, index)
Get the precursor and map sequences to it. This way we create a positional map.
3.531028
3.528829
1.000623
for ids in s:
        obj = s[ids]
        [obj.norm_freq.update({sample: 1.0 * obj.freq[sample] / (t[sample] + 1) * 1000000}) for sample in obj.norm_freq]
        s[ids] = obj
    return s
def _normalize_seqs(s, t)
Normalize to RPM
5.649389
5.54867
1.018152
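The normalization is reads-per-million with a +1 pseudocount on the library size, i.e. norm = count / (total + 1) * 1e6; a minimal standalone version with made-up counts:

# RPM with the same +1 pseudocount used by _normalize_seqs.
def rpm(count, library_size):
    return 1.0 * count / (library_size + 1) * 1e6

freq = {"sample_A": 150, "sample_B": 30}              # invented raw counts
totals = {"sample_A": 2000000, "sample_B": 500000}    # invented library sizes
norm = {s: rpm(freq[s], totals[s]) for s in freq}
print(norm)   # ~75 RPM for sample_A, ~60 RPM for sample_B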
# use pybedtools to keep valid positions
    # intersect option with -b bigger_cluster_loci
    a = pybedtools.BedTool(bam_in)
    b = pybedtools.BedTool(precursors)
    c = a.intersect(b, u=True)
    out_file = utils.splitext_plus(op.basename(bam_in))[0] + "_clean.bam"
    c.saveas(out_file)
    return op.abspath(out_file)
def prepare_bam(bam_in, precursors)
Clean BAM file to keep only positions inside the bigger cluster
4.751515
4.000866
1.187622
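A hedged usage sketch of the intersect step: u=True keeps each BAM record that overlaps at least one precursor interval; the file names below are hypothetical.

# Keep only alignments that fall inside precursor regions (paths are hypothetical).
import pybedtools

reads = pybedtools.BedTool("aligned_reads.bam")
precursors = pybedtools.BedTool("precursors.bed")
inside = reads.intersect(precursors, u=True)   # -u: report a read once if it overlaps anything
inside.saveas("aligned_reads_clean.bam")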
new_bed = utils.splitext_plus(bed_file)[0] + '_order.bed'
    with open(bed_file) as in_handle:
        with open(new_bed, 'w') as out_handle:
            for line in in_handle:
                cols = line.strip().split("\t")
                cols[3] = _select_anno(cols[3]) + "_" + cols[4]
                cols[4] = "0"
                print("\t".join(cols), file=out_handle, end="")
    return new_bed
def _reorder_columns(bed_file)
Reorder columns to be compatible with CoRaL
2.496676
2.454206
1.017305
new_cov = utils.splitext_plus(cov_file)[0] + '_fix.cov'
    with open(cov_file) as in_handle:
        with open(new_cov, 'w') as out_handle:
            for line in in_handle:
                cols = line.strip().split("\t")
                cols[4] = cols[6]
                print("\t".join(cols[0:6]), file=out_handle, end="")
    return new_cov
def _fix_score_column(cov_file)
Move counts to the score column of the bed file
2.376396
2.304691
1.031112
bed_file = _reorder_columns(bed_file)
    counts_reads_cmd = ("coverageBed -s -counts -b {bam_in} "
                        "-a {bed_file} | sort -k4,4 "
                        "> {out_dir}/loci.cov")
    # with tx_tmpdir() as temp_dir:
    with utils.chdir(out_dir):
        run(counts_reads_cmd.format(min_trimmed_read_len=min_trimmed_read_len,
                                    max_trimmed_read_len=max_trimmed_read_len,
                                    **locals()), "Run counts_reads")
    loci_file = _fix_score_column(op.join(out_dir, "loci.cov"))
    return loci_file
def detect_regions(bam_in, bed_file, out_dir, prefix)
Detect regions using first CoRaL module
5.13515
5.197759
0.987955
new_cov = op.join(op.dirname(cov_file), 'feat_antisense.txt')
    with open(cov_file) as in_handle:
        with open(new_cov, 'w') as out_handle:
            print("name\tantisense", file=out_handle, end="")
            for line in in_handle:
                cols = line.strip().split("\t")
                cols[6] = 0 if cols[6] < min_reads else cols[6]
                print("%s\t%s" % (cols[3], cols[6]), file=out_handle, end="")
    return new_cov
def _order_antisense_column(cov_file, min_reads)
Write per-locus antisense counts, zeroing values below min_reads
2.368641
2.310982
1.02495
data = Counter()
    a = pybedtools.BedTool(bam_in)
    b = pybedtools.BedTool(loci_file)
    c = a.intersect(b, s=True, bed=True, wo=True)
    for line in c:
        end = int(line[1]) + 1 + int(line[2]) if line[5] == "+" else int(line[1]) + 1
        start = int(line[1]) + 1 if line[5] == "+" else int(line[1]) + 1 + int(line[2])
        side5 = "%s\t5p\t%s" % (line[15], start)
        side3 = "%s\t3p\t%s" % (line[15], end)
        data[side5] += 1
        data[side3] += 1
    counts_reads = op.join(out_dir, 'locus_readpos.counts')
    with open(counts_reads, 'w') as out_handle:
        for k in data:
            print(k, file=out_handle, end="")
    return counts_reads
def _reads_per_position(bam_in, loci_file, out_dir)
Create input for computing entropy
2.601755
2.590218
1.004454