id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
223,200
bcbio/bcbio-nextgen
bcbio/structural/purple.py
_counts_to_amber
def _counts_to_amber(t_vals, n_vals): """Converts a line of CollectAllelicCounts into AMBER line. """ t_depth = int(t_vals["REF_COUNT"]) + int(t_vals["ALT_COUNT"]) n_depth = int(n_vals["REF_COUNT"]) + int(n_vals["ALT_COUNT"]) if n_depth > 0 and t_depth > 0: t_baf = float(t_vals["ALT_COUNT"]) / float(t_depth) n_baf = float(n_vals["ALT_COUNT"]) / float(n_depth) return [t_vals["CONTIG"], t_vals["POSITION"], t_baf, _normalize_baf(t_baf), t_depth, n_baf, _normalize_baf(n_baf), n_depth]
python
def _counts_to_amber(t_vals, n_vals):
    """Convert one line of CollectAllelicCounts output into an AMBER BAF line.

    Returns the AMBER row as a list, or None when either sample has zero
    depth at the position (callers skip falsy results).
    """
    tumor_depth = int(t_vals["REF_COUNT"]) + int(t_vals["ALT_COUNT"])
    normal_depth = int(n_vals["REF_COUNT"]) + int(n_vals["ALT_COUNT"])
    # Guard clause: positions without coverage in both samples are unusable.
    if tumor_depth <= 0 or normal_depth <= 0:
        return None
    tumor_baf = float(t_vals["ALT_COUNT"]) / float(tumor_depth)
    normal_baf = float(n_vals["ALT_COUNT"]) / float(normal_depth)
    return [t_vals["CONTIG"], t_vals["POSITION"],
            tumor_baf, _normalize_baf(tumor_baf), tumor_depth,
            normal_baf, _normalize_baf(normal_baf), normal_depth]
[ "def", "_counts_to_amber", "(", "t_vals", ",", "n_vals", ")", ":", "t_depth", "=", "int", "(", "t_vals", "[", "\"REF_COUNT\"", "]", ")", "+", "int", "(", "t_vals", "[", "\"ALT_COUNT\"", "]", ")", "n_depth", "=", "int", "(", "n_vals", "[", "\"REF_COUNT\""...
Converts a line of CollectAllelicCounts into AMBER line.
[ "Converts", "a", "line", "of", "CollectAllelicCounts", "into", "AMBER", "line", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/purple.py#L113-L122
223,201
bcbio/bcbio-nextgen
bcbio/structural/purple.py
_count_files_to_amber
def _count_files_to_amber(tumor_counts, normal_counts, work_dir, data): """Converts tumor and normal counts from GATK CollectAllelicCounts into Amber format. """ amber_dir = utils.safe_makedir(os.path.join(work_dir, "amber")) out_file = os.path.join(amber_dir, "%s.amber.baf" % dd.get_sample_name(data)) if not utils.file_uptodate(out_file, tumor_counts): with file_transaction(data, out_file) as tx_out_file: with open(tumor_counts) as tumor_handle: with open(normal_counts) as normal_handle: with open(tx_out_file, "w") as out_handle: writer = csv.writer(out_handle, delimiter="\t") writer.writerow(["Chromosome", "Position", "TumorBAF", "TumorModifiedBAF", "TumorDepth", "NormalBAF", "NormalModifiedBAF", "NormalDepth"]) header = None for t, n in zip(tumor_handle, normal_handle): if header is None and t.startswith("CONTIG"): header = t.strip().split() elif header is not None: t_vals = dict(zip(header, t.strip().split())) n_vals = dict(zip(header, n.strip().split())) amber_line = _counts_to_amber(t_vals, n_vals) if amber_line: writer.writerow(amber_line) return out_file
python
def _count_files_to_amber(tumor_counts, normal_counts, work_dir, data):
    """Converts tumor and normal counts from GATK CollectAllelicCounts into Amber format.

    tumor_counts/normal_counts are CollectAllelicCounts TSV files. They are
    read in lockstep with zip(), so both files are assumed to cover the same
    positions in the same order -- TODO confirm against the caller.
    Returns the path to the written <sample>.amber.baf file.
    """
    amber_dir = utils.safe_makedir(os.path.join(work_dir, "amber"))
    out_file = os.path.join(amber_dir, "%s.amber.baf" % dd.get_sample_name(data))
    if not utils.file_uptodate(out_file, tumor_counts):
        with file_transaction(data, out_file) as tx_out_file:
            with open(tumor_counts) as tumor_handle:
                with open(normal_counts) as normal_handle:
                    with open(tx_out_file, "w") as out_handle:
                        writer = csv.writer(out_handle, delimiter="\t")
                        writer.writerow(["Chromosome", "Position", "TumorBAF", "TumorModifiedBAF", "TumorDepth",
                                         "NormalBAF", "NormalModifiedBAF", "NormalDepth"])
                        # Lines before the CONTIG header row are metadata and are
                        # skipped; once the header is seen, columns are mapped by name.
                        header = None
                        for t, n in zip(tumor_handle, normal_handle):
                            if header is None and t.startswith("CONTIG"):
                                header = t.strip().split()
                            elif header is not None:
                                t_vals = dict(zip(header, t.strip().split()))
                                n_vals = dict(zip(header, n.strip().split()))
                                # _counts_to_amber returns a falsy value for
                                # zero-depth positions; those are dropped here.
                                amber_line = _counts_to_amber(t_vals, n_vals)
                                if amber_line:
                                    writer.writerow(amber_line)
    return out_file
[ "def", "_count_files_to_amber", "(", "tumor_counts", ",", "normal_counts", ",", "work_dir", ",", "data", ")", ":", "amber_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"amber\"", ")", ")", "out_file", ...
Converts tumor and normal counts from GATK CollectAllelicCounts into Amber format.
[ "Converts", "tumor", "and", "normal", "counts", "from", "GATK", "CollectAllelicCounts", "into", "Amber", "format", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/purple.py#L124-L148
223,202
bcbio/bcbio-nextgen
bcbio/structural/purple.py
_amber_het_file
def _amber_het_file(method, vrn_files, work_dir, paired): """Create file of BAFs in normal heterozygous positions compatible with AMBER. Two available methods: - pon -- Use panel of normals with likely heterozygous sites. - variants -- Use pre-existing variant calls, filtered to likely heterozygotes. https://github.com/hartwigmedical/hmftools/tree/master/amber https://github.com/hartwigmedical/hmftools/blob/637e3db1a1a995f4daefe2d0a1511a5bdadbeb05/hmf-common/src/test/resources/amber/new.amber.baf """ assert vrn_files, "Did not find compatible variant calling files for PURPLE inputs" from bcbio.heterogeneity import bubbletree if method == "variants": amber_dir = utils.safe_makedir(os.path.join(work_dir, "amber")) out_file = os.path.join(amber_dir, "%s.amber.baf" % dd.get_sample_name(paired.tumor_data)) prep_file = bubbletree.prep_vrn_file(vrn_files[0]["vrn_file"], vrn_files[0]["variantcaller"], work_dir, paired, AmberWriter) utils.symlink_plus(prep_file, out_file) pcf_file = out_file + ".pcf" if not utils.file_exists(pcf_file): with file_transaction(paired.tumor_data, pcf_file) as tx_out_file: r_file = os.path.join(os.path.dirname(tx_out_file), "bafSegmentation.R") with open(r_file, "w") as out_handle: out_handle.write(_amber_seg_script) cmd = "%s && %s --no-environ %s %s %s" % (utils.get_R_exports(), utils.Rscript_cmd(), r_file, out_file, pcf_file) do.run(cmd, "PURPLE: AMBER baf segmentation") else: assert method == "pon" out_file = _run_amber(paired, work_dir) return out_file
python
def _amber_het_file(method, vrn_files, work_dir, paired):
    """Create file of BAFs in normal heterozygous positions compatible with AMBER.

    Two available methods:
    - pon -- Use panel of normals with likely heterozygous sites.
    - variants -- Use pre-existing variant calls, filtered to likely heterozygotes.

    https://github.com/hartwigmedical/hmftools/tree/master/amber
    https://github.com/hartwigmedical/hmftools/blob/637e3db1a1a995f4daefe2d0a1511a5bdadbeb05/hmf-common/src/test/resources/amber/new.amber.baf
    """
    assert vrn_files, "Did not find compatible variant calling files for PURPLE inputs"
    from bcbio.heterogeneity import bubbletree
    if method == "variants":
        amber_dir = utils.safe_makedir(os.path.join(work_dir, "amber"))
        out_file = os.path.join(amber_dir, "%s.amber.baf" % dd.get_sample_name(paired.tumor_data))
        prep_file = bubbletree.prep_vrn_file(vrn_files[0]["vrn_file"], vrn_files[0]["variantcaller"],
                                             work_dir, paired, AmberWriter)
        # Expose the prepared BAF file under the AMBER-conventional name.
        utils.symlink_plus(prep_file, out_file)
        pcf_file = out_file + ".pcf"
        if not utils.file_exists(pcf_file):
            with file_transaction(paired.tumor_data, pcf_file) as tx_out_file:
                # Write the segmentation R script next to the transactional
                # output, then run it on the BAF file to produce the .pcf file.
                r_file = os.path.join(os.path.dirname(tx_out_file), "bafSegmentation.R")
                with open(r_file, "w") as out_handle:
                    out_handle.write(_amber_seg_script)
                cmd = "%s && %s --no-environ %s %s %s" % (utils.get_R_exports(), utils.Rscript_cmd(),
                                                          r_file, out_file, pcf_file)
                do.run(cmd, "PURPLE: AMBER baf segmentation")
    else:
        assert method == "pon"
        out_file = _run_amber(paired, work_dir)
    return out_file
[ "def", "_amber_het_file", "(", "method", ",", "vrn_files", ",", "work_dir", ",", "paired", ")", ":", "assert", "vrn_files", ",", "\"Did not find compatible variant calling files for PURPLE inputs\"", "from", "bcbio", ".", "heterogeneity", "import", "bubbletree", "if", "...
Create file of BAFs in normal heterozygous positions compatible with AMBER. Two available methods: - pon -- Use panel of normals with likely heterozygous sites. - variants -- Use pre-existing variant calls, filtered to likely heterozygotes. https://github.com/hartwigmedical/hmftools/tree/master/amber https://github.com/hartwigmedical/hmftools/blob/637e3db1a1a995f4daefe2d0a1511a5bdadbeb05/hmf-common/src/test/resources/amber/new.amber.baf
[ "Create", "file", "of", "BAFs", "in", "normal", "heterozygous", "positions", "compatible", "with", "AMBER", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/purple.py#L166-L197
223,203
bcbio/bcbio-nextgen
bcbio/structural/purple.py
_run_cobalt
def _run_cobalt(paired, work_dir): """Run Cobalt for counting read depth across genomic windows. PURPLE requires even 1000bp windows so use integrated counting solution directly rather than converting from CNVkit calculations. If this approach is useful should be moved upstream to be available to other tools as an input comparison. https://github.com/hartwigmedical/hmftools/tree/master/count-bam-lines """ cobalt_dir = utils.safe_makedir(os.path.join(work_dir, "cobalt")) out_file = os.path.join(cobalt_dir, "%s.cobalt" % dd.get_sample_name(paired.tumor_data)) if not utils.file_exists(out_file): with file_transaction(paired.tumor_data, out_file) as tx_out_file: cmd = ["COBALT"] + _get_jvm_opts(tx_out_file, paired.tumor_data) + \ ["-reference", paired.normal_name, "-reference_bam", paired.normal_bam, "-tumor", paired.tumor_name, "-tumor_bam", paired.tumor_bam, "-threads", dd.get_num_cores(paired.tumor_data), "-output_dir", os.path.dirname(tx_out_file), "-gc_profile", dd.get_variation_resources(paired.tumor_data)["gc_profile"]] cmd = "%s && %s" % (utils.get_R_exports(), " ".join([str(x) for x in cmd])) do.run(cmd, "PURPLE: COBALT read depth normalization") for f in os.listdir(os.path.dirname(tx_out_file)): if f != os.path.basename(tx_out_file): shutil.move(os.path.join(os.path.dirname(tx_out_file), f), os.path.join(cobalt_dir, f)) return out_file
python
def _run_cobalt(paired, work_dir):
    """Run Cobalt for counting read depth across genomic windows.

    PURPLE requires even 1000bp windows so use integrated counting solution
    directly rather than converting from CNVkit calculations. If this approach
    is useful should be moved upstream to be available to other tools as an
    input comparison.

    https://github.com/hartwigmedical/hmftools/tree/master/count-bam-lines
    """
    cobalt_dir = utils.safe_makedir(os.path.join(work_dir, "cobalt"))
    out_file = os.path.join(cobalt_dir, "%s.cobalt" % dd.get_sample_name(paired.tumor_data))
    if not utils.file_exists(out_file):
        with file_transaction(paired.tumor_data, out_file) as tx_out_file:
            cmd = ["COBALT"] + _get_jvm_opts(tx_out_file, paired.tumor_data) + \
                  ["-reference", paired.normal_name, "-reference_bam", paired.normal_bam,
                   "-tumor", paired.tumor_name, "-tumor_bam", paired.tumor_bam,
                   "-threads", dd.get_num_cores(paired.tumor_data),
                   "-output_dir", os.path.dirname(tx_out_file),
                   "-gc_profile", dd.get_variation_resources(paired.tumor_data)["gc_profile"]]
            # R exports are sourced first since COBALT depends on the R setup.
            cmd = "%s && %s" % (utils.get_R_exports(), " ".join([str(x) for x in cmd]))
            do.run(cmd, "PURPLE: COBALT read depth normalization")
            # COBALT writes companion files alongside the primary output; move
            # everything except the transactional out_file into the final
            # directory so those extras survive the transaction.
            for f in os.listdir(os.path.dirname(tx_out_file)):
                if f != os.path.basename(tx_out_file):
                    shutil.move(os.path.join(os.path.dirname(tx_out_file), f),
                                os.path.join(cobalt_dir, f))
    return out_file
[ "def", "_run_cobalt", "(", "paired", ",", "work_dir", ")", ":", "cobalt_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"cobalt\"", ")", ")", "out_file", "=", "os", ".", "path", ".", "join", "(", ...
Run Cobalt for counting read depth across genomic windows. PURPLE requires even 1000bp windows so use integrated counting solution directly rather than converting from CNVkit calculations. If this approach is useful should be moved upstream to be available to other tools as an input comparison. https://github.com/hartwigmedical/hmftools/tree/master/count-bam-lines
[ "Run", "Cobalt", "for", "counting", "read", "depth", "across", "genomic", "windows", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/purple.py#L259-L285
223,204
bcbio/bcbio-nextgen
bcbio/structural/purple.py
_cobalt_ratio_file
def _cobalt_ratio_file(paired, work_dir): """Convert CNVkit binning counts into cobalt ratio output. This contains read counts plus normalization for GC, from section 7.2 "Determine read depth ratios for tumor and reference genomes" https://www.biorxiv.org/content/biorxiv/early/2018/09/20/415133.full.pdf Since CNVkit cnr files already have GC bias correction, we re-center the existing log2 ratios to be around 1, rather than zero, which matches the cobalt expectations. XXX This doesn't appear to be a worthwhile direction since PURPLE requires 1000bp even binning. We'll leave this here as a starting point for future work but work on using cobalt directly. """ cobalt_dir = utils.safe_makedir(os.path.join(work_dir, "cobalt")) out_file = os.path.join(cobalt_dir, "%s.cobalt" % dd.get_sample_name(paired.tumor_data)) if not utils.file_exists(out_file): cnr_file = tz.get_in(["depth", "bins", "normalized"], paired.tumor_data) with file_transaction(paired.tumor_data, out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: writer = csv.writer(out_handle, delimiter="\t") writer.writerow(["Chromosome", "Position", "ReferenceReadCount", "TumorReadCount", "ReferenceGCRatio", "TumorGCRatio", "ReferenceGCDiploidRatio"]) raise NotImplementedError return out_file
python
def _cobalt_ratio_file(paired, work_dir):
    """Convert CNVkit binning counts into cobalt ratio output.

    This contains read counts plus normalization for GC, from section 7.2
    "Determine read depth ratios for tumor and reference genomes"

    https://www.biorxiv.org/content/biorxiv/early/2018/09/20/415133.full.pdf

    Since CNVkit cnr files already have GC bias correction, we re-center
    the existing log2 ratios to be around 1, rather than zero, which matches
    the cobalt expectations.

    XXX This doesn't appear to be a worthwhile direction since PURPLE requires
    1000bp even binning. We'll leave this here as a starting point for future
    work but work on using cobalt directly.

    NOTE: intentionally unfinished -- only the header row is written before
    raising NotImplementedError; the cnr_file read below is unused so far.
    """
    cobalt_dir = utils.safe_makedir(os.path.join(work_dir, "cobalt"))
    out_file = os.path.join(cobalt_dir, "%s.cobalt" % dd.get_sample_name(paired.tumor_data))
    if not utils.file_exists(out_file):
        cnr_file = tz.get_in(["depth", "bins", "normalized"], paired.tumor_data)
        with file_transaction(paired.tumor_data, out_file) as tx_out_file:
            with open(tx_out_file, "w") as out_handle:
                writer = csv.writer(out_handle, delimiter="\t")
                writer.writerow(["Chromosome", "Position", "ReferenceReadCount", "TumorReadCount",
                                 "ReferenceGCRatio", "TumorGCRatio", "ReferenceGCDiploidRatio"])
                raise NotImplementedError
    return out_file
[ "def", "_cobalt_ratio_file", "(", "paired", ",", "work_dir", ")", ":", "cobalt_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"cobalt\"", ")", ")", "out_file", "=", "os", ".", "path", ".", "join", ...
Convert CNVkit binning counts into cobalt ratio output. This contains read counts plus normalization for GC, from section 7.2 "Determine read depth ratios for tumor and reference genomes" https://www.biorxiv.org/content/biorxiv/early/2018/09/20/415133.full.pdf Since CNVkit cnr files already have GC bias correction, we re-center the existing log2 ratios to be around 1, rather than zero, which matches the cobalt expectations. XXX This doesn't appear to be a worthwhile direction since PURPLE requires 1000bp even binning. We'll leave this here as a starting point for future work but work on using cobalt directly.
[ "Convert", "CNVkit", "binning", "counts", "into", "cobalt", "ratio", "output", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/purple.py#L287-L313
223,205
bcbio/bcbio-nextgen
bcbio/structural/purple.py
_export_to_vcf
def _export_to_vcf(cur): """Convert PURPLE custom output into VCF. """ if float(cur["copyNumber"]) > 2.0: svtype = "DUP" elif float(cur["copyNumber"]) < 2.0: svtype = "DEL" else: svtype = None if svtype: info = ["END=%s" % cur["end"], "SVLEN=%s" % (int(cur["end"]) - int(cur["start"])), "SVTYPE=%s" % svtype, "CN=%s" % cur["copyNumber"], "PROBES=%s" % cur["depthWindowCount"]] return [cur["chromosome"], cur["start"], ".", "N", "<%s>" % svtype, ".", ".", ";".join(info), "GT", "0/1"]
python
def _export_to_vcf(cur): """Convert PURPLE custom output into VCF. """ if float(cur["copyNumber"]) > 2.0: svtype = "DUP" elif float(cur["copyNumber"]) < 2.0: svtype = "DEL" else: svtype = None if svtype: info = ["END=%s" % cur["end"], "SVLEN=%s" % (int(cur["end"]) - int(cur["start"])), "SVTYPE=%s" % svtype, "CN=%s" % cur["copyNumber"], "PROBES=%s" % cur["depthWindowCount"]] return [cur["chromosome"], cur["start"], ".", "N", "<%s>" % svtype, ".", ".", ";".join(info), "GT", "0/1"]
[ "def", "_export_to_vcf", "(", "cur", ")", ":", "if", "float", "(", "cur", "[", "\"copyNumber\"", "]", ")", ">", "2.0", ":", "svtype", "=", "\"DUP\"", "elif", "float", "(", "cur", "[", "\"copyNumber\"", "]", ")", "<", "2.0", ":", "svtype", "=", "\"DEL...
Convert PURPLE custom output into VCF.
[ "Convert", "PURPLE", "custom", "output", "into", "VCF", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/purple.py#L324-L337
223,206
bcbio/bcbio-nextgen
bcbio/rnaseq/pizzly.py
make_pizzly_gtf
def make_pizzly_gtf(gtf_file, out_file, data): """ pizzly needs the GTF to be in gene -> transcript -> exon order for each gene. it also wants the gene biotype set as the source """ if file_exists(out_file): return out_file db = gtf.get_gtf_db(gtf_file) with file_transaction(data, out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: for gene in db.features_of_type("gene"): children = [x for x in db.children(id=gene)] for child in children: if child.attributes.get("gene_biotype", None): gene_biotype = child.attributes.get("gene_biotype") gene.attributes['gene_biotype'] = gene_biotype gene.source = gene_biotype[0] print(gene, file=out_handle) for child in children: child.source = gene_biotype[0] # gffread produces a version-less FASTA file child.attributes.pop("transcript_version", None) print(child, file=out_handle) return out_file
python
def make_pizzly_gtf(gtf_file, out_file, data):
    """
    pizzly needs the GTF to be in gene -> transcript -> exon order for each gene.
    it also wants the gene biotype set as the source

    Fix: previously `gene_biotype` was referenced in the second loop without
    ever being assigned when no child carried a gene_biotype attribute,
    raising NameError; it is now initialized and the assignment guarded.
    """
    if file_exists(out_file):
        return out_file
    db = gtf.get_gtf_db(gtf_file)
    with file_transaction(data, out_file) as tx_out_file:
        with open(tx_out_file, "w") as out_handle:
            for gene in db.features_of_type("gene"):
                children = [x for x in db.children(id=gene)]
                # Take the biotype from children that carry one (the last
                # match wins, matching the original behavior).
                gene_biotype = None
                for child in children:
                    if child.attributes.get("gene_biotype", None):
                        gene_biotype = child.attributes.get("gene_biotype")
                        gene.attributes['gene_biotype'] = gene_biotype
                        gene.source = gene_biotype[0]
                print(gene, file=out_handle)
                for child in children:
                    if gene_biotype:
                        child.source = gene_biotype[0]
                    # gffread produces a version-less FASTA file
                    child.attributes.pop("transcript_version", None)
                    print(child, file=out_handle)
    return out_file
[ "def", "make_pizzly_gtf", "(", "gtf_file", ",", "out_file", ",", "data", ")", ":", "if", "file_exists", "(", "out_file", ")", ":", "return", "out_file", "db", "=", "gtf", ".", "get_gtf_db", "(", "gtf_file", ")", "with", "file_transaction", "(", "data", ","...
pizzly needs the GTF to be in gene -> transcript -> exon order for each gene. it also wants the gene biotype set as the source
[ "pizzly", "needs", "the", "GTF", "to", "be", "in", "gene", "-", ">", "transcript", "-", ">", "exon", "order", "for", "each", "gene", ".", "it", "also", "wants", "the", "gene", "biotype", "set", "as", "the", "source" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/pizzly.py#L82-L105
223,207
bcbio/bcbio-nextgen
bcbio/structural/validate.py
_validate_caller_vcf
def _validate_caller_vcf(call_vcf, truth_vcf, callable_bed, svcaller, work_dir, data): """Validate a caller VCF against truth within callable regions using SURVIVOR. Combines files with SURIVOR merge and counts (https://github.com/fritzsedlazeck/SURVIVOR/) """ stats = _calculate_comparison_stats(truth_vcf) call_vcf = _prep_vcf(call_vcf, callable_bed, dd.get_sample_name(data), dd.get_sample_name(data), stats, work_dir, data) truth_vcf = _prep_vcf(truth_vcf, callable_bed, vcfutils.get_samples(truth_vcf)[0], "%s-truth" % dd.get_sample_name(data), stats, work_dir, data) cmp_vcf = _survivor_merge(call_vcf, truth_vcf, stats, work_dir, data) return _comparison_stats_from_merge(cmp_vcf, stats, svcaller, data)
python
def _validate_caller_vcf(call_vcf, truth_vcf, callable_bed, svcaller, work_dir, data):
    """Validate a caller VCF against truth within callable regions using SURVIVOR.

    Combines files with SURIVOR merge and counts
    (https://github.com/fritzsedlazeck/SURVIVOR/)

    Returns the comparison statistics produced by _comparison_stats_from_merge.
    """
    # Size/type statistics from the truth set drive both VCF preparation and
    # the SURVIVOR merge window.
    stats = _calculate_comparison_stats(truth_vcf)
    call_vcf = _prep_vcf(call_vcf, callable_bed, dd.get_sample_name(data), dd.get_sample_name(data),
                         stats, work_dir, data)
    # Rename the truth sample with a "-truth" suffix so the merged VCF
    # distinguishes it from the caller's sample.
    truth_vcf = _prep_vcf(truth_vcf, callable_bed, vcfutils.get_samples(truth_vcf)[0],
                          "%s-truth" % dd.get_sample_name(data), stats, work_dir, data)
    cmp_vcf = _survivor_merge(call_vcf, truth_vcf, stats, work_dir, data)
    return _comparison_stats_from_merge(cmp_vcf, stats, svcaller, data)
[ "def", "_validate_caller_vcf", "(", "call_vcf", ",", "truth_vcf", ",", "callable_bed", ",", "svcaller", ",", "work_dir", ",", "data", ")", ":", "stats", "=", "_calculate_comparison_stats", "(", "truth_vcf", ")", "call_vcf", "=", "_prep_vcf", "(", "call_vcf", ","...
Validate a caller VCF against truth within callable regions using SURVIVOR. Combines files with SURIVOR merge and counts (https://github.com/fritzsedlazeck/SURVIVOR/)
[ "Validate", "a", "caller", "VCF", "against", "truth", "within", "callable", "regions", "using", "SURVIVOR", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/validate.py#L44-L55
223,208
bcbio/bcbio-nextgen
bcbio/structural/validate.py
_survivor_merge
def _survivor_merge(call_vcf, truth_vcf, stats, work_dir, data): """Perform a merge of two callsets using SURVIVOR, """ out_file = os.path.join(work_dir, "eval-merge.vcf") if not utils.file_uptodate(out_file, call_vcf): in_call_vcf = call_vcf.replace(".vcf.gz", ".vcf") if not utils.file_exists(in_call_vcf): with file_transaction(data, in_call_vcf) as tx_in_call_vcf: do.run("gunzip -c {call_vcf} > {tx_in_call_vcf}".format(**locals())) in_truth_vcf = truth_vcf.replace(".vcf.gz", ".vcf") if not utils.file_exists(in_truth_vcf): with file_transaction(data, in_truth_vcf) as tx_in_truth_vcf: do.run("gunzip -c {truth_vcf} > {tx_in_truth_vcf}".format(**locals())) in_list_file = os.path.join(work_dir, "eval-inputs.txt") with open(in_list_file, "w") as out_handle: out_handle.write("%s\n%s\n" % (in_call_vcf, in_truth_vcf)) with file_transaction(data, out_file) as tx_out_file: cmd = ("SURVIVOR merge {in_list_file} {stats[merge_size]} 1 0 0 0 {stats[min_size]} {tx_out_file}") do.run(cmd.format(**locals()), "Merge SV files for validation: %s" % dd.get_sample_name(data)) return out_file
python
def _survivor_merge(call_vcf, truth_vcf, stats, work_dir, data):
    """Perform a merge of two callsets using SURVIVOR.

    SURVIVOR requires uncompressed VCF inputs, so bgzipped inputs are
    decompressed first. NOTE: the shell commands below are built with
    .format(**locals()), so the local variable names are part of the behavior.
    Returns the merged eval-merge.vcf path.
    """
    out_file = os.path.join(work_dir, "eval-merge.vcf")
    if not utils.file_uptodate(out_file, call_vcf):
        in_call_vcf = call_vcf.replace(".vcf.gz", ".vcf")
        if not utils.file_exists(in_call_vcf):
            with file_transaction(data, in_call_vcf) as tx_in_call_vcf:
                do.run("gunzip -c {call_vcf} > {tx_in_call_vcf}".format(**locals()))
        in_truth_vcf = truth_vcf.replace(".vcf.gz", ".vcf")
        if not utils.file_exists(in_truth_vcf):
            with file_transaction(data, in_truth_vcf) as tx_in_truth_vcf:
                do.run("gunzip -c {truth_vcf} > {tx_in_truth_vcf}".format(**locals()))
        # SURVIVOR takes its inputs as a file listing one VCF per line.
        in_list_file = os.path.join(work_dir, "eval-inputs.txt")
        with open(in_list_file, "w") as out_handle:
            out_handle.write("%s\n%s\n" % (in_call_vcf, in_truth_vcf))
        with file_transaction(data, out_file) as tx_out_file:
            # merge args: input list, max distance, min callers, type/strand/
            # estimate-distance flags, min size, output.
            cmd = ("SURVIVOR merge {in_list_file} {stats[merge_size]} 1 0 0 0 {stats[min_size]} {tx_out_file}")
            do.run(cmd.format(**locals()), "Merge SV files for validation: %s" % dd.get_sample_name(data))
    return out_file
[ "def", "_survivor_merge", "(", "call_vcf", ",", "truth_vcf", ",", "stats", ",", "work_dir", ",", "data", ")", ":", "out_file", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"eval-merge.vcf\"", ")", "if", "not", "utils", ".", "file_uptodate",...
Perform a merge of two callsets using SURVIVOR,
[ "Perform", "a", "merge", "of", "two", "callsets", "using", "SURVIVOR" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/validate.py#L77-L96
223,209
bcbio/bcbio-nextgen
bcbio/structural/validate.py
_calculate_comparison_stats
def _calculate_comparison_stats(truth_vcf): """Identify calls to validate from the input truth VCF. """ # Avoid very small events for average calculations min_stat_size = 50 min_median_size = 250 sizes = [] svtypes = set([]) with utils.open_gzipsafe(truth_vcf) as in_handle: for call in (l.rstrip().split("\t") for l in in_handle if not l.startswith("#")): stats = _summarize_call(call) if stats["size"] > min_stat_size: sizes.append(stats["size"]) svtypes.add(stats["svtype"]) pct10 = int(np.percentile(sizes, 10)) pct25 = int(np.percentile(sizes, 25)) pct50 = int(np.percentile(sizes, 50)) pct75 = int(np.percentile(sizes, 75)) ranges_detailed = [(int(min(sizes)), pct10), (pct10, pct25), (pct25, pct50), (pct50, pct75), (pct75, max(sizes))] ranges_split = [(int(min(sizes)), pct50), (pct50, max(sizes))] return {"min_size": int(min(sizes) * 0.95), "max_size": int(max(sizes) + 1.05), "svtypes": svtypes, "merge_size": int(np.percentile([x for x in sizes if x > min_median_size], 50)), "ranges": []}
python
def _calculate_comparison_stats(truth_vcf):
    """Identify calls to validate from the input truth VCF.

    Collects event sizes and SV types from non-header truth records, then
    returns thresholds used to prepare inputs and drive the SURVIVOR merge:
    min_size/max_size bounds, the observed svtypes, the median size of
    larger events as the merge window, and an (unused) empty ranges list.

    Cleanup: the percentile-based ranges_detailed/ranges_split locals were
    computed but never used, so they have been removed; the returned dict is
    unchanged.
    """
    # Avoid very small events for average calculations
    min_stat_size = 50
    min_median_size = 250
    sizes = []
    svtypes = set([])
    with utils.open_gzipsafe(truth_vcf) as in_handle:
        for call in (l.rstrip().split("\t") for l in in_handle if not l.startswith("#")):
            stats = _summarize_call(call)
            if stats["size"] > min_stat_size:
                sizes.append(stats["size"])
                svtypes.add(stats["svtype"])
    # NOTE(review): `max(sizes) + 1.05` pads the maximum by ~1bp, unlike the
    # 5% relative padding on min_size -- looks like a typo for `* 1.05`, but
    # left unchanged since downstream filtering depends on it; confirm upstream.
    return {"min_size": int(min(sizes) * 0.95),
            "max_size": int(max(sizes) + 1.05),
            "svtypes": svtypes,
            "merge_size": int(np.percentile([x for x in sizes if x > min_median_size], 50)),
            "ranges": []}
[ "def", "_calculate_comparison_stats", "(", "truth_vcf", ")", ":", "# Avoid very small events for average calculations", "min_stat_size", "=", "50", "min_median_size", "=", "250", "sizes", "=", "[", "]", "svtypes", "=", "set", "(", "[", "]", ")", "with", "utils", "...
Identify calls to validate from the input truth VCF.
[ "Identify", "calls", "to", "validate", "from", "the", "input", "truth", "VCF", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/validate.py#L110-L133
223,210
bcbio/bcbio-nextgen
bcbio/structural/validate.py
_get_start_end
def _get_start_end(parts, index=7): """Retrieve start and end for a VCF record, skips BNDs without END coords """ start = parts[1] end = [x.split("=")[-1] for x in parts[index].split(";") if x.startswith("END=")] if end: end = end[0] return start, end return None, None
python
def _get_start_end(parts, index=7): """Retrieve start and end for a VCF record, skips BNDs without END coords """ start = parts[1] end = [x.split("=")[-1] for x in parts[index].split(";") if x.startswith("END=")] if end: end = end[0] return start, end return None, None
[ "def", "_get_start_end", "(", "parts", ",", "index", "=", "7", ")", ":", "start", "=", "parts", "[", "1", "]", "end", "=", "[", "x", ".", "split", "(", "\"=\"", ")", "[", "-", "1", "]", "for", "x", "in", "parts", "[", "index", "]", ".", "spli...
Retrieve start and end for a VCF record, skips BNDs without END coords
[ "Retrieve", "start", "and", "end", "for", "a", "VCF", "record", "skips", "BNDs", "without", "END", "coords" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/validate.py#L135-L143
223,211
bcbio/bcbio-nextgen
bcbio/structural/validate.py
_summarize_call
def _summarize_call(parts): """Provide summary metrics on size and svtype for a SV call. """ svtype = [x.split("=")[1] for x in parts[7].split(";") if x.startswith("SVTYPE=")] svtype = svtype[0] if svtype else "" start, end = _get_start_end(parts) return {"svtype": svtype, "size": int(end) - int(start)}
python
def _summarize_call(parts):
    """Provide summary metrics on size and svtype for a SV call.

    parts is a tab-split VCF line; reads SVTYPE from the INFO column and
    computes size as end - start via _get_start_end.
    """
    svtype = ""
    for field in parts[7].split(";"):
        if field.startswith("SVTYPE="):
            svtype = field.split("=")[1]
            break
    start, end = _get_start_end(parts)
    return {"svtype": svtype, "size": int(end) - int(start)}
[ "def", "_summarize_call", "(", "parts", ")", ":", "svtype", "=", "[", "x", ".", "split", "(", "\"=\"", ")", "[", "1", "]", "for", "x", "in", "parts", "[", "7", "]", ".", "split", "(", "\";\"", ")", "if", "x", ".", "startswith", "(", "\"SVTYPE=\""...
Provide summary metrics on size and svtype for a SV call.
[ "Provide", "summary", "metrics", "on", "size", "and", "svtype", "for", "a", "SV", "call", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/validate.py#L145-L151
223,212
bcbio/bcbio-nextgen
bcbio/structural/validate.py
_prep_callable_bed
def _prep_callable_bed(in_file, work_dir, stats, data): """Sort and merge callable BED regions to prevent SV double counting """ out_file = os.path.join(work_dir, "%s-merge.bed.gz" % utils.splitext_plus(os.path.basename(in_file))[0]) gsort = config_utils.get_program("gsort", data) if not utils.file_uptodate(out_file, in_file): with file_transaction(data, out_file) as tx_out_file: fai_file = ref.fasta_idx(dd.get_ref_file(data)) cmd = ("{gsort} {in_file} {fai_file} | bedtools merge -i - -d {stats[merge_size]} | " "bgzip -c > {tx_out_file}") do.run(cmd.format(**locals()), "Prepare SV callable BED regions") return vcfutils.bgzip_and_index(out_file, data["config"])
python
def _prep_callable_bed(in_file, work_dir, stats, data): """Sort and merge callable BED regions to prevent SV double counting """ out_file = os.path.join(work_dir, "%s-merge.bed.gz" % utils.splitext_plus(os.path.basename(in_file))[0]) gsort = config_utils.get_program("gsort", data) if not utils.file_uptodate(out_file, in_file): with file_transaction(data, out_file) as tx_out_file: fai_file = ref.fasta_idx(dd.get_ref_file(data)) cmd = ("{gsort} {in_file} {fai_file} | bedtools merge -i - -d {stats[merge_size]} | " "bgzip -c > {tx_out_file}") do.run(cmd.format(**locals()), "Prepare SV callable BED regions") return vcfutils.bgzip_and_index(out_file, data["config"])
[ "def", "_prep_callable_bed", "(", "in_file", ",", "work_dir", ",", "stats", ",", "data", ")", ":", "out_file", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"%s-merge.bed.gz\"", "%", "utils", ".", "splitext_plus", "(", "os", ".", "path", "...
Sort and merge callable BED regions to prevent SV double counting
[ "Sort", "and", "merge", "callable", "BED", "regions", "to", "prevent", "SV", "double", "counting" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/validate.py#L176-L187
223,213
bcbio/bcbio-nextgen
bcbio/structural/validate.py
_get_anns_to_remove
def _get_anns_to_remove(in_file): """Find larger annotations, if present in VCF, that slow down processing. """ to_remove = ["ANN", "LOF"] to_remove_str = tuple(["##INFO=<ID=%s" % x for x in to_remove]) cur_remove = [] with utils.open_gzipsafe(in_file) as in_handle: for line in in_handle: if not line.startswith("#"): break elif line.startswith(to_remove_str): cur_id = line.split("ID=")[-1].split(",")[0] cur_remove.append("INFO/%s" % cur_id) return ",".join(cur_remove)
python
def _get_anns_to_remove(in_file): """Find larger annotations, if present in VCF, that slow down processing. """ to_remove = ["ANN", "LOF"] to_remove_str = tuple(["##INFO=<ID=%s" % x for x in to_remove]) cur_remove = [] with utils.open_gzipsafe(in_file) as in_handle: for line in in_handle: if not line.startswith("#"): break elif line.startswith(to_remove_str): cur_id = line.split("ID=")[-1].split(",")[0] cur_remove.append("INFO/%s" % cur_id) return ",".join(cur_remove)
[ "def", "_get_anns_to_remove", "(", "in_file", ")", ":", "to_remove", "=", "[", "\"ANN\"", ",", "\"LOF\"", "]", "to_remove_str", "=", "tuple", "(", "[", "\"##INFO=<ID=%s\"", "%", "x", "for", "x", "in", "to_remove", "]", ")", "cur_remove", "=", "[", "]", "...
Find larger annotations, if present in VCF, that slow down processing.
[ "Find", "larger", "annotations", "if", "present", "in", "VCF", "that", "slow", "down", "processing", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/validate.py#L189-L202
223,214
bcbio/bcbio-nextgen
bcbio/structural/validate.py
cnv_to_event
def cnv_to_event(name, data): """Convert a CNV to an event name. """ cur_ploidy = ploidy.get_ploidy([data]) if name.startswith("cnv"): num = max([int(x) for x in name.split("_")[0].replace("cnv", "").split(";")]) if num < cur_ploidy: return "DEL" elif num > cur_ploidy: return "DUP" else: return name else: return name
python
def cnv_to_event(name, data): """Convert a CNV to an event name. """ cur_ploidy = ploidy.get_ploidy([data]) if name.startswith("cnv"): num = max([int(x) for x in name.split("_")[0].replace("cnv", "").split(";")]) if num < cur_ploidy: return "DEL" elif num > cur_ploidy: return "DUP" else: return name else: return name
[ "def", "cnv_to_event", "(", "name", ",", "data", ")", ":", "cur_ploidy", "=", "ploidy", ".", "get_ploidy", "(", "[", "data", "]", ")", "if", "name", ".", "startswith", "(", "\"cnv\"", ")", ":", "num", "=", "max", "(", "[", "int", "(", "x", ")", "...
Convert a CNV to an event name.
[ "Convert", "a", "CNV", "to", "an", "event", "name", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/validate.py#L216-L229
223,215
bcbio/bcbio-nextgen
bcbio/structural/validate.py
_evaluate_one
def _evaluate_one(caller, svtype, size_range, ensemble, truth, data): """Compare a ensemble results for a caller against a specific caller and SV type. """ def cnv_matches(name): return cnv_to_event(name, data) == svtype def is_breakend(name): return name.startswith("BND") def in_size_range(max_buffer=0): def _work(feat): minf, maxf = size_range buffer = min(max_buffer, int(((maxf + minf) / 2.0) / 10.0)) size = feat.end - feat.start return size >= max([0, minf - buffer]) and size < maxf + buffer return _work def is_caller_svtype(feat): for name in feat.name.split(","): if ((name.startswith(svtype) or cnv_matches(name) or is_breakend(name)) and (caller == "sv-ensemble" or name.endswith(caller))): return True return False minf, maxf = size_range efeats = pybedtools.BedTool(ensemble).filter(in_size_range(0)).filter(is_caller_svtype).saveas().sort().merge() tfeats = pybedtools.BedTool(truth).filter(in_size_range(0)).sort().merge().saveas() etotal = efeats.count() ttotal = tfeats.count() match = efeats.intersect(tfeats, u=True).sort().merge().saveas().count() return {"sensitivity": _stat_str(match, ttotal), "precision": _stat_str(match, etotal)}
python
def _evaluate_one(caller, svtype, size_range, ensemble, truth, data): """Compare a ensemble results for a caller against a specific caller and SV type. """ def cnv_matches(name): return cnv_to_event(name, data) == svtype def is_breakend(name): return name.startswith("BND") def in_size_range(max_buffer=0): def _work(feat): minf, maxf = size_range buffer = min(max_buffer, int(((maxf + minf) / 2.0) / 10.0)) size = feat.end - feat.start return size >= max([0, minf - buffer]) and size < maxf + buffer return _work def is_caller_svtype(feat): for name in feat.name.split(","): if ((name.startswith(svtype) or cnv_matches(name) or is_breakend(name)) and (caller == "sv-ensemble" or name.endswith(caller))): return True return False minf, maxf = size_range efeats = pybedtools.BedTool(ensemble).filter(in_size_range(0)).filter(is_caller_svtype).saveas().sort().merge() tfeats = pybedtools.BedTool(truth).filter(in_size_range(0)).sort().merge().saveas() etotal = efeats.count() ttotal = tfeats.count() match = efeats.intersect(tfeats, u=True).sort().merge().saveas().count() return {"sensitivity": _stat_str(match, ttotal), "precision": _stat_str(match, etotal)}
[ "def", "_evaluate_one", "(", "caller", ",", "svtype", ",", "size_range", ",", "ensemble", ",", "truth", ",", "data", ")", ":", "def", "cnv_matches", "(", "name", ")", ":", "return", "cnv_to_event", "(", "name", ",", "data", ")", "==", "svtype", "def", ...
Compare a ensemble results for a caller against a specific caller and SV type.
[ "Compare", "a", "ensemble", "results", "for", "a", "caller", "against", "a", "specific", "caller", "and", "SV", "type", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/validate.py#L231-L258
223,216
bcbio/bcbio-nextgen
bcbio/structural/validate.py
_plot_evaluation_event
def _plot_evaluation_event(df_csv, svtype): """Provide plot of evaluation metrics for an SV event, stratified by event size. """ titles = {"INV": "Inversions", "DEL": "Deletions", "DUP": "Duplications", "INS": "Insertions"} out_file = "%s-%s.png" % (os.path.splitext(df_csv)[0], svtype) sns.set(style='white') if not utils.file_uptodate(out_file, df_csv): metrics = ["sensitivity", "precision"] df = pd.read_csv(df_csv).fillna("0%") df = df[(df["svtype"] == svtype)] event_sizes = _find_events_to_include(df, EVENT_SIZES) fig, axs = plt.subplots(len(event_sizes), len(metrics), tight_layout=True) if len(event_sizes) == 1: axs = [axs] callers = sorted(df["caller"].unique()) if "sv-ensemble" in callers: callers.remove("sv-ensemble") callers.append("sv-ensemble") for i, size in enumerate(event_sizes): size_label = "%s to %sbp" % size size = "%s-%s" % size for j, metric in enumerate(metrics): ax = axs[i][j] ax.get_xaxis().set_ticks([]) ax.spines['bottom'].set_visible(False) ax.spines['left'].set_visible(False) ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) ax.set_xlim(0, 125.0) if i == 0: ax.set_title(metric, size=12, y=1.2) vals, labels = _get_plot_val_labels(df, size, metric, callers) ax.barh(range(1,len(vals)+1), vals) if j == 0: ax.tick_params(axis='y', which='major', labelsize=8) ax.locator_params(axis="y", tight=True) ax.set_yticks(range(1,len(callers)+1,1)) ax.set_yticklabels(callers, va="center") ax.text(100, len(callers)+1, size_label, fontsize=10) else: ax.get_yaxis().set_ticks([]) for ai, (val, label) in enumerate(zip(vals, labels)): ax.annotate(label, (val + 0.75, ai + 1), va='center', size=7) if svtype in titles: fig.text(0.025, 0.95, titles[svtype], size=14) fig.set_size_inches(7, len(event_sizes) + 1) fig.savefig(out_file) return out_file
python
def _plot_evaluation_event(df_csv, svtype): """Provide plot of evaluation metrics for an SV event, stratified by event size. """ titles = {"INV": "Inversions", "DEL": "Deletions", "DUP": "Duplications", "INS": "Insertions"} out_file = "%s-%s.png" % (os.path.splitext(df_csv)[0], svtype) sns.set(style='white') if not utils.file_uptodate(out_file, df_csv): metrics = ["sensitivity", "precision"] df = pd.read_csv(df_csv).fillna("0%") df = df[(df["svtype"] == svtype)] event_sizes = _find_events_to_include(df, EVENT_SIZES) fig, axs = plt.subplots(len(event_sizes), len(metrics), tight_layout=True) if len(event_sizes) == 1: axs = [axs] callers = sorted(df["caller"].unique()) if "sv-ensemble" in callers: callers.remove("sv-ensemble") callers.append("sv-ensemble") for i, size in enumerate(event_sizes): size_label = "%s to %sbp" % size size = "%s-%s" % size for j, metric in enumerate(metrics): ax = axs[i][j] ax.get_xaxis().set_ticks([]) ax.spines['bottom'].set_visible(False) ax.spines['left'].set_visible(False) ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) ax.set_xlim(0, 125.0) if i == 0: ax.set_title(metric, size=12, y=1.2) vals, labels = _get_plot_val_labels(df, size, metric, callers) ax.barh(range(1,len(vals)+1), vals) if j == 0: ax.tick_params(axis='y', which='major', labelsize=8) ax.locator_params(axis="y", tight=True) ax.set_yticks(range(1,len(callers)+1,1)) ax.set_yticklabels(callers, va="center") ax.text(100, len(callers)+1, size_label, fontsize=10) else: ax.get_yaxis().set_ticks([]) for ai, (val, label) in enumerate(zip(vals, labels)): ax.annotate(label, (val + 0.75, ai + 1), va='center', size=7) if svtype in titles: fig.text(0.025, 0.95, titles[svtype], size=14) fig.set_size_inches(7, len(event_sizes) + 1) fig.savefig(out_file) return out_file
[ "def", "_plot_evaluation_event", "(", "df_csv", ",", "svtype", ")", ":", "titles", "=", "{", "\"INV\"", ":", "\"Inversions\"", ",", "\"DEL\"", ":", "\"Deletions\"", ",", "\"DUP\"", ":", "\"Duplications\"", ",", "\"INS\"", ":", "\"Insertions\"", "}", "out_file", ...
Provide plot of evaluation metrics for an SV event, stratified by event size.
[ "Provide", "plot", "of", "evaluation", "metrics", "for", "an", "SV", "event", "stratified", "by", "event", "size", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/validate.py#L300-L348
223,217
bcbio/bcbio-nextgen
bcbio/structural/validate.py
evaluate
def evaluate(data): """Provide evaluations for multiple callers split by structural variant type. """ work_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "structural", dd.get_sample_name(data), "validate")) truth_sets = tz.get_in(["config", "algorithm", "svvalidate"], data) if truth_sets and data.get("sv"): if isinstance(truth_sets, dict): val_summary, df_csv = _evaluate_multi(data["sv"], truth_sets, work_dir, data) summary_plots = _plot_evaluation(df_csv) data["sv-validate"] = {"csv": val_summary, "plot": summary_plots, "df": df_csv} else: assert isinstance(truth_sets, six.string_types) and utils.file_exists(truth_sets), truth_sets val_summary = _evaluate_vcf(data["sv"], truth_sets, work_dir, data) title = "%s structural variants" % dd.get_sample_name(data) summary_plots = validateplot.classifyplot_from_valfile(val_summary, outtype="png", title=title) data["sv-validate"] = {"csv": val_summary, "plot": summary_plots[0] if len(summary_plots) > 0 else None} return data
python
def evaluate(data): """Provide evaluations for multiple callers split by structural variant type. """ work_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "structural", dd.get_sample_name(data), "validate")) truth_sets = tz.get_in(["config", "algorithm", "svvalidate"], data) if truth_sets and data.get("sv"): if isinstance(truth_sets, dict): val_summary, df_csv = _evaluate_multi(data["sv"], truth_sets, work_dir, data) summary_plots = _plot_evaluation(df_csv) data["sv-validate"] = {"csv": val_summary, "plot": summary_plots, "df": df_csv} else: assert isinstance(truth_sets, six.string_types) and utils.file_exists(truth_sets), truth_sets val_summary = _evaluate_vcf(data["sv"], truth_sets, work_dir, data) title = "%s structural variants" % dd.get_sample_name(data) summary_plots = validateplot.classifyplot_from_valfile(val_summary, outtype="png", title=title) data["sv-validate"] = {"csv": val_summary, "plot": summary_plots[0] if len(summary_plots) > 0 else None} return data
[ "def", "evaluate", "(", "data", ")", ":", "work_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "data", "[", "\"dirs\"", "]", "[", "\"work\"", "]", ",", "\"structural\"", ",", "dd", ".", "get_sample_name", "(", "data"...
Provide evaluations for multiple callers split by structural variant type.
[ "Provide", "evaluations", "for", "multiple", "callers", "split", "by", "structural", "variant", "type", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/validate.py#L375-L392
223,218
bcbio/bcbio-nextgen
bcbio/variation/mutect2.py
_add_region_params
def _add_region_params(region, out_file, items, gatk_type): """Add parameters for selecting by region to command line. """ params = [] variant_regions = bedutils.population_variant_regions(items) region = subset_variant_regions(variant_regions, region, out_file, items) if region: if gatk_type == "gatk4": params += ["-L", bamprep.region_to_gatk(region), "--interval-set-rule", "INTERSECTION"] else: params += ["-L", bamprep.region_to_gatk(region), "--interval_set_rule", "INTERSECTION"] params += gatk.standard_cl_params(items) return params
python
def _add_region_params(region, out_file, items, gatk_type): """Add parameters for selecting by region to command line. """ params = [] variant_regions = bedutils.population_variant_regions(items) region = subset_variant_regions(variant_regions, region, out_file, items) if region: if gatk_type == "gatk4": params += ["-L", bamprep.region_to_gatk(region), "--interval-set-rule", "INTERSECTION"] else: params += ["-L", bamprep.region_to_gatk(region), "--interval_set_rule", "INTERSECTION"] params += gatk.standard_cl_params(items) return params
[ "def", "_add_region_params", "(", "region", ",", "out_file", ",", "items", ",", "gatk_type", ")", ":", "params", "=", "[", "]", "variant_regions", "=", "bedutils", ".", "population_variant_regions", "(", "items", ")", "region", "=", "subset_variant_regions", "("...
Add parameters for selecting by region to command line.
[ "Add", "parameters", "for", "selecting", "by", "region", "to", "command", "line", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/mutect2.py#L48-L60
223,219
bcbio/bcbio-nextgen
bcbio/variation/mutect2.py
_prep_inputs
def _prep_inputs(align_bams, ref_file, items): """Ensure inputs to calling are indexed as expected. """ broad_runner = broad.runner_from_path("picard", items[0]["config"]) broad_runner.run_fn("picard_index_ref", ref_file) for x in align_bams: bam.index(x, items[0]["config"])
python
def _prep_inputs(align_bams, ref_file, items): """Ensure inputs to calling are indexed as expected. """ broad_runner = broad.runner_from_path("picard", items[0]["config"]) broad_runner.run_fn("picard_index_ref", ref_file) for x in align_bams: bam.index(x, items[0]["config"])
[ "def", "_prep_inputs", "(", "align_bams", ",", "ref_file", ",", "items", ")", ":", "broad_runner", "=", "broad", ".", "runner_from_path", "(", "\"picard\"", ",", "items", "[", "0", "]", "[", "\"config\"", "]", ")", "broad_runner", ".", "run_fn", "(", "\"pi...
Ensure inputs to calling are indexed as expected.
[ "Ensure", "inputs", "to", "calling", "are", "indexed", "as", "expected", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/mutect2.py#L70-L76
223,220
bcbio/bcbio-nextgen
bcbio/variation/mutect2.py
mutect2_caller
def mutect2_caller(align_bams, items, ref_file, assoc_files, region=None, out_file=None): """Call variation with GATK's MuTect2. This requires the full non open-source version of GATK 3.5+. """ if out_file is None: out_file = "%s-variants.vcf.gz" % utils.splitext_plus(align_bams[0])[0] if not utils.file_exists(out_file): paired = vcfutils.get_paired_bams(align_bams, items) broad_runner = broad.runner_from_config(items[0]["config"]) gatk_type = broad_runner.gatk_type() _prep_inputs(align_bams, ref_file, items) with file_transaction(items[0], out_file) as tx_out_file: params = ["-T", "Mutect2" if gatk_type == "gatk4" else "MuTect2", "--annotation", "ClippingRankSumTest", "--annotation", "DepthPerSampleHC"] if gatk_type == "gatk4": params += ["--reference", ref_file] else: params += ["-R", ref_file] for a in annotation.get_gatk_annotations(items[0]["config"], include_baseqranksum=False): params += ["--annotation", a] # Avoid issues with BAM CIGAR reads that GATK doesn't like if gatk_type == "gatk4": params += ["--read-validation-stringency", "LENIENT"] params += _add_tumor_params(paired, items, gatk_type) params += _add_region_params(region, out_file, items, gatk_type) # Avoid adding dbSNP/Cosmic so they do not get fed to variant filtering algorithm # Not yet clear how this helps or hurts in a general case. 
#params += _add_assoc_params(assoc_files) resources = config_utils.get_resources("mutect2", items[0]["config"]) if "options" in resources: params += [str(x) for x in resources.get("options", [])] assert LooseVersion(broad_runner.gatk_major_version()) >= LooseVersion("3.5"), \ "Require full version of GATK 3.5+ for mutect2 calling" broad_runner.new_resources("mutect2") gatk_cmd = broad_runner.cl_gatk(params, os.path.dirname(tx_out_file)) if gatk_type == "gatk4": tx_raw_prefilt_file = "%s-raw%s" % utils.splitext_plus(tx_out_file) tx_raw_file = "%s-raw-filt%s" % utils.splitext_plus(tx_out_file) filter_cmd = _mutect2_filter(broad_runner, tx_raw_prefilt_file, tx_raw_file, ref_file) cmd = "{gatk_cmd} -O {tx_raw_prefilt_file} && {filter_cmd}" else: tx_raw_file = "%s-raw%s" % utils.splitext_plus(tx_out_file) cmd = "{gatk_cmd} > {tx_raw_file}" do.run(cmd.format(**locals()), "MuTect2") out_file = _af_filter(paired.tumor_data, tx_raw_file, out_file) return vcfutils.bgzip_and_index(out_file, items[0]["config"])
python
def mutect2_caller(align_bams, items, ref_file, assoc_files, region=None, out_file=None): """Call variation with GATK's MuTect2. This requires the full non open-source version of GATK 3.5+. """ if out_file is None: out_file = "%s-variants.vcf.gz" % utils.splitext_plus(align_bams[0])[0] if not utils.file_exists(out_file): paired = vcfutils.get_paired_bams(align_bams, items) broad_runner = broad.runner_from_config(items[0]["config"]) gatk_type = broad_runner.gatk_type() _prep_inputs(align_bams, ref_file, items) with file_transaction(items[0], out_file) as tx_out_file: params = ["-T", "Mutect2" if gatk_type == "gatk4" else "MuTect2", "--annotation", "ClippingRankSumTest", "--annotation", "DepthPerSampleHC"] if gatk_type == "gatk4": params += ["--reference", ref_file] else: params += ["-R", ref_file] for a in annotation.get_gatk_annotations(items[0]["config"], include_baseqranksum=False): params += ["--annotation", a] # Avoid issues with BAM CIGAR reads that GATK doesn't like if gatk_type == "gatk4": params += ["--read-validation-stringency", "LENIENT"] params += _add_tumor_params(paired, items, gatk_type) params += _add_region_params(region, out_file, items, gatk_type) # Avoid adding dbSNP/Cosmic so they do not get fed to variant filtering algorithm # Not yet clear how this helps or hurts in a general case. 
#params += _add_assoc_params(assoc_files) resources = config_utils.get_resources("mutect2", items[0]["config"]) if "options" in resources: params += [str(x) for x in resources.get("options", [])] assert LooseVersion(broad_runner.gatk_major_version()) >= LooseVersion("3.5"), \ "Require full version of GATK 3.5+ for mutect2 calling" broad_runner.new_resources("mutect2") gatk_cmd = broad_runner.cl_gatk(params, os.path.dirname(tx_out_file)) if gatk_type == "gatk4": tx_raw_prefilt_file = "%s-raw%s" % utils.splitext_plus(tx_out_file) tx_raw_file = "%s-raw-filt%s" % utils.splitext_plus(tx_out_file) filter_cmd = _mutect2_filter(broad_runner, tx_raw_prefilt_file, tx_raw_file, ref_file) cmd = "{gatk_cmd} -O {tx_raw_prefilt_file} && {filter_cmd}" else: tx_raw_file = "%s-raw%s" % utils.splitext_plus(tx_out_file) cmd = "{gatk_cmd} > {tx_raw_file}" do.run(cmd.format(**locals()), "MuTect2") out_file = _af_filter(paired.tumor_data, tx_raw_file, out_file) return vcfutils.bgzip_and_index(out_file, items[0]["config"])
[ "def", "mutect2_caller", "(", "align_bams", ",", "items", ",", "ref_file", ",", "assoc_files", ",", "region", "=", "None", ",", "out_file", "=", "None", ")", ":", "if", "out_file", "is", "None", ":", "out_file", "=", "\"%s-variants.vcf.gz\"", "%", "utils", ...
Call variation with GATK's MuTect2. This requires the full non open-source version of GATK 3.5+.
[ "Call", "variation", "with", "GATK", "s", "MuTect2", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/mutect2.py#L78-L126
223,221
bcbio/bcbio-nextgen
bcbio/variation/mutect2.py
_mutect2_filter
def _mutect2_filter(broad_runner, in_file, out_file, ref_file): """Filter of MuTect2 calls, a separate step in GATK4. """ params = ["-T", "FilterMutectCalls", "--reference", ref_file, "--variant", in_file, "--output", out_file] return broad_runner.cl_gatk(params, os.path.dirname(out_file))
python
def _mutect2_filter(broad_runner, in_file, out_file, ref_file): """Filter of MuTect2 calls, a separate step in GATK4. """ params = ["-T", "FilterMutectCalls", "--reference", ref_file, "--variant", in_file, "--output", out_file] return broad_runner.cl_gatk(params, os.path.dirname(out_file))
[ "def", "_mutect2_filter", "(", "broad_runner", ",", "in_file", ",", "out_file", ",", "ref_file", ")", ":", "params", "=", "[", "\"-T\"", ",", "\"FilterMutectCalls\"", ",", "\"--reference\"", ",", "ref_file", ",", "\"--variant\"", ",", "in_file", ",", "\"--output...
Filter of MuTect2 calls, a separate step in GATK4.
[ "Filter", "of", "MuTect2", "calls", "a", "separate", "step", "in", "GATK4", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/mutect2.py#L128-L132
223,222
bcbio/bcbio-nextgen
bcbio/upload/irods.py
update_file
def update_file(finfo, sample_info, config): """ Update the file to an iRODS repository. """ ffinal = filesystem.update_file(finfo, sample_info, config, pass_uptodate=True) _upload_dir_icommands_cli(config.get("dir"), config.get("folder"), config)
python
def update_file(finfo, sample_info, config): """ Update the file to an iRODS repository. """ ffinal = filesystem.update_file(finfo, sample_info, config, pass_uptodate=True) _upload_dir_icommands_cli(config.get("dir"), config.get("folder"), config)
[ "def", "update_file", "(", "finfo", ",", "sample_info", ",", "config", ")", ":", "ffinal", "=", "filesystem", ".", "update_file", "(", "finfo", ",", "sample_info", ",", "config", ",", "pass_uptodate", "=", "True", ")", "_upload_dir_icommands_cli", "(", "config...
Update the file to an iRODS repository.
[ "Update", "the", "file", "to", "an", "iRODS", "repository", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/irods.py#L25-L31
223,223
bcbio/bcbio-nextgen
bcbio/structural/__init__.py
_get_callers
def _get_callers(items, stage, special_cases=False): """Retrieve available callers for the provided stage. Handles special cases like CNVkit that can be in initial or standard depending on if fed into Lumpy analysis. """ callers = utils.deepish_copy(_CALLERS[stage]) if special_cases and "cnvkit" in callers: has_lumpy = any("lumpy" in get_svcallers(d) or "lumpy" in d["config"]["algorithm"].get("svcaller_orig", []) for d in items) if has_lumpy and any("lumpy_usecnv" in dd.get_tools_on(d) for d in items): if stage != "initial": del callers["cnvkit"] else: if stage != "standard": del callers["cnvkit"] return callers
python
def _get_callers(items, stage, special_cases=False): """Retrieve available callers for the provided stage. Handles special cases like CNVkit that can be in initial or standard depending on if fed into Lumpy analysis. """ callers = utils.deepish_copy(_CALLERS[stage]) if special_cases and "cnvkit" in callers: has_lumpy = any("lumpy" in get_svcallers(d) or "lumpy" in d["config"]["algorithm"].get("svcaller_orig", []) for d in items) if has_lumpy and any("lumpy_usecnv" in dd.get_tools_on(d) for d in items): if stage != "initial": del callers["cnvkit"] else: if stage != "standard": del callers["cnvkit"] return callers
[ "def", "_get_callers", "(", "items", ",", "stage", ",", "special_cases", "=", "False", ")", ":", "callers", "=", "utils", ".", "deepish_copy", "(", "_CALLERS", "[", "stage", "]", ")", "if", "special_cases", "and", "\"cnvkit\"", "in", "callers", ":", "has_l...
Retrieve available callers for the provided stage. Handles special cases like CNVkit that can be in initial or standard depending on if fed into Lumpy analysis.
[ "Retrieve", "available", "callers", "for", "the", "provided", "stage", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/__init__.py#L38-L54
223,224
bcbio/bcbio-nextgen
bcbio/structural/__init__.py
_handle_multiple_svcallers
def _handle_multiple_svcallers(data, stage): """Retrieve configured structural variation caller, handling multiple. """ svs = get_svcallers(data) # special cases -- prioritization if stage == "ensemble" and dd.get_svprioritize(data): svs.append("prioritize") out = [] for svcaller in svs: if svcaller in _get_callers([data], stage): base = copy.deepcopy(data) # clean SV callers present in multiple rounds and not this caller final_svs = [] for sv in data.get("sv", []): if (stage == "ensemble" or sv["variantcaller"] == svcaller or sv["variantcaller"] not in svs or svcaller not in _get_callers([data], stage, special_cases=True)): final_svs.append(sv) base["sv"] = final_svs base["config"]["algorithm"]["svcaller"] = svcaller base["config"]["algorithm"]["svcaller_orig"] = svs out.append(base) return out
python
def _handle_multiple_svcallers(data, stage): """Retrieve configured structural variation caller, handling multiple. """ svs = get_svcallers(data) # special cases -- prioritization if stage == "ensemble" and dd.get_svprioritize(data): svs.append("prioritize") out = [] for svcaller in svs: if svcaller in _get_callers([data], stage): base = copy.deepcopy(data) # clean SV callers present in multiple rounds and not this caller final_svs = [] for sv in data.get("sv", []): if (stage == "ensemble" or sv["variantcaller"] == svcaller or sv["variantcaller"] not in svs or svcaller not in _get_callers([data], stage, special_cases=True)): final_svs.append(sv) base["sv"] = final_svs base["config"]["algorithm"]["svcaller"] = svcaller base["config"]["algorithm"]["svcaller_orig"] = svs out.append(base) return out
[ "def", "_handle_multiple_svcallers", "(", "data", ",", "stage", ")", ":", "svs", "=", "get_svcallers", "(", "data", ")", "# special cases -- prioritization", "if", "stage", "==", "\"ensemble\"", "and", "dd", ".", "get_svprioritize", "(", "data", ")", ":", "svs",...
Retrieve configured structural variation caller, handling multiple.
[ "Retrieve", "configured", "structural", "variation", "caller", "handling", "multiple", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/__init__.py#L64-L85
223,225
bcbio/bcbio-nextgen
bcbio/structural/__init__.py
finalize_sv
def finalize_sv(samples, config): """Combine results from multiple sv callers into a single ordered 'sv' key. """ by_bam = collections.OrderedDict() for x in samples: batch = dd.get_batch(x) or [dd.get_sample_name(x)] try: by_bam[x["align_bam"], tuple(batch)].append(x) except KeyError: by_bam[x["align_bam"], tuple(batch)] = [x] by_batch = collections.OrderedDict() lead_batches = {} for grouped_calls in by_bam.values(): def orig_svcaller_order(x): orig_callers = tz.get_in(["config", "algorithm", "svcaller_orig"], x) cur_caller = tz.get_in(["config", "algorithm", "svcaller"], x) return orig_callers.index(cur_caller) sorted_svcalls = sorted([x for x in grouped_calls if "sv" in x], key=orig_svcaller_order) final = grouped_calls[0] if len(sorted_svcalls) > 0: final["sv"] = reduce(operator.add, [x["sv"] for x in sorted_svcalls]) final["config"]["algorithm"]["svcaller"] = final["config"]["algorithm"].pop("svcaller_orig") batch = dd.get_batch(final) or dd.get_sample_name(final) batches = batch if isinstance(batch, (list, tuple)) else [batch] if len(batches) > 1: lead_batches[(dd.get_sample_name(final), dd.get_phenotype(final) == "germline")] = batches[0] for batch in batches: try: by_batch[batch].append(final) except KeyError: by_batch[batch] = [final] out = [] for batch, items in by_batch.items(): if any("svplots" in dd.get_tools_on(d) for d in items): items = plot.by_regions(items) for data in items: if lead_batches.get((dd.get_sample_name(data), dd.get_phenotype(data) == "germline")) in [batch, None]: out.append([data]) return out
python
def finalize_sv(samples, config): """Combine results from multiple sv callers into a single ordered 'sv' key. """ by_bam = collections.OrderedDict() for x in samples: batch = dd.get_batch(x) or [dd.get_sample_name(x)] try: by_bam[x["align_bam"], tuple(batch)].append(x) except KeyError: by_bam[x["align_bam"], tuple(batch)] = [x] by_batch = collections.OrderedDict() lead_batches = {} for grouped_calls in by_bam.values(): def orig_svcaller_order(x): orig_callers = tz.get_in(["config", "algorithm", "svcaller_orig"], x) cur_caller = tz.get_in(["config", "algorithm", "svcaller"], x) return orig_callers.index(cur_caller) sorted_svcalls = sorted([x for x in grouped_calls if "sv" in x], key=orig_svcaller_order) final = grouped_calls[0] if len(sorted_svcalls) > 0: final["sv"] = reduce(operator.add, [x["sv"] for x in sorted_svcalls]) final["config"]["algorithm"]["svcaller"] = final["config"]["algorithm"].pop("svcaller_orig") batch = dd.get_batch(final) or dd.get_sample_name(final) batches = batch if isinstance(batch, (list, tuple)) else [batch] if len(batches) > 1: lead_batches[(dd.get_sample_name(final), dd.get_phenotype(final) == "germline")] = batches[0] for batch in batches: try: by_batch[batch].append(final) except KeyError: by_batch[batch] = [final] out = [] for batch, items in by_batch.items(): if any("svplots" in dd.get_tools_on(d) for d in items): items = plot.by_regions(items) for data in items: if lead_batches.get((dd.get_sample_name(data), dd.get_phenotype(data) == "germline")) in [batch, None]: out.append([data]) return out
[ "def", "finalize_sv", "(", "samples", ",", "config", ")", ":", "by_bam", "=", "collections", ".", "OrderedDict", "(", ")", "for", "x", "in", "samples", ":", "batch", "=", "dd", ".", "get_batch", "(", "x", ")", "or", "[", "dd", ".", "get_sample_name", ...
Combine results from multiple sv callers into a single ordered 'sv' key.
[ "Combine", "results", "from", "multiple", "sv", "callers", "into", "a", "single", "ordered", "sv", "key", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/__init__.py#L87-L126
223,226
bcbio/bcbio-nextgen
bcbio/structural/__init__.py
batch_for_sv
def batch_for_sv(samples): """Prepare a set of samples for parallel structural variant calling. CWL input target -- groups samples into batches and structural variant callers for parallel processing. """ samples = cwlutils.assign_complex_to_samples(samples) to_process, extras, background = _batch_split_by_sv(samples, "standard") out = [cwlutils.samples_to_records(xs) for xs in to_process.values()] + extras return out
python
def batch_for_sv(samples): """Prepare a set of samples for parallel structural variant calling. CWL input target -- groups samples into batches and structural variant callers for parallel processing. """ samples = cwlutils.assign_complex_to_samples(samples) to_process, extras, background = _batch_split_by_sv(samples, "standard") out = [cwlutils.samples_to_records(xs) for xs in to_process.values()] + extras return out
[ "def", "batch_for_sv", "(", "samples", ")", ":", "samples", "=", "cwlutils", ".", "assign_complex_to_samples", "(", "samples", ")", "to_process", ",", "extras", ",", "background", "=", "_batch_split_by_sv", "(", "samples", ",", "\"standard\"", ")", "out", "=", ...
Prepare a set of samples for parallel structural variant calling. CWL input target -- groups samples into batches and structural variant callers for parallel processing.
[ "Prepare", "a", "set", "of", "samples", "for", "parallel", "structural", "variant", "calling", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/__init__.py#L133-L142
223,227
bcbio/bcbio-nextgen
bcbio/structural/__init__.py
run
def run(samples, run_parallel, stage): """Run structural variation detection. The stage indicates which level of structural variant calling to run. - initial, callers that can be used in subsequent structural variation steps (cnvkit -> lumpy) - standard, regular batch calling - ensemble, post-calling, combine other callers or prioritize results """ to_process, extras, background = _batch_split_by_sv(samples, stage) processed = run_parallel("detect_sv", ([xs, background, stage] for xs in to_process.values())) finalized = (run_parallel("finalize_sv", [([xs[0] for xs in processed], processed[0][0]["config"])]) if len(processed) > 0 else []) return extras + finalized
python
def run(samples, run_parallel, stage): """Run structural variation detection. The stage indicates which level of structural variant calling to run. - initial, callers that can be used in subsequent structural variation steps (cnvkit -> lumpy) - standard, regular batch calling - ensemble, post-calling, combine other callers or prioritize results """ to_process, extras, background = _batch_split_by_sv(samples, stage) processed = run_parallel("detect_sv", ([xs, background, stage] for xs in to_process.values())) finalized = (run_parallel("finalize_sv", [([xs[0] for xs in processed], processed[0][0]["config"])]) if len(processed) > 0 else []) return extras + finalized
[ "def", "run", "(", "samples", ",", "run_parallel", ",", "stage", ")", ":", "to_process", ",", "extras", ",", "background", "=", "_batch_split_by_sv", "(", "samples", ",", "stage", ")", "processed", "=", "run_parallel", "(", "\"detect_sv\"", ",", "(", "[", ...
Run structural variation detection. The stage indicates which level of structural variant calling to run. - initial, callers that can be used in subsequent structural variation steps (cnvkit -> lumpy) - standard, regular batch calling - ensemble, post-calling, combine other callers or prioritize results
[ "Run", "structural", "variation", "detection", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/__init__.py#L174-L187
223,228
bcbio/bcbio-nextgen
bcbio/structural/__init__.py
detect_sv
def detect_sv(items, all_items=None, stage="standard"): """Top level parallel target for examining structural variation. """ items = [utils.to_single_data(x) for x in items] items = cwlutils.unpack_tarballs(items, items[0]) svcaller = items[0]["config"]["algorithm"].get("svcaller") caller_fn = _get_callers(items, stage, special_cases=True).get(svcaller) out = [] if svcaller and caller_fn: if (all_items and svcaller in _NEEDS_BACKGROUND and not vcfutils.is_paired_analysis([x.get("align_bam") for x in items], items)): names = set([dd.get_sample_name(x) for x in items]) background = [x for x in all_items if dd.get_sample_name(x) not in names] for svdata in caller_fn(items, background): out.append([svdata]) else: for svdata in caller_fn(items): out.append([svdata]) else: for data in items: out.append([data]) # Avoid nesting of callers for CWL runs for easier extraction if cwlutils.is_cwl_run(items[0]): out_cwl = [] for data in [utils.to_single_data(x) for x in out]: # Run validation directly from CWL runs since we're single stage data = validate.evaluate(data) data["svvalidate"] = {"summary": tz.get_in(["sv-validate", "csv"], data)} svs = data.get("sv") if svs: assert len(svs) == 1, svs data["sv"] = svs[0] else: data["sv"] = {} data = _add_supplemental(data) out_cwl.append([data]) return out_cwl return out
python
def detect_sv(items, all_items=None, stage="standard"): """Top level parallel target for examining structural variation. """ items = [utils.to_single_data(x) for x in items] items = cwlutils.unpack_tarballs(items, items[0]) svcaller = items[0]["config"]["algorithm"].get("svcaller") caller_fn = _get_callers(items, stage, special_cases=True).get(svcaller) out = [] if svcaller and caller_fn: if (all_items and svcaller in _NEEDS_BACKGROUND and not vcfutils.is_paired_analysis([x.get("align_bam") for x in items], items)): names = set([dd.get_sample_name(x) for x in items]) background = [x for x in all_items if dd.get_sample_name(x) not in names] for svdata in caller_fn(items, background): out.append([svdata]) else: for svdata in caller_fn(items): out.append([svdata]) else: for data in items: out.append([data]) # Avoid nesting of callers for CWL runs for easier extraction if cwlutils.is_cwl_run(items[0]): out_cwl = [] for data in [utils.to_single_data(x) for x in out]: # Run validation directly from CWL runs since we're single stage data = validate.evaluate(data) data["svvalidate"] = {"summary": tz.get_in(["sv-validate", "csv"], data)} svs = data.get("sv") if svs: assert len(svs) == 1, svs data["sv"] = svs[0] else: data["sv"] = {} data = _add_supplemental(data) out_cwl.append([data]) return out_cwl return out
[ "def", "detect_sv", "(", "items", ",", "all_items", "=", "None", ",", "stage", "=", "\"standard\"", ")", ":", "items", "=", "[", "utils", ".", "to_single_data", "(", "x", ")", "for", "x", "in", "items", "]", "items", "=", "cwlutils", ".", "unpack_tarba...
Top level parallel target for examining structural variation.
[ "Top", "level", "parallel", "target", "for", "examining", "structural", "variation", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/__init__.py#L189-L226
223,229
bcbio/bcbio-nextgen
bcbio/structural/__init__.py
_add_supplemental
def _add_supplemental(data): """Add additional supplemental files to CWL sv output, give useful names. """ if "supplemental" not in data["sv"]: data["sv"]["supplemental"] = [] if data["sv"].get("variantcaller"): cur_name = _useful_basename(data) for k in ["cns", "vrn_bed"]: if data["sv"].get(k) and os.path.exists(data["sv"][k]): dname, orig = os.path.split(data["sv"][k]) orig_base, orig_ext = utils.splitext_plus(orig) orig_base = _clean_name(orig_base, data) if orig_base: fname = "%s-%s%s" % (cur_name, orig_base, orig_ext) else: fname = "%s%s" % (cur_name, orig_ext) sup_out_file = os.path.join(dname, fname) utils.symlink_plus(data["sv"][k], sup_out_file) data["sv"]["supplemental"].append(sup_out_file) return data
python
def _add_supplemental(data): """Add additional supplemental files to CWL sv output, give useful names. """ if "supplemental" not in data["sv"]: data["sv"]["supplemental"] = [] if data["sv"].get("variantcaller"): cur_name = _useful_basename(data) for k in ["cns", "vrn_bed"]: if data["sv"].get(k) and os.path.exists(data["sv"][k]): dname, orig = os.path.split(data["sv"][k]) orig_base, orig_ext = utils.splitext_plus(orig) orig_base = _clean_name(orig_base, data) if orig_base: fname = "%s-%s%s" % (cur_name, orig_base, orig_ext) else: fname = "%s%s" % (cur_name, orig_ext) sup_out_file = os.path.join(dname, fname) utils.symlink_plus(data["sv"][k], sup_out_file) data["sv"]["supplemental"].append(sup_out_file) return data
[ "def", "_add_supplemental", "(", "data", ")", ":", "if", "\"supplemental\"", "not", "in", "data", "[", "\"sv\"", "]", ":", "data", "[", "\"sv\"", "]", "[", "\"supplemental\"", "]", "=", "[", "]", "if", "data", "[", "\"sv\"", "]", ".", "get", "(", "\"...
Add additional supplemental files to CWL sv output, give useful names.
[ "Add", "additional", "supplemental", "files", "to", "CWL", "sv", "output", "give", "useful", "names", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/__init__.py#L228-L247
223,230
bcbio/bcbio-nextgen
bcbio/structural/__init__.py
_clean_name
def _clean_name(fname, data): """Remove standard prefixes from a filename before renaming with useful names. """ for to_remove in dd.get_batches(data) + [dd.get_sample_name(data), data["sv"]["variantcaller"]]: for ext in ("-", "_"): if fname.startswith("%s%s" % (to_remove, ext)): fname = fname[len(to_remove) + len(ext):] if fname.startswith(to_remove): fname = fname[len(to_remove):] return fname
python
def _clean_name(fname, data): """Remove standard prefixes from a filename before renaming with useful names. """ for to_remove in dd.get_batches(data) + [dd.get_sample_name(data), data["sv"]["variantcaller"]]: for ext in ("-", "_"): if fname.startswith("%s%s" % (to_remove, ext)): fname = fname[len(to_remove) + len(ext):] if fname.startswith(to_remove): fname = fname[len(to_remove):] return fname
[ "def", "_clean_name", "(", "fname", ",", "data", ")", ":", "for", "to_remove", "in", "dd", ".", "get_batches", "(", "data", ")", "+", "[", "dd", ".", "get_sample_name", "(", "data", ")", ",", "data", "[", "\"sv\"", "]", "[", "\"variantcaller\"", "]", ...
Remove standard prefixes from a filename before renaming with useful names.
[ "Remove", "standard", "prefixes", "from", "a", "filename", "before", "renaming", "with", "useful", "names", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/__init__.py#L249-L258
223,231
bcbio/bcbio-nextgen
bcbio/structural/__init__.py
_group_by_sample
def _group_by_sample(items): """Group a set of items by sample names + multiple callers for prioritization """ by_sample = collections.defaultdict(list) for d in items: by_sample[dd.get_sample_name(d)].append(d) out = [] for sample_group in by_sample.values(): cur = utils.deepish_copy(sample_group[0]) svs = [] for d in sample_group: svs.append(d["sv"]) cur["sv"] = svs out.append(cur) return out
python
def _group_by_sample(items): """Group a set of items by sample names + multiple callers for prioritization """ by_sample = collections.defaultdict(list) for d in items: by_sample[dd.get_sample_name(d)].append(d) out = [] for sample_group in by_sample.values(): cur = utils.deepish_copy(sample_group[0]) svs = [] for d in sample_group: svs.append(d["sv"]) cur["sv"] = svs out.append(cur) return out
[ "def", "_group_by_sample", "(", "items", ")", ":", "by_sample", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "d", "in", "items", ":", "by_sample", "[", "dd", ".", "get_sample_name", "(", "d", ")", "]", ".", "append", "(", "d", ")", ...
Group a set of items by sample names + multiple callers for prioritization
[ "Group", "a", "set", "of", "items", "by", "sample", "names", "+", "multiple", "callers", "for", "prioritization" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/__init__.py#L269-L283
223,232
bcbio/bcbio-nextgen
bcbio/structural/__init__.py
standardize_cnv_reference
def standardize_cnv_reference(data): """Standardize cnv_reference background to support multiple callers. """ out = tz.get_in(["config", "algorithm", "background", "cnv_reference"], data, {}) cur_callers = set(data["config"]["algorithm"].get("svcaller")) & _CNV_REFERENCE if isinstance(out, six.string_types): if not len(cur_callers) == 1: raise ValueError("Multiple CNV callers and single background reference for %s: %s" % data["description"], list(cur_callers)) else: out = {cur_callers.pop(): out} return out
python
def standardize_cnv_reference(data): """Standardize cnv_reference background to support multiple callers. """ out = tz.get_in(["config", "algorithm", "background", "cnv_reference"], data, {}) cur_callers = set(data["config"]["algorithm"].get("svcaller")) & _CNV_REFERENCE if isinstance(out, six.string_types): if not len(cur_callers) == 1: raise ValueError("Multiple CNV callers and single background reference for %s: %s" % data["description"], list(cur_callers)) else: out = {cur_callers.pop(): out} return out
[ "def", "standardize_cnv_reference", "(", "data", ")", ":", "out", "=", "tz", ".", "get_in", "(", "[", "\"config\"", ",", "\"algorithm\"", ",", "\"background\"", ",", "\"cnv_reference\"", "]", ",", "data", ",", "{", "}", ")", "cur_callers", "=", "set", "(",...
Standardize cnv_reference background to support multiple callers.
[ "Standardize", "cnv_reference", "background", "to", "support", "multiple", "callers", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/__init__.py#L324-L335
223,233
bcbio/bcbio-nextgen
bcbio/ngsalign/bowtie2.py
_bowtie2_args_from_config
def _bowtie2_args_from_config(config, curcl): """Configurable high level options for bowtie2. """ qual_format = config["algorithm"].get("quality_format", "") if qual_format.lower() == "illumina": qual_flags = ["--phred64-quals"] else: qual_flags = [] num_cores = config["algorithm"].get("num_cores", 1) core_flags = ["-p", str(num_cores)] if num_cores > 1 else [] user_opts = config_utils.get_resources("bowtie2", config).get("options", []) for flag_opt in (o for o in user_opts if str(o).startswith("-")): if flag_opt in curcl: raise ValueError("Duplicate option %s in resources and bcbio commandline: %s %s" % flag_opt, user_opts, curcl) return core_flags + qual_flags + user_opts
python
def _bowtie2_args_from_config(config, curcl): """Configurable high level options for bowtie2. """ qual_format = config["algorithm"].get("quality_format", "") if qual_format.lower() == "illumina": qual_flags = ["--phred64-quals"] else: qual_flags = [] num_cores = config["algorithm"].get("num_cores", 1) core_flags = ["-p", str(num_cores)] if num_cores > 1 else [] user_opts = config_utils.get_resources("bowtie2", config).get("options", []) for flag_opt in (o for o in user_opts if str(o).startswith("-")): if flag_opt in curcl: raise ValueError("Duplicate option %s in resources and bcbio commandline: %s %s" % flag_opt, user_opts, curcl) return core_flags + qual_flags + user_opts
[ "def", "_bowtie2_args_from_config", "(", "config", ",", "curcl", ")", ":", "qual_format", "=", "config", "[", "\"algorithm\"", "]", ".", "get", "(", "\"quality_format\"", ",", "\"\"", ")", "if", "qual_format", ".", "lower", "(", ")", "==", "\"illumina\"", ":...
Configurable high level options for bowtie2.
[ "Configurable", "high", "level", "options", "for", "bowtie2", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/bowtie2.py#L15-L30
223,234
bcbio/bcbio-nextgen
bcbio/ngsalign/bowtie2.py
align
def align(fastq_file, pair_file, ref_file, names, align_dir, data, extra_args=None): """Alignment with bowtie2. """ config = data["config"] analysis_config = ANALYSIS.get(data["analysis"].lower()) assert analysis_config, "Analysis %s is not supported by bowtie2" % (data["analysis"]) out_file = os.path.join(align_dir, "{0}-sort.bam".format(dd.get_sample_name(data))) if data.get("align_split"): final_file = out_file out_file, data = alignprep.setup_combine(final_file, data) fastq_file, pair_file = alignprep.split_namedpipe_cls(fastq_file, pair_file, data) else: final_file = None if not utils.file_exists(out_file) and (final_file is None or not utils.file_exists(final_file)): with postalign.tobam_cl(data, out_file, pair_file is not None) as (tobam_cl, tx_out_file): cl = [config_utils.get_program("bowtie2", config)] cl += extra_args if extra_args is not None else [] cl += ["-q", "-x", ref_file] cl += analysis_config.get("params", []) if pair_file: cl += ["-1", fastq_file, "-2", pair_file] else: cl += ["-U", fastq_file] if names and "rg" in names: cl += ["--rg-id", names["rg"]] for key, tag in [("sample", "SM"), ("pl", "PL"), ("pu", "PU"), ("lb", "LB")]: if names.get(key): cl += ["--rg", "%s:%s" % (tag, names[key])] cl += _bowtie2_args_from_config(config, cl) cl = [str(i) for i in cl] cmd = "unset JAVA_HOME && " + " ".join(cl) + " | " + tobam_cl do.run(cmd, "Aligning %s and %s with Bowtie2." % (fastq_file, pair_file)) return out_file
python
def align(fastq_file, pair_file, ref_file, names, align_dir, data, extra_args=None): """Alignment with bowtie2. """ config = data["config"] analysis_config = ANALYSIS.get(data["analysis"].lower()) assert analysis_config, "Analysis %s is not supported by bowtie2" % (data["analysis"]) out_file = os.path.join(align_dir, "{0}-sort.bam".format(dd.get_sample_name(data))) if data.get("align_split"): final_file = out_file out_file, data = alignprep.setup_combine(final_file, data) fastq_file, pair_file = alignprep.split_namedpipe_cls(fastq_file, pair_file, data) else: final_file = None if not utils.file_exists(out_file) and (final_file is None or not utils.file_exists(final_file)): with postalign.tobam_cl(data, out_file, pair_file is not None) as (tobam_cl, tx_out_file): cl = [config_utils.get_program("bowtie2", config)] cl += extra_args if extra_args is not None else [] cl += ["-q", "-x", ref_file] cl += analysis_config.get("params", []) if pair_file: cl += ["-1", fastq_file, "-2", pair_file] else: cl += ["-U", fastq_file] if names and "rg" in names: cl += ["--rg-id", names["rg"]] for key, tag in [("sample", "SM"), ("pl", "PL"), ("pu", "PU"), ("lb", "LB")]: if names.get(key): cl += ["--rg", "%s:%s" % (tag, names[key])] cl += _bowtie2_args_from_config(config, cl) cl = [str(i) for i in cl] cmd = "unset JAVA_HOME && " + " ".join(cl) + " | " + tobam_cl do.run(cmd, "Aligning %s and %s with Bowtie2." % (fastq_file, pair_file)) return out_file
[ "def", "align", "(", "fastq_file", ",", "pair_file", ",", "ref_file", ",", "names", ",", "align_dir", ",", "data", ",", "extra_args", "=", "None", ")", ":", "config", "=", "data", "[", "\"config\"", "]", "analysis_config", "=", "ANALYSIS", ".", "get", "(...
Alignment with bowtie2.
[ "Alignment", "with", "bowtie2", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/bowtie2.py#L32-L66
223,235
bcbio/bcbio-nextgen
bcbio/cwl/hpc.py
create_cromwell_config
def create_cromwell_config(args, work_dir, sample_file): """Prepare a cromwell configuration within the current working directory. """ docker_attrs = ["String? docker", "String? docker_user"] cwl_attrs = ["Int? cpuMin", "Int? cpuMax", "Int? memoryMin", "Int? memoryMax", "String? outDirMin", "String? outDirMax", "String? tmpDirMin", "String? tmpDirMax"] out_file = os.path.join(work_dir, "bcbio-cromwell.conf") run_config = _load_custom_config(args.runconfig) if args.runconfig else {} # Avoid overscheduling jobs for local runs by limiting concurrent jobs # Longer term would like to keep these within defined core window joblimit = args.joblimit if joblimit == 0 and not args.scheduler: joblimit = 1 file_types = _get_filesystem_types(args, sample_file) std_args = {"docker_attrs": "" if args.no_container else "\n ".join(docker_attrs), "submit_docker": 'submit-docker: ""' if args.no_container else "", "joblimit": "concurrent-job-limit = %s" % (joblimit) if joblimit > 0 else "", "cwl_attrs": "\n ".join(cwl_attrs), "filesystem": _get_filesystem_config(file_types), "database": run_config.get("database", DATABASE_CONFIG % {"work_dir": work_dir})} cl_args, conf_args, scheduler, cloud_type = _args_to_cromwell(args) std_args["engine"] = _get_engine_filesystem_config(file_types, args, conf_args) conf_args.update(std_args) main_config = {"hpc": (HPC_CONFIGS[scheduler] % conf_args) if scheduler else "", "cloud": (CLOUD_CONFIGS[cloud_type] % conf_args) if cloud_type else "", "work_dir": work_dir} main_config.update(std_args) # Local run always seems to need docker set because of submit-docker in default configuration # Can we unset submit-docker based on configuration so it doesn't inherit? # main_config["docker_attrs"] = "\n ".join(docker_attrs) with open(out_file, "w") as out_handle: out_handle.write(CROMWELL_CONFIG % main_config) return out_file
python
def create_cromwell_config(args, work_dir, sample_file): """Prepare a cromwell configuration within the current working directory. """ docker_attrs = ["String? docker", "String? docker_user"] cwl_attrs = ["Int? cpuMin", "Int? cpuMax", "Int? memoryMin", "Int? memoryMax", "String? outDirMin", "String? outDirMax", "String? tmpDirMin", "String? tmpDirMax"] out_file = os.path.join(work_dir, "bcbio-cromwell.conf") run_config = _load_custom_config(args.runconfig) if args.runconfig else {} # Avoid overscheduling jobs for local runs by limiting concurrent jobs # Longer term would like to keep these within defined core window joblimit = args.joblimit if joblimit == 0 and not args.scheduler: joblimit = 1 file_types = _get_filesystem_types(args, sample_file) std_args = {"docker_attrs": "" if args.no_container else "\n ".join(docker_attrs), "submit_docker": 'submit-docker: ""' if args.no_container else "", "joblimit": "concurrent-job-limit = %s" % (joblimit) if joblimit > 0 else "", "cwl_attrs": "\n ".join(cwl_attrs), "filesystem": _get_filesystem_config(file_types), "database": run_config.get("database", DATABASE_CONFIG % {"work_dir": work_dir})} cl_args, conf_args, scheduler, cloud_type = _args_to_cromwell(args) std_args["engine"] = _get_engine_filesystem_config(file_types, args, conf_args) conf_args.update(std_args) main_config = {"hpc": (HPC_CONFIGS[scheduler] % conf_args) if scheduler else "", "cloud": (CLOUD_CONFIGS[cloud_type] % conf_args) if cloud_type else "", "work_dir": work_dir} main_config.update(std_args) # Local run always seems to need docker set because of submit-docker in default configuration # Can we unset submit-docker based on configuration so it doesn't inherit? # main_config["docker_attrs"] = "\n ".join(docker_attrs) with open(out_file, "w") as out_handle: out_handle.write(CROMWELL_CONFIG % main_config) return out_file
[ "def", "create_cromwell_config", "(", "args", ",", "work_dir", ",", "sample_file", ")", ":", "docker_attrs", "=", "[", "\"String? docker\"", ",", "\"String? docker_user\"", "]", "cwl_attrs", "=", "[", "\"Int? cpuMin\"", ",", "\"Int? cpuMax\"", ",", "\"Int? memoryMin\"...
Prepare a cromwell configuration within the current working directory.
[ "Prepare", "a", "cromwell", "configuration", "within", "the", "current", "working", "directory", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/hpc.py#L8-L40
223,236
bcbio/bcbio-nextgen
bcbio/cwl/hpc.py
_get_file_paths
def _get_file_paths(cur): """Retrieve a list of file paths, recursively traversing the """ out = [] if isinstance(cur, (list, tuple)): for x in cur: new = _get_file_paths(x) if new: out.extend(new) elif isinstance(cur, dict): if "class" in cur: out.append(cur["path"]) else: for k, v in cur.items(): new = _get_file_paths(v) if new: out.extend(new) return out
python
def _get_file_paths(cur): """Retrieve a list of file paths, recursively traversing the """ out = [] if isinstance(cur, (list, tuple)): for x in cur: new = _get_file_paths(x) if new: out.extend(new) elif isinstance(cur, dict): if "class" in cur: out.append(cur["path"]) else: for k, v in cur.items(): new = _get_file_paths(v) if new: out.extend(new) return out
[ "def", "_get_file_paths", "(", "cur", ")", ":", "out", "=", "[", "]", "if", "isinstance", "(", "cur", ",", "(", "list", ",", "tuple", ")", ")", ":", "for", "x", "in", "cur", ":", "new", "=", "_get_file_paths", "(", "x", ")", "if", "new", ":", "...
Retrieve a list of file paths, recursively traversing the
[ "Retrieve", "a", "list", "of", "file", "paths", "recursively", "traversing", "the" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/hpc.py#L42-L59
223,237
bcbio/bcbio-nextgen
bcbio/cwl/hpc.py
_load_custom_config
def _load_custom_config(run_config): """Load custom configuration input HOCON file for cromwell. """ from pyhocon import ConfigFactory, HOCONConverter, ConfigTree conf = ConfigFactory.parse_file(run_config) out = {} if "database" in conf: out["database"] = HOCONConverter.to_hocon(ConfigTree({"database": conf.get_config("database")})) return out
python
def _load_custom_config(run_config): """Load custom configuration input HOCON file for cromwell. """ from pyhocon import ConfigFactory, HOCONConverter, ConfigTree conf = ConfigFactory.parse_file(run_config) out = {} if "database" in conf: out["database"] = HOCONConverter.to_hocon(ConfigTree({"database": conf.get_config("database")})) return out
[ "def", "_load_custom_config", "(", "run_config", ")", ":", "from", "pyhocon", "import", "ConfigFactory", ",", "HOCONConverter", ",", "ConfigTree", "conf", "=", "ConfigFactory", ".", "parse_file", "(", "run_config", ")", "out", "=", "{", "}", "if", "\"database\""...
Load custom configuration input HOCON file for cromwell.
[ "Load", "custom", "configuration", "input", "HOCON", "file", "for", "cromwell", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/hpc.py#L61-L69
223,238
bcbio/bcbio-nextgen
bcbio/cwl/hpc.py
_args_to_cromwell
def _args_to_cromwell(args): """Convert input arguments into cromwell inputs for config and command line. """ default_config = {"slurm": {"timelimit": "1-00:00", "account": ""}, "sge": {"memtype": "mem_free", "pename": "smp"}, "lsf": {"walltime": "24:00", "account": ""}, "htcondor": {}, "torque": {"walltime": "24:00:00", "account": ""}, "pbspro": {"walltime": "24:00:00", "account": "", "cpu_and_mem": "-l select=1:ncpus=${cpu}:mem=${memory_mb}mb"}} prefixes = {("account", "slurm"): "-A ", ("account", "pbspro"): "-A "} custom = {("noselect", "pbspro"): ("cpu_and_mem", "-l ncpus=${cpu} -l mem=${memory_mb}mb")} cl = [] config = {} # HPC scheduling if args.scheduler: if args.scheduler not in default_config: raise ValueError("Scheduler not yet supported by Cromwell: %s" % args.scheduler) if not args.queue and args.scheduler not in ["htcondor"]: raise ValueError("Need to set queue (-q) for running with an HPC scheduler") config = default_config[args.scheduler] cl.append("-Dbackend.default=%s" % args.scheduler.upper()) config["queue"] = args.queue for rs in args.resources: for r in rs.split(";"): parts = r.split("=") if len(parts) == 2: key, val = parts config[key] = prefixes.get((key, args.scheduler), "") + val elif len(parts) == 1 and (parts[0], args.scheduler) in custom: key, val = custom[(parts[0], args.scheduler)] config[key] = val cloud_type = None if args.cloud_project: if args.cloud_root and args.cloud_root.startswith("gs:"): cloud_type = "PAPI" cloud_root = args.cloud_root cloud_region = None elif ((args.cloud_root and args.cloud_root.startswith("s3:")) or (args.cloud_project and args.cloud_project.startswith("arn:"))): cloud_type = "AWSBATCH" cloud_root = args.cloud_root if not cloud_root.startswith("s3://"): cloud_root = "s3://%s" % cloud_root # split region from input Amazon Resource Name, ie arn:aws:batch:us-east-1: cloud_region = args.cloud_project.split(":")[3] else: raise ValueError("Unexpected inputs for Cromwell Cloud support: %s %s" % 
(args.cloud_project, args.cloud_root)) config = {"cloud_project": args.cloud_project, "cloud_root": cloud_root, "cloud_region": cloud_region} cl.append("-Dbackend.default=%s" % cloud_type) return cl, config, args.scheduler, cloud_type
python
def _args_to_cromwell(args): """Convert input arguments into cromwell inputs for config and command line. """ default_config = {"slurm": {"timelimit": "1-00:00", "account": ""}, "sge": {"memtype": "mem_free", "pename": "smp"}, "lsf": {"walltime": "24:00", "account": ""}, "htcondor": {}, "torque": {"walltime": "24:00:00", "account": ""}, "pbspro": {"walltime": "24:00:00", "account": "", "cpu_and_mem": "-l select=1:ncpus=${cpu}:mem=${memory_mb}mb"}} prefixes = {("account", "slurm"): "-A ", ("account", "pbspro"): "-A "} custom = {("noselect", "pbspro"): ("cpu_and_mem", "-l ncpus=${cpu} -l mem=${memory_mb}mb")} cl = [] config = {} # HPC scheduling if args.scheduler: if args.scheduler not in default_config: raise ValueError("Scheduler not yet supported by Cromwell: %s" % args.scheduler) if not args.queue and args.scheduler not in ["htcondor"]: raise ValueError("Need to set queue (-q) for running with an HPC scheduler") config = default_config[args.scheduler] cl.append("-Dbackend.default=%s" % args.scheduler.upper()) config["queue"] = args.queue for rs in args.resources: for r in rs.split(";"): parts = r.split("=") if len(parts) == 2: key, val = parts config[key] = prefixes.get((key, args.scheduler), "") + val elif len(parts) == 1 and (parts[0], args.scheduler) in custom: key, val = custom[(parts[0], args.scheduler)] config[key] = val cloud_type = None if args.cloud_project: if args.cloud_root and args.cloud_root.startswith("gs:"): cloud_type = "PAPI" cloud_root = args.cloud_root cloud_region = None elif ((args.cloud_root and args.cloud_root.startswith("s3:")) or (args.cloud_project and args.cloud_project.startswith("arn:"))): cloud_type = "AWSBATCH" cloud_root = args.cloud_root if not cloud_root.startswith("s3://"): cloud_root = "s3://%s" % cloud_root # split region from input Amazon Resource Name, ie arn:aws:batch:us-east-1: cloud_region = args.cloud_project.split(":")[3] else: raise ValueError("Unexpected inputs for Cromwell Cloud support: %s %s" % 
(args.cloud_project, args.cloud_root)) config = {"cloud_project": args.cloud_project, "cloud_root": cloud_root, "cloud_region": cloud_region} cl.append("-Dbackend.default=%s" % cloud_type) return cl, config, args.scheduler, cloud_type
[ "def", "_args_to_cromwell", "(", "args", ")", ":", "default_config", "=", "{", "\"slurm\"", ":", "{", "\"timelimit\"", ":", "\"1-00:00\"", ",", "\"account\"", ":", "\"\"", "}", ",", "\"sge\"", ":", "{", "\"memtype\"", ":", "\"mem_free\"", ",", "\"pename\"", ...
Convert input arguments into cromwell inputs for config and command line.
[ "Convert", "input", "arguments", "into", "cromwell", "inputs", "for", "config", "and", "command", "line", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/hpc.py#L77-L128
223,239
bcbio/bcbio-nextgen
bcbio/cwl/hpc.py
_get_filesystem_types
def _get_filesystem_types(args, sample_file): """Retrieve the types of inputs and staging based on sample JSON and arguments. """ out = set([]) ext = "" if args.no_container else "_container" with open(sample_file) as in_handle: for f in _get_file_paths(json.load(in_handle)): if f.startswith("gs:"): out.add("gcp%s" % ext) elif f.startswith("s3:"): out.add("s3%s" % ext) elif f.startswith(("https:", "http:")): out.add("http%s" % ext) else: out.add("local%s" % ext) return out
python
def _get_filesystem_types(args, sample_file): """Retrieve the types of inputs and staging based on sample JSON and arguments. """ out = set([]) ext = "" if args.no_container else "_container" with open(sample_file) as in_handle: for f in _get_file_paths(json.load(in_handle)): if f.startswith("gs:"): out.add("gcp%s" % ext) elif f.startswith("s3:"): out.add("s3%s" % ext) elif f.startswith(("https:", "http:")): out.add("http%s" % ext) else: out.add("local%s" % ext) return out
[ "def", "_get_filesystem_types", "(", "args", ",", "sample_file", ")", ":", "out", "=", "set", "(", "[", "]", ")", "ext", "=", "\"\"", "if", "args", ".", "no_container", "else", "\"_container\"", "with", "open", "(", "sample_file", ")", "as", "in_handle", ...
Retrieve the types of inputs and staging based on sample JSON and arguments.
[ "Retrieve", "the", "types", "of", "inputs", "and", "staging", "based", "on", "sample", "JSON", "and", "arguments", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/hpc.py#L130-L145
223,240
bcbio/bcbio-nextgen
bcbio/cwl/hpc.py
_get_filesystem_config
def _get_filesystem_config(file_types): """Retrieve filesystem configuration, including support for specified file types. """ out = " filesystems {\n" for file_type in sorted(list(file_types)): if file_type in _FILESYSTEM_CONFIG: out += _FILESYSTEM_CONFIG[file_type] out += " }\n" return out
python
def _get_filesystem_config(file_types): """Retrieve filesystem configuration, including support for specified file types. """ out = " filesystems {\n" for file_type in sorted(list(file_types)): if file_type in _FILESYSTEM_CONFIG: out += _FILESYSTEM_CONFIG[file_type] out += " }\n" return out
[ "def", "_get_filesystem_config", "(", "file_types", ")", ":", "out", "=", "\" filesystems {\\n\"", "for", "file_type", "in", "sorted", "(", "list", "(", "file_types", ")", ")", ":", "if", "file_type", "in", "_FILESYSTEM_CONFIG", ":", "out", "+=", "_FILESYSTE...
Retrieve filesystem configuration, including support for specified file types.
[ "Retrieve", "filesystem", "configuration", "including", "support", "for", "specified", "file", "types", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/hpc.py#L147-L155
223,241
bcbio/bcbio-nextgen
bcbio/cwl/hpc.py
_get_engine_filesystem_config
def _get_engine_filesystem_config(file_types, args, conf_args): """Retriever authorization and engine filesystem configuration. """ file_types = [x.replace("_container", "") for x in list(file_types)] out = "" if "gcp" in file_types: out += _AUTH_CONFIG_GOOGLE if "s3" in file_types: out += _AUTH_CONFIG_AWS % conf_args["cloud_region"] if "gcp" in file_types or "http" in file_types or "s3" in file_types: out += "engine {\n" out += " filesystems {\n" if "gcp" in file_types: out += ' gcs {\n' out += ' auth = "gcp-auth"\n' if args.cloud_project: out += ' project = "%s"\n' % args.cloud_project out += ' }\n' if "http" in file_types: out += ' http {}\n' if "s3" in file_types: out += ' s3 { auth = "default" }' out += " }\n" out += "}\n" return out
python
def _get_engine_filesystem_config(file_types, args, conf_args): """Retriever authorization and engine filesystem configuration. """ file_types = [x.replace("_container", "") for x in list(file_types)] out = "" if "gcp" in file_types: out += _AUTH_CONFIG_GOOGLE if "s3" in file_types: out += _AUTH_CONFIG_AWS % conf_args["cloud_region"] if "gcp" in file_types or "http" in file_types or "s3" in file_types: out += "engine {\n" out += " filesystems {\n" if "gcp" in file_types: out += ' gcs {\n' out += ' auth = "gcp-auth"\n' if args.cloud_project: out += ' project = "%s"\n' % args.cloud_project out += ' }\n' if "http" in file_types: out += ' http {}\n' if "s3" in file_types: out += ' s3 { auth = "default" }' out += " }\n" out += "}\n" return out
[ "def", "_get_engine_filesystem_config", "(", "file_types", ",", "args", ",", "conf_args", ")", ":", "file_types", "=", "[", "x", ".", "replace", "(", "\"_container\"", ",", "\"\"", ")", "for", "x", "in", "list", "(", "file_types", ")", "]", "out", "=", "...
Retriever authorization and engine filesystem configuration.
[ "Retriever", "authorization", "and", "engine", "filesystem", "configuration", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/hpc.py#L212-L237
223,242
bcbio/bcbio-nextgen
bcbio/variation/normalize.py
normalize
def normalize(in_file, data, passonly=False, normalize_indels=True, split_biallelic=True, rerun_effects=True, remove_oldeffects=False, nonrefonly=False, work_dir=None): """Normalizes variants and reruns SnpEFF for resulting VCF """ if remove_oldeffects: out_file = "%s-noeff-nomultiallelic%s" % utils.splitext_plus(in_file) else: out_file = "%s-nomultiallelic%s" % utils.splitext_plus(in_file) if work_dir: out_file = os.path.join(work_dir, os.path.basename(out_file)) if not utils.file_exists(out_file): if vcfutils.vcf_has_variants(in_file): ready_ma_file = _normalize(in_file, data, passonly=passonly, normalize_indels=normalize_indels, split_biallelic=split_biallelic, remove_oldeffects=remove_oldeffects, nonrefonly=nonrefonly, work_dir=work_dir) if rerun_effects: ann_ma_file, _ = effects.add_to_vcf(ready_ma_file, data) if ann_ma_file: ready_ma_file = ann_ma_file utils.symlink_plus(ready_ma_file, out_file) else: utils.symlink_plus(in_file, out_file) return vcfutils.bgzip_and_index(out_file, data["config"])
python
def normalize(in_file, data, passonly=False, normalize_indels=True, split_biallelic=True, rerun_effects=True, remove_oldeffects=False, nonrefonly=False, work_dir=None): """Normalizes variants and reruns SnpEFF for resulting VCF """ if remove_oldeffects: out_file = "%s-noeff-nomultiallelic%s" % utils.splitext_plus(in_file) else: out_file = "%s-nomultiallelic%s" % utils.splitext_plus(in_file) if work_dir: out_file = os.path.join(work_dir, os.path.basename(out_file)) if not utils.file_exists(out_file): if vcfutils.vcf_has_variants(in_file): ready_ma_file = _normalize(in_file, data, passonly=passonly, normalize_indels=normalize_indels, split_biallelic=split_biallelic, remove_oldeffects=remove_oldeffects, nonrefonly=nonrefonly, work_dir=work_dir) if rerun_effects: ann_ma_file, _ = effects.add_to_vcf(ready_ma_file, data) if ann_ma_file: ready_ma_file = ann_ma_file utils.symlink_plus(ready_ma_file, out_file) else: utils.symlink_plus(in_file, out_file) return vcfutils.bgzip_and_index(out_file, data["config"])
[ "def", "normalize", "(", "in_file", ",", "data", ",", "passonly", "=", "False", ",", "normalize_indels", "=", "True", ",", "split_biallelic", "=", "True", ",", "rerun_effects", "=", "True", ",", "remove_oldeffects", "=", "False", ",", "nonrefonly", "=", "Fal...
Normalizes variants and reruns SnpEFF for resulting VCF
[ "Normalizes", "variants", "and", "reruns", "SnpEFF", "for", "resulting", "VCF" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/normalize.py#L59-L84
223,243
bcbio/bcbio-nextgen
bcbio/variation/normalize.py
_normalize
def _normalize(in_file, data, passonly=False, normalize_indels=True, split_biallelic=True, remove_oldeffects=False, nonrefonly=False, work_dir=None): """Convert multi-allelic variants into single allelic. `vt normalize` has the -n flag passed (skipping reference checks) because of errors where the reference genome has non GATCN ambiguous bases. These are not supported in VCF, so you'll have a mismatch of N in VCF versus R (or other ambiguous bases) in the genome. """ if remove_oldeffects: out_file = "%s-noeff-decompose%s" % utils.splitext_plus(in_file) old_effects = [a for a in ["CSQ", "ANN"] if a in cyvcf2.VCF(in_file)] if old_effects: clean_effects_cmd = " | bcftools annotate -x %s " % (",".join(["INFO/%s" % x for x in old_effects])) else: clean_effects_cmd = "" else: clean_effects_cmd = "" out_file = "%s-decompose%s" % utils.splitext_plus(in_file) if passonly or nonrefonly: subset_vcf_cmd = " | bcftools view " if passonly: subset_vcf_cmd += "-f 'PASS,.' " if nonrefonly: subset_vcf_cmd += "--min-ac 1:nref " else: subset_vcf_cmd = "" if work_dir: out_file = os.path.join(work_dir, os.path.basename(out_file)) if not utils.file_exists(out_file): ref_file = dd.get_ref_file(data) assert out_file.endswith(".vcf.gz") with file_transaction(data, out_file) as tx_out_file: cmd = ("gunzip -c " + in_file + subset_vcf_cmd + clean_effects_cmd + (" | vcfallelicprimitives -t DECOMPOSED --keep-geno" if split_biallelic else "") + " | sed 's/ID=AD,Number=./ID=AD,Number=R/'" + " | vt decompose -s - " + ((" | vt normalize -n -r " + ref_file + " - ") if normalize_indels else "") + " | awk '{ gsub(\"./-65\", \"./.\"); print $0 }'" + " | sed -e 's/Number=A/Number=1/g'" + " | bgzip -c > " + tx_out_file ) do.run(cmd, "Multi-allelic to single allele") return vcfutils.bgzip_and_index(out_file, data["config"])
python
def _normalize(in_file, data, passonly=False, normalize_indels=True, split_biallelic=True, remove_oldeffects=False, nonrefonly=False, work_dir=None): """Convert multi-allelic variants into single allelic. `vt normalize` has the -n flag passed (skipping reference checks) because of errors where the reference genome has non GATCN ambiguous bases. These are not supported in VCF, so you'll have a mismatch of N in VCF versus R (or other ambiguous bases) in the genome. """ if remove_oldeffects: out_file = "%s-noeff-decompose%s" % utils.splitext_plus(in_file) old_effects = [a for a in ["CSQ", "ANN"] if a in cyvcf2.VCF(in_file)] if old_effects: clean_effects_cmd = " | bcftools annotate -x %s " % (",".join(["INFO/%s" % x for x in old_effects])) else: clean_effects_cmd = "" else: clean_effects_cmd = "" out_file = "%s-decompose%s" % utils.splitext_plus(in_file) if passonly or nonrefonly: subset_vcf_cmd = " | bcftools view " if passonly: subset_vcf_cmd += "-f 'PASS,.' " if nonrefonly: subset_vcf_cmd += "--min-ac 1:nref " else: subset_vcf_cmd = "" if work_dir: out_file = os.path.join(work_dir, os.path.basename(out_file)) if not utils.file_exists(out_file): ref_file = dd.get_ref_file(data) assert out_file.endswith(".vcf.gz") with file_transaction(data, out_file) as tx_out_file: cmd = ("gunzip -c " + in_file + subset_vcf_cmd + clean_effects_cmd + (" | vcfallelicprimitives -t DECOMPOSED --keep-geno" if split_biallelic else "") + " | sed 's/ID=AD,Number=./ID=AD,Number=R/'" + " | vt decompose -s - " + ((" | vt normalize -n -r " + ref_file + " - ") if normalize_indels else "") + " | awk '{ gsub(\"./-65\", \"./.\"); print $0 }'" + " | sed -e 's/Number=A/Number=1/g'" + " | bgzip -c > " + tx_out_file ) do.run(cmd, "Multi-allelic to single allele") return vcfutils.bgzip_and_index(out_file, data["config"])
[ "def", "_normalize", "(", "in_file", ",", "data", ",", "passonly", "=", "False", ",", "normalize_indels", "=", "True", ",", "split_biallelic", "=", "True", ",", "remove_oldeffects", "=", "False", ",", "nonrefonly", "=", "False", ",", "work_dir", "=", "None",...
Convert multi-allelic variants into single allelic. `vt normalize` has the -n flag passed (skipping reference checks) because of errors where the reference genome has non GATCN ambiguous bases. These are not supported in VCF, so you'll have a mismatch of N in VCF versus R (or other ambiguous bases) in the genome.
[ "Convert", "multi", "-", "allelic", "variants", "into", "single", "allelic", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/normalize.py#L86-L130
223,244
bcbio/bcbio-nextgen
bcbio/rnaseq/sailfish.py
sleuthify_sailfish
def sleuthify_sailfish(sailfish_dir): """ if installed, use wasabi to create abundance.h5 output for use with sleuth """ if not R_package_path("wasabi"): return None else: rscript = Rscript_cmd() cmd = """{rscript} --no-environ -e 'library("wasabi"); prepare_fish_for_sleuth(c("{sailfish_dir}"))'""" do.run(cmd.format(**locals()), "Converting Sailfish to Sleuth format.") return os.path.join(sailfish_dir, "abundance.h5")
python
def sleuthify_sailfish(sailfish_dir): """ if installed, use wasabi to create abundance.h5 output for use with sleuth """ if not R_package_path("wasabi"): return None else: rscript = Rscript_cmd() cmd = """{rscript} --no-environ -e 'library("wasabi"); prepare_fish_for_sleuth(c("{sailfish_dir}"))'""" do.run(cmd.format(**locals()), "Converting Sailfish to Sleuth format.") return os.path.join(sailfish_dir, "abundance.h5")
[ "def", "sleuthify_sailfish", "(", "sailfish_dir", ")", ":", "if", "not", "R_package_path", "(", "\"wasabi\"", ")", ":", "return", "None", "else", ":", "rscript", "=", "Rscript_cmd", "(", ")", "cmd", "=", "\"\"\"{rscript} --no-environ -e 'library(\"wasabi\"); prepare_f...
if installed, use wasabi to create abundance.h5 output for use with sleuth
[ "if", "installed", "use", "wasabi", "to", "create", "abundance", ".", "h5", "output", "for", "use", "with", "sleuth" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/sailfish.py#L66-L77
223,245
bcbio/bcbio-nextgen
bcbio/rnaseq/sailfish.py
create_combined_fasta
def create_combined_fasta(data): """ if there are genomes to be disambiguated, create a FASTA file of all of the transcripts for all genomes """ out_dir = os.path.join(dd.get_work_dir(data), "inputs", "transcriptome") items = disambiguate.split([data]) fasta_files = [] for i in items: odata = i[0] gtf_file = dd.get_gtf_file(odata) ref_file = dd.get_ref_file(odata) out_file = os.path.join(out_dir, dd.get_genome_build(odata) + ".fa") if file_exists(out_file): fasta_files.append(out_file) else: out_file = gtf.gtf_to_fasta(gtf_file, ref_file, out_file=out_file) fasta_files.append(out_file) out_stem = os.path.join(out_dir, dd.get_genome_build(data)) if dd.get_disambiguate(data): out_stem = "-".join([out_stem] + (dd.get_disambiguate(data) or [])) combined_file = out_stem + ".fa" if file_exists(combined_file): return combined_file fasta_file_string = " ".join(fasta_files) cmd = "cat {fasta_file_string} > {tx_out_file}" with file_transaction(data, combined_file) as tx_out_file: do.run(cmd.format(**locals()), "Combining transcriptome FASTA files.") return combined_file
python
def create_combined_fasta(data): """ if there are genomes to be disambiguated, create a FASTA file of all of the transcripts for all genomes """ out_dir = os.path.join(dd.get_work_dir(data), "inputs", "transcriptome") items = disambiguate.split([data]) fasta_files = [] for i in items: odata = i[0] gtf_file = dd.get_gtf_file(odata) ref_file = dd.get_ref_file(odata) out_file = os.path.join(out_dir, dd.get_genome_build(odata) + ".fa") if file_exists(out_file): fasta_files.append(out_file) else: out_file = gtf.gtf_to_fasta(gtf_file, ref_file, out_file=out_file) fasta_files.append(out_file) out_stem = os.path.join(out_dir, dd.get_genome_build(data)) if dd.get_disambiguate(data): out_stem = "-".join([out_stem] + (dd.get_disambiguate(data) or [])) combined_file = out_stem + ".fa" if file_exists(combined_file): return combined_file fasta_file_string = " ".join(fasta_files) cmd = "cat {fasta_file_string} > {tx_out_file}" with file_transaction(data, combined_file) as tx_out_file: do.run(cmd.format(**locals()), "Combining transcriptome FASTA files.") return combined_file
[ "def", "create_combined_fasta", "(", "data", ")", ":", "out_dir", "=", "os", ".", "path", ".", "join", "(", "dd", ".", "get_work_dir", "(", "data", ")", ",", "\"inputs\"", ",", "\"transcriptome\"", ")", "items", "=", "disambiguate", ".", "split", "(", "[...
if there are genomes to be disambiguated, create a FASTA file of all of the transcripts for all genomes
[ "if", "there", "are", "genomes", "to", "be", "disambiguated", "create", "a", "FASTA", "file", "of", "all", "of", "the", "transcripts", "for", "all", "genomes" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/sailfish.py#L79-L108
223,246
bcbio/bcbio-nextgen
bcbio/galaxy/nglims.py
prep_samples_and_config
def prep_samples_and_config(run_folder, ldetails, fastq_dir, config): """Prepare sample fastq files and provide global sample configuration for the flowcell. Handles merging of fastq files split by lane and also by the bcl2fastq preparation process. """ fastq_final_dir = utils.safe_makedir(os.path.join(fastq_dir, "merged")) cores = utils.get_in(config, ("algorithm", "num_cores"), 1) ldetails = joblib.Parallel(cores)(joblib.delayed(_prep_sample_and_config)(x, fastq_dir, fastq_final_dir) for x in _group_same_samples(ldetails)) config_file = _write_sample_config(run_folder, [x for x in ldetails if x]) return config_file, fastq_final_dir
python
def prep_samples_and_config(run_folder, ldetails, fastq_dir, config): """Prepare sample fastq files and provide global sample configuration for the flowcell. Handles merging of fastq files split by lane and also by the bcl2fastq preparation process. """ fastq_final_dir = utils.safe_makedir(os.path.join(fastq_dir, "merged")) cores = utils.get_in(config, ("algorithm", "num_cores"), 1) ldetails = joblib.Parallel(cores)(joblib.delayed(_prep_sample_and_config)(x, fastq_dir, fastq_final_dir) for x in _group_same_samples(ldetails)) config_file = _write_sample_config(run_folder, [x for x in ldetails if x]) return config_file, fastq_final_dir
[ "def", "prep_samples_and_config", "(", "run_folder", ",", "ldetails", ",", "fastq_dir", ",", "config", ")", ":", "fastq_final_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "fastq_dir", ",", "\"merged\"", ")", ")", "cores"...
Prepare sample fastq files and provide global sample configuration for the flowcell. Handles merging of fastq files split by lane and also by the bcl2fastq preparation process.
[ "Prepare", "sample", "fastq", "files", "and", "provide", "global", "sample", "configuration", "for", "the", "flowcell", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/galaxy/nglims.py#L23-L34
223,247
bcbio/bcbio-nextgen
bcbio/galaxy/nglims.py
_prep_sample_and_config
def _prep_sample_and_config(ldetail_group, fastq_dir, fastq_final_dir): """Prepare output fastq file and configuration for a single sample. Only passes non-empty files through for processing. """ files = [] print("->", ldetail_group[0]["name"], len(ldetail_group)) for read in ["R1", "R2"]: fastq_inputs = sorted(list(set(reduce(operator.add, (_get_fastq_files(x, read, fastq_dir) for x in ldetail_group))))) if len(fastq_inputs) > 0: files.append(_concat_bgzip_fastq(fastq_inputs, fastq_final_dir, read, ldetail_group[0])) if len(files) > 0: if _non_empty(files[0]): out = ldetail_group[0] out["files"] = files return out
python
def _prep_sample_and_config(ldetail_group, fastq_dir, fastq_final_dir): """Prepare output fastq file and configuration for a single sample. Only passes non-empty files through for processing. """ files = [] print("->", ldetail_group[0]["name"], len(ldetail_group)) for read in ["R1", "R2"]: fastq_inputs = sorted(list(set(reduce(operator.add, (_get_fastq_files(x, read, fastq_dir) for x in ldetail_group))))) if len(fastq_inputs) > 0: files.append(_concat_bgzip_fastq(fastq_inputs, fastq_final_dir, read, ldetail_group[0])) if len(files) > 0: if _non_empty(files[0]): out = ldetail_group[0] out["files"] = files return out
[ "def", "_prep_sample_and_config", "(", "ldetail_group", ",", "fastq_dir", ",", "fastq_final_dir", ")", ":", "files", "=", "[", "]", "print", "(", "\"->\"", ",", "ldetail_group", "[", "0", "]", "[", "\"name\"", "]", ",", "len", "(", "ldetail_group", ")", ")...
Prepare output fastq file and configuration for a single sample. Only passes non-empty files through for processing.
[ "Prepare", "output", "fastq", "file", "and", "configuration", "for", "a", "single", "sample", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/galaxy/nglims.py#L36-L52
223,248
bcbio/bcbio-nextgen
bcbio/galaxy/nglims.py
_write_sample_config
def _write_sample_config(run_folder, ldetails): """Generate a bcbio-nextgen YAML configuration file for processing a sample. """ out_file = os.path.join(run_folder, "%s.yaml" % os.path.basename(run_folder)) with open(out_file, "w") as out_handle: fc_name, fc_date = flowcell.parse_dirname(run_folder) out = {"details": sorted([_prepare_sample(x, run_folder) for x in ldetails], key=operator.itemgetter("name", "description")), "fc_name": fc_name, "fc_date": fc_date} yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False) return out_file
python
def _write_sample_config(run_folder, ldetails): """Generate a bcbio-nextgen YAML configuration file for processing a sample. """ out_file = os.path.join(run_folder, "%s.yaml" % os.path.basename(run_folder)) with open(out_file, "w") as out_handle: fc_name, fc_date = flowcell.parse_dirname(run_folder) out = {"details": sorted([_prepare_sample(x, run_folder) for x in ldetails], key=operator.itemgetter("name", "description")), "fc_name": fc_name, "fc_date": fc_date} yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False) return out_file
[ "def", "_write_sample_config", "(", "run_folder", ",", "ldetails", ")", ":", "out_file", "=", "os", ".", "path", ".", "join", "(", "run_folder", ",", "\"%s.yaml\"", "%", "os", ".", "path", ".", "basename", "(", "run_folder", ")", ")", "with", "open", "("...
Generate a bcbio-nextgen YAML configuration file for processing a sample.
[ "Generate", "a", "bcbio", "-", "nextgen", "YAML", "configuration", "file", "for", "processing", "a", "sample", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/galaxy/nglims.py#L60-L71
223,249
bcbio/bcbio-nextgen
bcbio/galaxy/nglims.py
_prepare_sample
def _prepare_sample(data, run_folder): """Extract passed keywords from input LIMS information. """ want = set(["description", "files", "genome_build", "name", "analysis", "upload", "algorithm"]) out = {} for k, v in data.items(): if k in want: out[k] = _relative_paths(v, run_folder) if "algorithm" not in out: analysis, algorithm = _select_default_algorithm(out.get("analysis")) out["algorithm"] = algorithm out["analysis"] = analysis description = "%s-%s" % (out["name"], clean_name(out["description"])) out["name"] = [out["name"], description] out["description"] = description return out
python
def _prepare_sample(data, run_folder): """Extract passed keywords from input LIMS information. """ want = set(["description", "files", "genome_build", "name", "analysis", "upload", "algorithm"]) out = {} for k, v in data.items(): if k in want: out[k] = _relative_paths(v, run_folder) if "algorithm" not in out: analysis, algorithm = _select_default_algorithm(out.get("analysis")) out["algorithm"] = algorithm out["analysis"] = analysis description = "%s-%s" % (out["name"], clean_name(out["description"])) out["name"] = [out["name"], description] out["description"] = description return out
[ "def", "_prepare_sample", "(", "data", ",", "run_folder", ")", ":", "want", "=", "set", "(", "[", "\"description\"", ",", "\"files\"", ",", "\"genome_build\"", ",", "\"name\"", ",", "\"analysis\"", ",", "\"upload\"", ",", "\"algorithm\"", "]", ")", "out", "=...
Extract passed keywords from input LIMS information.
[ "Extract", "passed", "keywords", "from", "input", "LIMS", "information", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/galaxy/nglims.py#L73-L88
223,250
bcbio/bcbio-nextgen
bcbio/galaxy/nglims.py
_select_default_algorithm
def _select_default_algorithm(analysis): """Provide default algorithm sections from templates or standard """ if not analysis or analysis == "Standard": return "Standard", {"aligner": "bwa", "platform": "illumina", "quality_format": "Standard", "recalibrate": False, "realign": False, "mark_duplicates": True, "variantcaller": False} elif "variant" in analysis: try: config, _ = template.name_to_config(analysis) except ValueError: config, _ = template.name_to_config("freebayes-variant") return "variant", config["details"][0]["algorithm"] else: return analysis, {}
python
def _select_default_algorithm(analysis): """Provide default algorithm sections from templates or standard """ if not analysis or analysis == "Standard": return "Standard", {"aligner": "bwa", "platform": "illumina", "quality_format": "Standard", "recalibrate": False, "realign": False, "mark_duplicates": True, "variantcaller": False} elif "variant" in analysis: try: config, _ = template.name_to_config(analysis) except ValueError: config, _ = template.name_to_config("freebayes-variant") return "variant", config["details"][0]["algorithm"] else: return analysis, {}
[ "def", "_select_default_algorithm", "(", "analysis", ")", ":", "if", "not", "analysis", "or", "analysis", "==", "\"Standard\"", ":", "return", "\"Standard\"", ",", "{", "\"aligner\"", ":", "\"bwa\"", ",", "\"platform\"", ":", "\"illumina\"", ",", "\"quality_format...
Provide default algorithm sections from templates or standard
[ "Provide", "default", "algorithm", "sections", "from", "templates", "or", "standard" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/galaxy/nglims.py#L90-L104
223,251
bcbio/bcbio-nextgen
bcbio/galaxy/nglims.py
_relative_paths
def _relative_paths(xs, base_path): """Adjust paths to be relative to the provided base path. """ if isinstance(xs, six.string_types): if xs.startswith(base_path): return xs.replace(base_path + "/", "", 1) else: return xs elif isinstance(xs, (list, tuple)): return [_relative_paths(x, base_path) for x in xs] elif isinstance(xs, dict): out = {} for k, v in xs.items(): out[k] = _relative_paths(v, base_path) return out else: return xs
python
def _relative_paths(xs, base_path): """Adjust paths to be relative to the provided base path. """ if isinstance(xs, six.string_types): if xs.startswith(base_path): return xs.replace(base_path + "/", "", 1) else: return xs elif isinstance(xs, (list, tuple)): return [_relative_paths(x, base_path) for x in xs] elif isinstance(xs, dict): out = {} for k, v in xs.items(): out[k] = _relative_paths(v, base_path) return out else: return xs
[ "def", "_relative_paths", "(", "xs", ",", "base_path", ")", ":", "if", "isinstance", "(", "xs", ",", "six", ".", "string_types", ")", ":", "if", "xs", ".", "startswith", "(", "base_path", ")", ":", "return", "xs", ".", "replace", "(", "base_path", "+",...
Adjust paths to be relative to the provided base path.
[ "Adjust", "paths", "to", "be", "relative", "to", "the", "provided", "base", "path", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/galaxy/nglims.py#L106-L122
223,252
bcbio/bcbio-nextgen
bcbio/galaxy/nglims.py
_get_fastq_files
def _get_fastq_files(ldetail, read, fastq_dir): """Retrieve fastq files corresponding to the sample and read number. """ return glob.glob(os.path.join(fastq_dir, "Project_%s" % ldetail["project_name"], "Sample_%s" % ldetail["name"], "%s_*_%s_*.fastq.gz" % (ldetail["name"], read)))
python
def _get_fastq_files(ldetail, read, fastq_dir): """Retrieve fastq files corresponding to the sample and read number. """ return glob.glob(os.path.join(fastq_dir, "Project_%s" % ldetail["project_name"], "Sample_%s" % ldetail["name"], "%s_*_%s_*.fastq.gz" % (ldetail["name"], read)))
[ "def", "_get_fastq_files", "(", "ldetail", ",", "read", ",", "fastq_dir", ")", ":", "return", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "fastq_dir", ",", "\"Project_%s\"", "%", "ldetail", "[", "\"project_name\"", "]", ",", "\"Sample_%s...
Retrieve fastq files corresponding to the sample and read number.
[ "Retrieve", "fastq", "files", "corresponding", "to", "the", "sample", "and", "read", "number", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/galaxy/nglims.py#L124-L129
223,253
bcbio/bcbio-nextgen
bcbio/galaxy/nglims.py
_concat_bgzip_fastq
def _concat_bgzip_fastq(finputs, out_dir, read, ldetail): """Concatenate multiple input fastq files, preparing a bgzipped output file. """ out_file = os.path.join(out_dir, "%s_%s.fastq.gz" % (ldetail["name"], read)) if not utils.file_exists(out_file): with file_transaction(out_file) as tx_out_file: subprocess.check_call("zcat %s | bgzip -c > %s" % (" ".join(finputs), tx_out_file), shell=True) return out_file
python
def _concat_bgzip_fastq(finputs, out_dir, read, ldetail): """Concatenate multiple input fastq files, preparing a bgzipped output file. """ out_file = os.path.join(out_dir, "%s_%s.fastq.gz" % (ldetail["name"], read)) if not utils.file_exists(out_file): with file_transaction(out_file) as tx_out_file: subprocess.check_call("zcat %s | bgzip -c > %s" % (" ".join(finputs), tx_out_file), shell=True) return out_file
[ "def", "_concat_bgzip_fastq", "(", "finputs", ",", "out_dir", ",", "read", ",", "ldetail", ")", ":", "out_file", "=", "os", ".", "path", ".", "join", "(", "out_dir", ",", "\"%s_%s.fastq.gz\"", "%", "(", "ldetail", "[", "\"name\"", "]", ",", "read", ")", ...
Concatenate multiple input fastq files, preparing a bgzipped output file.
[ "Concatenate", "multiple", "input", "fastq", "files", "preparing", "a", "bgzipped", "output", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/galaxy/nglims.py#L131-L138
223,254
bcbio/bcbio-nextgen
bcbio/galaxy/nglims.py
get_runinfo
def get_runinfo(galaxy_url, galaxy_apikey, run_folder, storedir): """Retrieve flattened run information for a processed directory from Galaxy nglims API. """ galaxy_api = GalaxyApiAccess(galaxy_url, galaxy_apikey) fc_name, fc_date = flowcell.parse_dirname(run_folder) galaxy_info = galaxy_api.run_details(fc_name, fc_date) if "error" in galaxy_info: return galaxy_info if not galaxy_info["run_name"].startswith(fc_date) and not galaxy_info["run_name"].endswith(fc_name): raise ValueError("Galaxy NGLIMS information %s does not match flowcell %s %s" % (galaxy_info["run_name"], fc_date, fc_name)) ldetails = _flatten_lane_details(galaxy_info) out = [] for item in ldetails: # Do uploads for all non-controls if item["description"] != "control" or item["project_name"] != "control": item["upload"] = {"method": "galaxy", "run_id": galaxy_info["run_id"], "fc_name": fc_name, "fc_date": fc_date, "dir": storedir, "galaxy_url": galaxy_url, "galaxy_api_key": galaxy_apikey} for k in ["lab_association", "private_libs", "researcher", "researcher_id", "sample_id", "galaxy_library", "galaxy_role"]: item["upload"][k] = item.pop(k, "") out.append(item) return out
python
def get_runinfo(galaxy_url, galaxy_apikey, run_folder, storedir): """Retrieve flattened run information for a processed directory from Galaxy nglims API. """ galaxy_api = GalaxyApiAccess(galaxy_url, galaxy_apikey) fc_name, fc_date = flowcell.parse_dirname(run_folder) galaxy_info = galaxy_api.run_details(fc_name, fc_date) if "error" in galaxy_info: return galaxy_info if not galaxy_info["run_name"].startswith(fc_date) and not galaxy_info["run_name"].endswith(fc_name): raise ValueError("Galaxy NGLIMS information %s does not match flowcell %s %s" % (galaxy_info["run_name"], fc_date, fc_name)) ldetails = _flatten_lane_details(galaxy_info) out = [] for item in ldetails: # Do uploads for all non-controls if item["description"] != "control" or item["project_name"] != "control": item["upload"] = {"method": "galaxy", "run_id": galaxy_info["run_id"], "fc_name": fc_name, "fc_date": fc_date, "dir": storedir, "galaxy_url": galaxy_url, "galaxy_api_key": galaxy_apikey} for k in ["lab_association", "private_libs", "researcher", "researcher_id", "sample_id", "galaxy_library", "galaxy_role"]: item["upload"][k] = item.pop(k, "") out.append(item) return out
[ "def", "get_runinfo", "(", "galaxy_url", ",", "galaxy_apikey", ",", "run_folder", ",", "storedir", ")", ":", "galaxy_api", "=", "GalaxyApiAccess", "(", "galaxy_url", ",", "galaxy_apikey", ")", "fc_name", ",", "fc_date", "=", "flowcell", ".", "parse_dirname", "("...
Retrieve flattened run information for a processed directory from Galaxy nglims API.
[ "Retrieve", "flattened", "run", "information", "for", "a", "processed", "directory", "from", "Galaxy", "nglims", "API", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/galaxy/nglims.py#L148-L172
223,255
bcbio/bcbio-nextgen
bcbio/galaxy/nglims.py
_flatten_lane_details
def _flatten_lane_details(runinfo): """Provide flattened lane information with multiplexed barcodes separated. """ out = [] for ldetail in runinfo["details"]: # handle controls if "project_name" not in ldetail and ldetail["description"] == "control": ldetail["project_name"] = "control" for i, barcode in enumerate(ldetail.get("multiplex", [{}])): cur = copy.deepcopy(ldetail) cur["name"] = "%s-%s" % (ldetail["name"], i + 1) cur["description"] = barcode.get("name", ldetail["description"]) cur["bc_index"] = barcode.get("sequence", "") cur["project_name"] = clean_name(ldetail["project_name"]) out.append(cur) return out
python
def _flatten_lane_details(runinfo): """Provide flattened lane information with multiplexed barcodes separated. """ out = [] for ldetail in runinfo["details"]: # handle controls if "project_name" not in ldetail and ldetail["description"] == "control": ldetail["project_name"] = "control" for i, barcode in enumerate(ldetail.get("multiplex", [{}])): cur = copy.deepcopy(ldetail) cur["name"] = "%s-%s" % (ldetail["name"], i + 1) cur["description"] = barcode.get("name", ldetail["description"]) cur["bc_index"] = barcode.get("sequence", "") cur["project_name"] = clean_name(ldetail["project_name"]) out.append(cur) return out
[ "def", "_flatten_lane_details", "(", "runinfo", ")", ":", "out", "=", "[", "]", "for", "ldetail", "in", "runinfo", "[", "\"details\"", "]", ":", "# handle controls", "if", "\"project_name\"", "not", "in", "ldetail", "and", "ldetail", "[", "\"description\"", "]...
Provide flattened lane information with multiplexed barcodes separated.
[ "Provide", "flattened", "lane", "information", "with", "multiplexed", "barcodes", "separated", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/galaxy/nglims.py#L174-L189
223,256
bcbio/bcbio-nextgen
bcbio/distributed/split.py
grouped_parallel_split_combine
def grouped_parallel_split_combine(args, split_fn, group_fn, parallel_fn, parallel_name, combine_name, file_key, combine_arg_keys, split_outfile_i=-1): """Parallel split runner that allows grouping of samples during processing. This builds on parallel_split_combine to provide the additional ability to group samples and subsequently split them back apart. This allows analysis of related samples together. In addition to the arguments documented in parallel_split_combine, this needs: group_fn: A function that groups samples together given their configuration details. """ grouped_args = group_fn(args) split_args, combine_map, finished_out, extras = _get_split_tasks(grouped_args, split_fn, file_key, split_outfile_i) final_output = parallel_fn(parallel_name, split_args) combine_args, final_args = _organize_output(final_output, combine_map, file_key, combine_arg_keys) parallel_fn(combine_name, combine_args) return finished_out + final_args + extras
python
def grouped_parallel_split_combine(args, split_fn, group_fn, parallel_fn, parallel_name, combine_name, file_key, combine_arg_keys, split_outfile_i=-1): """Parallel split runner that allows grouping of samples during processing. This builds on parallel_split_combine to provide the additional ability to group samples and subsequently split them back apart. This allows analysis of related samples together. In addition to the arguments documented in parallel_split_combine, this needs: group_fn: A function that groups samples together given their configuration details. """ grouped_args = group_fn(args) split_args, combine_map, finished_out, extras = _get_split_tasks(grouped_args, split_fn, file_key, split_outfile_i) final_output = parallel_fn(parallel_name, split_args) combine_args, final_args = _organize_output(final_output, combine_map, file_key, combine_arg_keys) parallel_fn(combine_name, combine_args) return finished_out + final_args + extras
[ "def", "grouped_parallel_split_combine", "(", "args", ",", "split_fn", ",", "group_fn", ",", "parallel_fn", ",", "parallel_name", ",", "combine_name", ",", "file_key", ",", "combine_arg_keys", ",", "split_outfile_i", "=", "-", "1", ")", ":", "grouped_args", "=", ...
Parallel split runner that allows grouping of samples during processing. This builds on parallel_split_combine to provide the additional ability to group samples and subsequently split them back apart. This allows analysis of related samples together. In addition to the arguments documented in parallel_split_combine, this needs: group_fn: A function that groups samples together given their configuration details.
[ "Parallel", "split", "runner", "that", "allows", "grouping", "of", "samples", "during", "processing", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/split.py#L18-L39
223,257
bcbio/bcbio-nextgen
bcbio/distributed/split.py
parallel_split_combine
def parallel_split_combine(args, split_fn, parallel_fn, parallel_name, combiner, file_key, combine_arg_keys, split_outfile_i=-1): """Split, run split items in parallel then combine to output file. split_fn: Split an input file into parts for processing. Returns the name of the combined output file along with the individual split output names and arguments for the parallel function. parallel_fn: Reference to run_parallel function that will run single core, multicore, or distributed as needed. parallel_name: The name of the function, defined in bcbio.distributed.tasks/multitasks/ipythontasks to run in parallel. combiner: The name of the function, also from tasks, that combines the split output files into a final ready to run file. Can also be a callable function if combining is delayed. split_outfile_i: the location of the output file in the arguments generated by the split function. Defaults to the last item in the list. """ args = [x[0] for x in args] split_args, combine_map, finished_out, extras = _get_split_tasks(args, split_fn, file_key, split_outfile_i) split_output = parallel_fn(parallel_name, split_args) if isinstance(combiner, six.string_types): combine_args, final_args = _organize_output(split_output, combine_map, file_key, combine_arg_keys) parallel_fn(combiner, combine_args) elif callable(combiner): final_args = combiner(split_output, combine_map, file_key) return finished_out + final_args + extras
python
def parallel_split_combine(args, split_fn, parallel_fn, parallel_name, combiner, file_key, combine_arg_keys, split_outfile_i=-1): """Split, run split items in parallel then combine to output file. split_fn: Split an input file into parts for processing. Returns the name of the combined output file along with the individual split output names and arguments for the parallel function. parallel_fn: Reference to run_parallel function that will run single core, multicore, or distributed as needed. parallel_name: The name of the function, defined in bcbio.distributed.tasks/multitasks/ipythontasks to run in parallel. combiner: The name of the function, also from tasks, that combines the split output files into a final ready to run file. Can also be a callable function if combining is delayed. split_outfile_i: the location of the output file in the arguments generated by the split function. Defaults to the last item in the list. """ args = [x[0] for x in args] split_args, combine_map, finished_out, extras = _get_split_tasks(args, split_fn, file_key, split_outfile_i) split_output = parallel_fn(parallel_name, split_args) if isinstance(combiner, six.string_types): combine_args, final_args = _organize_output(split_output, combine_map, file_key, combine_arg_keys) parallel_fn(combiner, combine_args) elif callable(combiner): final_args = combiner(split_output, combine_map, file_key) return finished_out + final_args + extras
[ "def", "parallel_split_combine", "(", "args", ",", "split_fn", ",", "parallel_fn", ",", "parallel_name", ",", "combiner", ",", "file_key", ",", "combine_arg_keys", ",", "split_outfile_i", "=", "-", "1", ")", ":", "args", "=", "[", "x", "[", "0", "]", "for"...
Split, run split items in parallel then combine to output file. split_fn: Split an input file into parts for processing. Returns the name of the combined output file along with the individual split output names and arguments for the parallel function. parallel_fn: Reference to run_parallel function that will run single core, multicore, or distributed as needed. parallel_name: The name of the function, defined in bcbio.distributed.tasks/multitasks/ipythontasks to run in parallel. combiner: The name of the function, also from tasks, that combines the split output files into a final ready to run file. Can also be a callable function if combining is delayed. split_outfile_i: the location of the output file in the arguments generated by the split function. Defaults to the last item in the list.
[ "Split", "run", "split", "items", "in", "parallel", "then", "combine", "to", "output", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/split.py#L41-L69
223,258
bcbio/bcbio-nextgen
bcbio/distributed/split.py
_get_extra_args
def _get_extra_args(extra_args, arg_keys): """Retrieve extra arguments to pass along to combine function. Special cases like reference files and configuration information are passed as single items, the rest as lists mapping to each data item combined. """ # XXX back compatible hack -- should have a way to specify these. single_keys = set(["sam_ref", "config"]) out = [] for i, arg_key in enumerate(arg_keys): vals = [xs[i] for xs in extra_args] if arg_key in single_keys: out.append(vals[-1]) else: out.append(vals) return out
python
def _get_extra_args(extra_args, arg_keys): """Retrieve extra arguments to pass along to combine function. Special cases like reference files and configuration information are passed as single items, the rest as lists mapping to each data item combined. """ # XXX back compatible hack -- should have a way to specify these. single_keys = set(["sam_ref", "config"]) out = [] for i, arg_key in enumerate(arg_keys): vals = [xs[i] for xs in extra_args] if arg_key in single_keys: out.append(vals[-1]) else: out.append(vals) return out
[ "def", "_get_extra_args", "(", "extra_args", ",", "arg_keys", ")", ":", "# XXX back compatible hack -- should have a way to specify these.", "single_keys", "=", "set", "(", "[", "\"sam_ref\"", ",", "\"config\"", "]", ")", "out", "=", "[", "]", "for", "i", ",", "ar...
Retrieve extra arguments to pass along to combine function. Special cases like reference files and configuration information are passed as single items, the rest as lists mapping to each data item combined.
[ "Retrieve", "extra", "arguments", "to", "pass", "along", "to", "combine", "function", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/split.py#L71-L87
223,259
bcbio/bcbio-nextgen
bcbio/distributed/split.py
_organize_output
def _organize_output(output, combine_map, file_key, combine_arg_keys): """Combine output details for parallelization. file_key is the key name of the output file used in merging. We extract this file from the output data. combine_arg_keys are extra items to pass along to the combine function. """ out_map = collections.defaultdict(list) extra_args = collections.defaultdict(list) final_args = collections.OrderedDict() extras = [] for data in output: cur_file = data.get(file_key) if not cur_file: extras.append([data]) else: cur_out = combine_map[cur_file] out_map[cur_out].append(cur_file) extra_args[cur_out].append([data[x] for x in combine_arg_keys]) data[file_key] = cur_out if cur_out not in final_args: final_args[cur_out] = [data] else: extras.append([data]) combine_args = [[v, k] + _get_extra_args(extra_args[k], combine_arg_keys) for (k, v) in out_map.items()] return combine_args, list(final_args.values()) + extras
python
def _organize_output(output, combine_map, file_key, combine_arg_keys): """Combine output details for parallelization. file_key is the key name of the output file used in merging. We extract this file from the output data. combine_arg_keys are extra items to pass along to the combine function. """ out_map = collections.defaultdict(list) extra_args = collections.defaultdict(list) final_args = collections.OrderedDict() extras = [] for data in output: cur_file = data.get(file_key) if not cur_file: extras.append([data]) else: cur_out = combine_map[cur_file] out_map[cur_out].append(cur_file) extra_args[cur_out].append([data[x] for x in combine_arg_keys]) data[file_key] = cur_out if cur_out not in final_args: final_args[cur_out] = [data] else: extras.append([data]) combine_args = [[v, k] + _get_extra_args(extra_args[k], combine_arg_keys) for (k, v) in out_map.items()] return combine_args, list(final_args.values()) + extras
[ "def", "_organize_output", "(", "output", ",", "combine_map", ",", "file_key", ",", "combine_arg_keys", ")", ":", "out_map", "=", "collections", ".", "defaultdict", "(", "list", ")", "extra_args", "=", "collections", ".", "defaultdict", "(", "list", ")", "fina...
Combine output details for parallelization. file_key is the key name of the output file used in merging. We extract this file from the output data. combine_arg_keys are extra items to pass along to the combine function.
[ "Combine", "output", "details", "for", "parallelization", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/split.py#L89-L116
223,260
bcbio/bcbio-nextgen
bcbio/distributed/split.py
_get_split_tasks
def _get_split_tasks(args, split_fn, file_key, outfile_i=-1): """Split up input files and arguments, returning arguments for parallel processing. outfile_i specifies the location of the output file in the arguments to the processing function. Defaults to the last item in the list. """ split_args = [] combine_map = {} finished_map = collections.OrderedDict() extras = [] for data in args: out_final, out_parts = split_fn(data) for parts in out_parts: split_args.append([utils.deepish_copy(data)] + list(parts)) for part_file in [x[outfile_i] for x in out_parts]: combine_map[part_file] = out_final if len(out_parts) == 0: if out_final is not None: if out_final not in finished_map: data[file_key] = out_final finished_map[out_final] = [data] else: extras.append([data]) else: extras.append([data]) return split_args, combine_map, list(finished_map.values()), extras
python
def _get_split_tasks(args, split_fn, file_key, outfile_i=-1): """Split up input files and arguments, returning arguments for parallel processing. outfile_i specifies the location of the output file in the arguments to the processing function. Defaults to the last item in the list. """ split_args = [] combine_map = {} finished_map = collections.OrderedDict() extras = [] for data in args: out_final, out_parts = split_fn(data) for parts in out_parts: split_args.append([utils.deepish_copy(data)] + list(parts)) for part_file in [x[outfile_i] for x in out_parts]: combine_map[part_file] = out_final if len(out_parts) == 0: if out_final is not None: if out_final not in finished_map: data[file_key] = out_final finished_map[out_final] = [data] else: extras.append([data]) else: extras.append([data]) return split_args, combine_map, list(finished_map.values()), extras
[ "def", "_get_split_tasks", "(", "args", ",", "split_fn", ",", "file_key", ",", "outfile_i", "=", "-", "1", ")", ":", "split_args", "=", "[", "]", "combine_map", "=", "{", "}", "finished_map", "=", "collections", ".", "OrderedDict", "(", ")", "extras", "=...
Split up input files and arguments, returning arguments for parallel processing. outfile_i specifies the location of the output file in the arguments to the processing function. Defaults to the last item in the list.
[ "Split", "up", "input", "files", "and", "arguments", "returning", "arguments", "for", "parallel", "processing", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/split.py#L118-L143
223,261
bcbio/bcbio-nextgen
bcbio/ngsalign/snap.py
remap_index_fn
def remap_index_fn(ref_file): """Map sequence references to snap reference directory, using standard layout. """ snap_dir = os.path.join(os.path.dirname(ref_file), os.pardir, "snap") assert os.path.exists(snap_dir) and os.path.isdir(snap_dir), snap_dir return snap_dir
python
def remap_index_fn(ref_file): """Map sequence references to snap reference directory, using standard layout. """ snap_dir = os.path.join(os.path.dirname(ref_file), os.pardir, "snap") assert os.path.exists(snap_dir) and os.path.isdir(snap_dir), snap_dir return snap_dir
[ "def", "remap_index_fn", "(", "ref_file", ")", ":", "snap_dir", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "ref_file", ")", ",", "os", ".", "pardir", ",", "\"snap\"", ")", "assert", "os", ".", "path", ".", "ex...
Map sequence references to snap reference directory, using standard layout.
[ "Map", "sequence", "references", "to", "snap", "reference", "directory", "using", "standard", "layout", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/snap.py#L81-L86
223,262
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
use_general_sv_bins
def use_general_sv_bins(data): """Check if we should use a general binning approach for a sample. Checks if CNVkit is enabled and we haven't already run CNVkit. """ if any([c in dd.get_svcaller(data) for c in ["cnvkit", "titancna", "purecn", "gatk-cnv"]]): if not _get_original_coverage(data): return True return False
python
def use_general_sv_bins(data): """Check if we should use a general binning approach for a sample. Checks if CNVkit is enabled and we haven't already run CNVkit. """ if any([c in dd.get_svcaller(data) for c in ["cnvkit", "titancna", "purecn", "gatk-cnv"]]): if not _get_original_coverage(data): return True return False
[ "def", "use_general_sv_bins", "(", "data", ")", ":", "if", "any", "(", "[", "c", "in", "dd", ".", "get_svcaller", "(", "data", ")", "for", "c", "in", "[", "\"cnvkit\"", ",", "\"titancna\"", ",", "\"purecn\"", ",", "\"gatk-cnv\"", "]", "]", ")", ":", ...
Check if we should use a general binning approach for a sample. Checks if CNVkit is enabled and we haven't already run CNVkit.
[ "Check", "if", "we", "should", "use", "a", "general", "binning", "approach", "for", "a", "sample", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L31-L39
223,263
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
bin_approach
def bin_approach(data): """Check for binning approach from configuration or normalized file. """ for approach in ["cnvkit", "gatk-cnv"]: if approach in dd.get_svcaller(data): return approach norm_file = tz.get_in(["depth", "bins", "normalized"], data) if norm_file.endswith(("-crstandardized.tsv", "-crdenoised.tsv")): return "gatk-cnv" if norm_file.endswith(".cnr"): return "cnvkit"
python
def bin_approach(data): """Check for binning approach from configuration or normalized file. """ for approach in ["cnvkit", "gatk-cnv"]: if approach in dd.get_svcaller(data): return approach norm_file = tz.get_in(["depth", "bins", "normalized"], data) if norm_file.endswith(("-crstandardized.tsv", "-crdenoised.tsv")): return "gatk-cnv" if norm_file.endswith(".cnr"): return "cnvkit"
[ "def", "bin_approach", "(", "data", ")", ":", "for", "approach", "in", "[", "\"cnvkit\"", ",", "\"gatk-cnv\"", "]", ":", "if", "approach", "in", "dd", ".", "get_svcaller", "(", "data", ")", ":", "return", "approach", "norm_file", "=", "tz", ".", "get_in"...
Check for binning approach from configuration or normalized file.
[ "Check", "for", "binning", "approach", "from", "configuration", "or", "normalized", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L41-L51
223,264
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_cnvkit_by_type
def _cnvkit_by_type(items, background): """Dispatch to specific CNVkit functionality based on input type. """ if len(items + background) == 1: return _run_cnvkit_single(items[0]) elif vcfutils.get_paired_phenotype(items[0]): return _run_cnvkit_cancer(items, background) else: return _run_cnvkit_population(items, background)
python
def _cnvkit_by_type(items, background): """Dispatch to specific CNVkit functionality based on input type. """ if len(items + background) == 1: return _run_cnvkit_single(items[0]) elif vcfutils.get_paired_phenotype(items[0]): return _run_cnvkit_cancer(items, background) else: return _run_cnvkit_population(items, background)
[ "def", "_cnvkit_by_type", "(", "items", ",", "background", ")", ":", "if", "len", "(", "items", "+", "background", ")", "==", "1", ":", "return", "_run_cnvkit_single", "(", "items", "[", "0", "]", ")", "elif", "vcfutils", ".", "get_paired_phenotype", "(", ...
Dispatch to specific CNVkit functionality based on input type.
[ "Dispatch", "to", "specific", "CNVkit", "functionality", "based", "on", "input", "type", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L63-L71
223,265
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_associate_cnvkit_out
def _associate_cnvkit_out(ckouts, items, is_somatic=False): """Associate cnvkit output with individual items. """ assert len(ckouts) == len(items) out = [] upload_counts = collections.defaultdict(int) for ckout, data in zip(ckouts, items): ckout = copy.deepcopy(ckout) ckout["variantcaller"] = "cnvkit" if utils.file_exists(ckout["cns"]) and _cna_has_values(ckout["cns"]): ckout = _add_seg_to_output(ckout, data) ckout = _add_gainloss_to_output(ckout, data) ckout = _add_segmetrics_to_output(ckout, data) ckout = _add_variantcalls_to_output(ckout, data, items, is_somatic) # ckout = _add_coverage_bedgraph_to_output(ckout, data) ckout = _add_cnr_bedgraph_and_bed_to_output(ckout, data) if "svplots" in dd.get_tools_on(data): ckout = _add_plots_to_output(ckout, data) ckout["do_upload"] = upload_counts[ckout.get("vrn_file")] == 0 if "sv" not in data: data["sv"] = [] data["sv"].append(ckout) if ckout.get("vrn_file"): upload_counts[ckout["vrn_file"]] += 1 out.append(data) return out
python
def _associate_cnvkit_out(ckouts, items, is_somatic=False): """Associate cnvkit output with individual items. """ assert len(ckouts) == len(items) out = [] upload_counts = collections.defaultdict(int) for ckout, data in zip(ckouts, items): ckout = copy.deepcopy(ckout) ckout["variantcaller"] = "cnvkit" if utils.file_exists(ckout["cns"]) and _cna_has_values(ckout["cns"]): ckout = _add_seg_to_output(ckout, data) ckout = _add_gainloss_to_output(ckout, data) ckout = _add_segmetrics_to_output(ckout, data) ckout = _add_variantcalls_to_output(ckout, data, items, is_somatic) # ckout = _add_coverage_bedgraph_to_output(ckout, data) ckout = _add_cnr_bedgraph_and_bed_to_output(ckout, data) if "svplots" in dd.get_tools_on(data): ckout = _add_plots_to_output(ckout, data) ckout["do_upload"] = upload_counts[ckout.get("vrn_file")] == 0 if "sv" not in data: data["sv"] = [] data["sv"].append(ckout) if ckout.get("vrn_file"): upload_counts[ckout["vrn_file"]] += 1 out.append(data) return out
[ "def", "_associate_cnvkit_out", "(", "ckouts", ",", "items", ",", "is_somatic", "=", "False", ")", ":", "assert", "len", "(", "ckouts", ")", "==", "len", "(", "items", ")", "out", "=", "[", "]", "upload_counts", "=", "collections", ".", "defaultdict", "(...
Associate cnvkit output with individual items.
[ "Associate", "cnvkit", "output", "with", "individual", "items", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L73-L98
223,266
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_run_cnvkit_single
def _run_cnvkit_single(data, background=None): """Process a single input file with BAM or uniform background. """ if not background: background = [] ckouts = _run_cnvkit_shared([data], background) if not ckouts: return [data] else: assert len(ckouts) == 1 return _associate_cnvkit_out(ckouts, [data])
python
def _run_cnvkit_single(data, background=None): """Process a single input file with BAM or uniform background. """ if not background: background = [] ckouts = _run_cnvkit_shared([data], background) if not ckouts: return [data] else: assert len(ckouts) == 1 return _associate_cnvkit_out(ckouts, [data])
[ "def", "_run_cnvkit_single", "(", "data", ",", "background", "=", "None", ")", ":", "if", "not", "background", ":", "background", "=", "[", "]", "ckouts", "=", "_run_cnvkit_shared", "(", "[", "data", "]", ",", "background", ")", "if", "not", "ckouts", ":...
Process a single input file with BAM or uniform background.
[ "Process", "a", "single", "input", "file", "with", "BAM", "or", "uniform", "background", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L100-L110
223,267
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_run_cnvkit_population
def _run_cnvkit_population(items, background): """Run CNVkit on a population of samples. Tries to calculate background based on case/controls, otherwise uses samples from the same batch as background. """ if background and len(background) > 0: inputs = items else: inputs, background = shared.find_case_control(items) # if we have case/control organized background or a single sample if len(inputs) == 1 or len(background) > 0: ckouts = _run_cnvkit_shared(inputs, background) return _associate_cnvkit_out(ckouts, inputs) + background # otherwise run each sample with the others in the batch as background else: out = [] for cur_input in items: background = [d for d in items if dd.get_sample_name(d) != dd.get_sample_name(cur_input)] ckouts = _run_cnvkit_shared([cur_input], background) out.extend(_associate_cnvkit_out(ckouts, [cur_input])) return out
python
def _run_cnvkit_population(items, background): """Run CNVkit on a population of samples. Tries to calculate background based on case/controls, otherwise uses samples from the same batch as background. """ if background and len(background) > 0: inputs = items else: inputs, background = shared.find_case_control(items) # if we have case/control organized background or a single sample if len(inputs) == 1 or len(background) > 0: ckouts = _run_cnvkit_shared(inputs, background) return _associate_cnvkit_out(ckouts, inputs) + background # otherwise run each sample with the others in the batch as background else: out = [] for cur_input in items: background = [d for d in items if dd.get_sample_name(d) != dd.get_sample_name(cur_input)] ckouts = _run_cnvkit_shared([cur_input], background) out.extend(_associate_cnvkit_out(ckouts, [cur_input])) return out
[ "def", "_run_cnvkit_population", "(", "items", ",", "background", ")", ":", "if", "background", "and", "len", "(", "background", ")", ">", "0", ":", "inputs", "=", "items", "else", ":", "inputs", ",", "background", "=", "shared", ".", "find_case_control", ...
Run CNVkit on a population of samples. Tries to calculate background based on case/controls, otherwise uses samples from the same batch as background.
[ "Run", "CNVkit", "on", "a", "population", "of", "samples", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L141-L163
223,268
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_prep_cmd
def _prep_cmd(cmd, tx_out_file): """Wrap CNVkit commands ensuring we use local temporary directories. """ cmd = " ".join(cmd) if isinstance(cmd, (list, tuple)) else cmd return "export TMPDIR=%s && %s" % (os.path.dirname(tx_out_file), cmd)
python
def _prep_cmd(cmd, tx_out_file): """Wrap CNVkit commands ensuring we use local temporary directories. """ cmd = " ".join(cmd) if isinstance(cmd, (list, tuple)) else cmd return "export TMPDIR=%s && %s" % (os.path.dirname(tx_out_file), cmd)
[ "def", "_prep_cmd", "(", "cmd", ",", "tx_out_file", ")", ":", "cmd", "=", "\" \"", ".", "join", "(", "cmd", ")", "if", "isinstance", "(", "cmd", ",", "(", "list", ",", "tuple", ")", ")", "else", "cmd", "return", "\"export TMPDIR=%s && %s\"", "%", "(", ...
Wrap CNVkit commands ensuring we use local temporary directories.
[ "Wrap", "CNVkit", "commands", "ensuring", "we", "use", "local", "temporary", "directories", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L168-L172
223,269
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_bam_to_outbase
def _bam_to_outbase(bam_file, work_dir, data): """Convert an input BAM file into CNVkit expected output. Handles previous non-batch cases to avoid re-calculating, returning both new and old values: """ batch = dd.get_batch(data) or dd.get_sample_name(data) out_base = os.path.splitext(os.path.basename(bam_file))[0].split(".")[0] base = os.path.join(work_dir, out_base) return "%s-%s" % (base, batch), base
python
def _bam_to_outbase(bam_file, work_dir, data): """Convert an input BAM file into CNVkit expected output. Handles previous non-batch cases to avoid re-calculating, returning both new and old values: """ batch = dd.get_batch(data) or dd.get_sample_name(data) out_base = os.path.splitext(os.path.basename(bam_file))[0].split(".")[0] base = os.path.join(work_dir, out_base) return "%s-%s" % (base, batch), base
[ "def", "_bam_to_outbase", "(", "bam_file", ",", "work_dir", ",", "data", ")", ":", "batch", "=", "dd", ".", "get_batch", "(", "data", ")", "or", "dd", ".", "get_sample_name", "(", "data", ")", "out_base", "=", "os", ".", "path", ".", "splitext", "(", ...
Convert an input BAM file into CNVkit expected output. Handles previous non-batch cases to avoid re-calculating, returning both new and old values:
[ "Convert", "an", "input", "BAM", "file", "into", "CNVkit", "expected", "output", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L174-L183
223,270
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_run_cnvkit_shared
def _run_cnvkit_shared(inputs, backgrounds): """Shared functionality to run CNVkit, parallelizing over multiple BAM files. Handles new style cases where we have pre-normalized inputs and old cases where we run CNVkit individually. """ if tz.get_in(["depth", "bins", "normalized"], inputs[0]): ckouts = [] for data in inputs: cnr_file = tz.get_in(["depth", "bins", "normalized"], data) cns_file = os.path.join(_sv_workdir(data), "%s.cns" % dd.get_sample_name(data)) cns_file = _cnvkit_segment(cnr_file, dd.get_coverage_interval(data), data, inputs + backgrounds, cns_file) ckouts.append({"cnr": cnr_file, "cns": cns_file, "background": tz.get_in(["depth", "bins", "background"], data)}) return ckouts else: return _run_cnvkit_shared_orig(inputs, backgrounds)
python
def _run_cnvkit_shared(inputs, backgrounds): """Shared functionality to run CNVkit, parallelizing over multiple BAM files. Handles new style cases where we have pre-normalized inputs and old cases where we run CNVkit individually. """ if tz.get_in(["depth", "bins", "normalized"], inputs[0]): ckouts = [] for data in inputs: cnr_file = tz.get_in(["depth", "bins", "normalized"], data) cns_file = os.path.join(_sv_workdir(data), "%s.cns" % dd.get_sample_name(data)) cns_file = _cnvkit_segment(cnr_file, dd.get_coverage_interval(data), data, inputs + backgrounds, cns_file) ckouts.append({"cnr": cnr_file, "cns": cns_file, "background": tz.get_in(["depth", "bins", "background"], data)}) return ckouts else: return _run_cnvkit_shared_orig(inputs, backgrounds)
[ "def", "_run_cnvkit_shared", "(", "inputs", ",", "backgrounds", ")", ":", "if", "tz", ".", "get_in", "(", "[", "\"depth\"", ",", "\"bins\"", ",", "\"normalized\"", "]", ",", "inputs", "[", "0", "]", ")", ":", "ckouts", "=", "[", "]", "for", "data", "...
Shared functionality to run CNVkit, parallelizing over multiple BAM files. Handles new style cases where we have pre-normalized inputs and old cases where we run CNVkit individually.
[ "Shared", "functionality", "to", "run", "CNVkit", "parallelizing", "over", "multiple", "BAM", "files", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L201-L218
223,271
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_get_general_coverage
def _get_general_coverage(data, itype): """Retrieve coverage information from new shared SV bins. """ work_bam = dd.get_align_bam(data) or dd.get_work_bam(data) return [{"bam": work_bam, "file": tz.get_in(["depth", "bins", "target"], data), "cnntype": "target", "itype": itype, "sample": dd.get_sample_name(data)}, {"bam": work_bam, "file": tz.get_in(["depth", "bins", "antitarget"], data), "cnntype": "antitarget", "itype": itype, "sample": dd.get_sample_name(data)}]
python
def _get_general_coverage(data, itype): """Retrieve coverage information from new shared SV bins. """ work_bam = dd.get_align_bam(data) or dd.get_work_bam(data) return [{"bam": work_bam, "file": tz.get_in(["depth", "bins", "target"], data), "cnntype": "target", "itype": itype, "sample": dd.get_sample_name(data)}, {"bam": work_bam, "file": tz.get_in(["depth", "bins", "antitarget"], data), "cnntype": "antitarget", "itype": itype, "sample": dd.get_sample_name(data)}]
[ "def", "_get_general_coverage", "(", "data", ",", "itype", ")", ":", "work_bam", "=", "dd", ".", "get_align_bam", "(", "data", ")", "or", "dd", ".", "get_work_bam", "(", "data", ")", "return", "[", "{", "\"bam\"", ":", "work_bam", ",", "\"file\"", ":", ...
Retrieve coverage information from new shared SV bins.
[ "Retrieve", "coverage", "information", "from", "new", "shared", "SV", "bins", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L228-L235
223,272
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_cnvkit_segment
def _cnvkit_segment(cnr_file, cov_interval, data, items, out_file=None, detailed=False): """Perform segmentation and copy number calling on normalized inputs """ if not out_file: out_file = "%s.cns" % os.path.splitext(cnr_file)[0] if not utils.file_uptodate(out_file, cnr_file): with file_transaction(data, out_file) as tx_out_file: if not _cna_has_values(cnr_file): with open(tx_out_file, "w") as out_handle: out_handle.write("chromosome\tstart\tend\tgene\tlog2\tprobes\tCN1\tCN2\tbaf\tweight\n") else: # Scale cores to avoid memory issues with segmentation # https://github.com/etal/cnvkit/issues/346 if cov_interval == "genome": cores = max(1, dd.get_cores(data) // 2) else: cores = dd.get_cores(data) cmd = [_get_cmd(), "segment", "-p", str(cores), "-o", tx_out_file, cnr_file] small_vrn_files = _compatible_small_variants(data, items) if len(small_vrn_files) > 0 and _cna_has_values(cnr_file) and cov_interval != "genome": cmd += ["--vcf", small_vrn_files[0].name, "--sample-id", small_vrn_files[0].sample] if small_vrn_files[0].normal: cmd += ["--normal-id", small_vrn_files[0].normal] resources = config_utils.get_resources("cnvkit_segment", data["config"]) user_options = resources.get("options", []) cmd += [str(x) for x in user_options] if cov_interval == "genome" and "--threshold" not in user_options: cmd += ["--threshold", "0.00001"] # For tumors, remove very low normalized regions, avoiding upcaptured noise # https://github.com/bcbio/bcbio-nextgen/issues/2171#issuecomment-348333650 # unless we want detailed segmentation for downstream tools paired = vcfutils.get_paired(items) if paired: #if detailed: # cmd += ["-m", "hmm-tumor"] if "--drop-low-coverage" not in user_options: cmd += ["--drop-low-coverage"] # preferentially use conda installed Rscript export_cmd = ("%s && export TMPDIR=%s && " % (utils.get_R_exports(), os.path.dirname(tx_out_file))) do.run(export_cmd + " ".join(cmd), "CNVkit segment") return out_file
python
def _cnvkit_segment(cnr_file, cov_interval, data, items, out_file=None, detailed=False): """Perform segmentation and copy number calling on normalized inputs """ if not out_file: out_file = "%s.cns" % os.path.splitext(cnr_file)[0] if not utils.file_uptodate(out_file, cnr_file): with file_transaction(data, out_file) as tx_out_file: if not _cna_has_values(cnr_file): with open(tx_out_file, "w") as out_handle: out_handle.write("chromosome\tstart\tend\tgene\tlog2\tprobes\tCN1\tCN2\tbaf\tweight\n") else: # Scale cores to avoid memory issues with segmentation # https://github.com/etal/cnvkit/issues/346 if cov_interval == "genome": cores = max(1, dd.get_cores(data) // 2) else: cores = dd.get_cores(data) cmd = [_get_cmd(), "segment", "-p", str(cores), "-o", tx_out_file, cnr_file] small_vrn_files = _compatible_small_variants(data, items) if len(small_vrn_files) > 0 and _cna_has_values(cnr_file) and cov_interval != "genome": cmd += ["--vcf", small_vrn_files[0].name, "--sample-id", small_vrn_files[0].sample] if small_vrn_files[0].normal: cmd += ["--normal-id", small_vrn_files[0].normal] resources = config_utils.get_resources("cnvkit_segment", data["config"]) user_options = resources.get("options", []) cmd += [str(x) for x in user_options] if cov_interval == "genome" and "--threshold" not in user_options: cmd += ["--threshold", "0.00001"] # For tumors, remove very low normalized regions, avoiding upcaptured noise # https://github.com/bcbio/bcbio-nextgen/issues/2171#issuecomment-348333650 # unless we want detailed segmentation for downstream tools paired = vcfutils.get_paired(items) if paired: #if detailed: # cmd += ["-m", "hmm-tumor"] if "--drop-low-coverage" not in user_options: cmd += ["--drop-low-coverage"] # preferentially use conda installed Rscript export_cmd = ("%s && export TMPDIR=%s && " % (utils.get_R_exports(), os.path.dirname(tx_out_file))) do.run(export_cmd + " ".join(cmd), "CNVkit segment") return out_file
[ "def", "_cnvkit_segment", "(", "cnr_file", ",", "cov_interval", ",", "data", ",", "items", ",", "out_file", "=", "None", ",", "detailed", "=", "False", ")", ":", "if", "not", "out_file", ":", "out_file", "=", "\"%s.cns\"", "%", "os", ".", "path", ".", ...
Perform segmentation and copy number calling on normalized inputs
[ "Perform", "segmentation", "and", "copy", "number", "calling", "on", "normalized", "inputs" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L297-L338
223,273
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_cnvkit_metrics
def _cnvkit_metrics(cnns, target_bed, antitarget_bed, cov_interval, items): """Estimate noise of a sample using a flat background. Only used for panel/targeted data due to memory issues with whole genome samples. """ if cov_interval == "genome": return cnns target_cnn = [x["file"] for x in cnns if x["cnntype"] == "target"][0] background_file = "%s-flatbackground.cnn" % utils.splitext_plus(target_cnn)[0] background_file = cnvkit_background([], background_file, items, target_bed, antitarget_bed) cnr_file, data = _cnvkit_fix_base(cnns, background_file, items, "-flatbackground") cns_file = _cnvkit_segment(cnr_file, cov_interval, data) metrics_file = "%s-metrics.txt" % utils.splitext_plus(target_cnn)[0] if not utils.file_exists(metrics_file): with file_transaction(data, metrics_file) as tx_metrics_file: cmd = [_get_cmd(), "metrics", "-o", tx_metrics_file, "-s", cns_file, "--", cnr_file] do.run(_prep_cmd(cmd, tx_metrics_file), "CNVkit metrics") metrics = _read_metrics_file(metrics_file) out = [] for cnn in cnns: cnn["metrics"] = metrics out.append(cnn) return out
python
def _cnvkit_metrics(cnns, target_bed, antitarget_bed, cov_interval, items): """Estimate noise of a sample using a flat background. Only used for panel/targeted data due to memory issues with whole genome samples. """ if cov_interval == "genome": return cnns target_cnn = [x["file"] for x in cnns if x["cnntype"] == "target"][0] background_file = "%s-flatbackground.cnn" % utils.splitext_plus(target_cnn)[0] background_file = cnvkit_background([], background_file, items, target_bed, antitarget_bed) cnr_file, data = _cnvkit_fix_base(cnns, background_file, items, "-flatbackground") cns_file = _cnvkit_segment(cnr_file, cov_interval, data) metrics_file = "%s-metrics.txt" % utils.splitext_plus(target_cnn)[0] if not utils.file_exists(metrics_file): with file_transaction(data, metrics_file) as tx_metrics_file: cmd = [_get_cmd(), "metrics", "-o", tx_metrics_file, "-s", cns_file, "--", cnr_file] do.run(_prep_cmd(cmd, tx_metrics_file), "CNVkit metrics") metrics = _read_metrics_file(metrics_file) out = [] for cnn in cnns: cnn["metrics"] = metrics out.append(cnn) return out
[ "def", "_cnvkit_metrics", "(", "cnns", ",", "target_bed", ",", "antitarget_bed", ",", "cov_interval", ",", "items", ")", ":", "if", "cov_interval", "==", "\"genome\"", ":", "return", "cnns", "target_cnn", "=", "[", "x", "[", "\"file\"", "]", "for", "x", "i...
Estimate noise of a sample using a flat background. Only used for panel/targeted data due to memory issues with whole genome samples.
[ "Estimate", "noise", "of", "a", "sample", "using", "a", "flat", "background", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L340-L364
223,274
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_cnvkit_fix
def _cnvkit_fix(cnns, background_cnn, items, ckouts): """Normalize samples, correcting sources of bias. """ return [_cnvkit_fix_base(cnns, background_cnn, items, ckouts)]
python
def _cnvkit_fix(cnns, background_cnn, items, ckouts): """Normalize samples, correcting sources of bias. """ return [_cnvkit_fix_base(cnns, background_cnn, items, ckouts)]
[ "def", "_cnvkit_fix", "(", "cnns", ",", "background_cnn", ",", "items", ",", "ckouts", ")", ":", "return", "[", "_cnvkit_fix_base", "(", "cnns", ",", "background_cnn", ",", "items", ",", "ckouts", ")", "]" ]
Normalize samples, correcting sources of bias.
[ "Normalize", "samples", "correcting", "sources", "of", "bias", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L374-L377
223,275
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_select_background_cnns
def _select_background_cnns(cnns): """Select cnns to use for background calculations. Uses background samples in cohort, and will remove CNNs with high on target variability. Uses (number of segments * biweight midvariance) as metric for variability with higher numbers being more unreliable. """ min_for_variability_analysis = 20 pct_keep = 0.10 b_cnns = [x for x in cnns if x["itype"] == "background" and x.get("metrics")] assert len(b_cnns) % 2 == 0, "Expect even set of target/antitarget cnns for background" if len(b_cnns) >= min_for_variability_analysis: b_cnns_w_metrics = [] for b_cnn in b_cnns: unreliability = b_cnn["metrics"]["segments"] * b_cnn["metrics"]["bivar"] b_cnns_w_metrics.append((unreliability, b_cnn)) b_cnns_w_metrics.sort() to_keep = int(math.ceil(pct_keep * len(b_cnns) / 2.0) * 2) b_cnns = [x[1] for x in b_cnns_w_metrics][:to_keep] assert len(b_cnns) % 2 == 0, "Expect even set of target/antitarget cnns for background" return [x["file"] for x in b_cnns]
python
def _select_background_cnns(cnns): """Select cnns to use for background calculations. Uses background samples in cohort, and will remove CNNs with high on target variability. Uses (number of segments * biweight midvariance) as metric for variability with higher numbers being more unreliable. """ min_for_variability_analysis = 20 pct_keep = 0.10 b_cnns = [x for x in cnns if x["itype"] == "background" and x.get("metrics")] assert len(b_cnns) % 2 == 0, "Expect even set of target/antitarget cnns for background" if len(b_cnns) >= min_for_variability_analysis: b_cnns_w_metrics = [] for b_cnn in b_cnns: unreliability = b_cnn["metrics"]["segments"] * b_cnn["metrics"]["bivar"] b_cnns_w_metrics.append((unreliability, b_cnn)) b_cnns_w_metrics.sort() to_keep = int(math.ceil(pct_keep * len(b_cnns) / 2.0) * 2) b_cnns = [x[1] for x in b_cnns_w_metrics][:to_keep] assert len(b_cnns) % 2 == 0, "Expect even set of target/antitarget cnns for background" return [x["file"] for x in b_cnns]
[ "def", "_select_background_cnns", "(", "cnns", ")", ":", "min_for_variability_analysis", "=", "20", "pct_keep", "=", "0.10", "b_cnns", "=", "[", "x", "for", "x", "in", "cnns", "if", "x", "[", "\"itype\"", "]", "==", "\"background\"", "and", "x", ".", "get"...
Select cnns to use for background calculations. Uses background samples in cohort, and will remove CNNs with high on target variability. Uses (number of segments * biweight midvariance) as metric for variability with higher numbers being more unreliable.
[ "Select", "cnns", "to", "use", "for", "background", "calculations", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L400-L420
223,276
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
cnvkit_background
def cnvkit_background(background_cnns, out_file, items, target_bed=None, antitarget_bed=None): """Calculate background reference, handling flat case with no normal sample. """ if not utils.file_exists(out_file): with file_transaction(items[0], out_file) as tx_out_file: cmd = [_get_cmd(), "reference", "-f", dd.get_ref_file(items[0]), "-o", tx_out_file] gender = _get_batch_gender(items) if gender: cmd += ["--sample-sex", gender] if len(background_cnns) == 0: assert target_bed and antitarget_bed, "Missing CNNs and target BEDs for flat background" cmd += ["-t", target_bed, "-a", antitarget_bed] else: cmd += background_cnns do.run(_prep_cmd(cmd, tx_out_file), "CNVkit background") return out_file
python
def cnvkit_background(background_cnns, out_file, items, target_bed=None, antitarget_bed=None): """Calculate background reference, handling flat case with no normal sample. """ if not utils.file_exists(out_file): with file_transaction(items[0], out_file) as tx_out_file: cmd = [_get_cmd(), "reference", "-f", dd.get_ref_file(items[0]), "-o", tx_out_file] gender = _get_batch_gender(items) if gender: cmd += ["--sample-sex", gender] if len(background_cnns) == 0: assert target_bed and antitarget_bed, "Missing CNNs and target BEDs for flat background" cmd += ["-t", target_bed, "-a", antitarget_bed] else: cmd += background_cnns do.run(_prep_cmd(cmd, tx_out_file), "CNVkit background") return out_file
[ "def", "cnvkit_background", "(", "background_cnns", ",", "out_file", ",", "items", ",", "target_bed", "=", "None", ",", "antitarget_bed", "=", "None", ")", ":", "if", "not", "utils", ".", "file_exists", "(", "out_file", ")", ":", "with", "file_transaction", ...
Calculate background reference, handling flat case with no normal sample.
[ "Calculate", "background", "reference", "handling", "flat", "case", "with", "no", "normal", "sample", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L422-L437
223,277
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_get_batch_gender
def _get_batch_gender(items): """Retrieve gender for a batch of items if consistent. Better not to specify for mixed populations, CNVkit will work it out https://github.com/bcbio/bcbio-nextgen/commit/1a0e217c8a4d3cee10fa890fb3cfd4db5034281d#r26279752 """ genders = set([population.get_gender(x) for x in items]) if len(genders) == 1: gender = genders.pop() if gender != "unknown": return gender
python
def _get_batch_gender(items): """Retrieve gender for a batch of items if consistent. Better not to specify for mixed populations, CNVkit will work it out https://github.com/bcbio/bcbio-nextgen/commit/1a0e217c8a4d3cee10fa890fb3cfd4db5034281d#r26279752 """ genders = set([population.get_gender(x) for x in items]) if len(genders) == 1: gender = genders.pop() if gender != "unknown": return gender
[ "def", "_get_batch_gender", "(", "items", ")", ":", "genders", "=", "set", "(", "[", "population", ".", "get_gender", "(", "x", ")", "for", "x", "in", "items", "]", ")", "if", "len", "(", "genders", ")", "==", "1", ":", "gender", "=", "genders", "....
Retrieve gender for a batch of items if consistent. Better not to specify for mixed populations, CNVkit will work it out https://github.com/bcbio/bcbio-nextgen/commit/1a0e217c8a4d3cee10fa890fb3cfd4db5034281d#r26279752
[ "Retrieve", "gender", "for", "a", "batch", "of", "items", "if", "consistent", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L440-L451
223,278
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
targets_w_bins
def targets_w_bins(cnv_file, access_file, target_anti_fn, work_dir, data): """Calculate target and anti-target files with pre-determined bins. """ target_file = os.path.join(work_dir, "%s-target.bed" % dd.get_sample_name(data)) anti_file = os.path.join(work_dir, "%s-antitarget.bed" % dd.get_sample_name(data)) if not utils.file_exists(target_file): target_bin, _ = target_anti_fn() with file_transaction(data, target_file) as tx_out_file: cmd = [_get_cmd(), "target", cnv_file, "--split", "-o", tx_out_file, "--avg-size", str(target_bin)] do.run(_prep_cmd(cmd, tx_out_file), "CNVkit target") if not os.path.exists(anti_file): _, anti_bin = target_anti_fn() with file_transaction(data, anti_file) as tx_out_file: # Create access file without targets to avoid overlap # antitarget in cnvkit is meant to do this but appears to not always happen # after chromosome 1 tx_access_file = os.path.join(os.path.dirname(tx_out_file), os.path.basename(access_file)) pybedtools.BedTool(access_file).subtract(cnv_file).saveas(tx_access_file) cmd = [_get_cmd(), "antitarget", "-g", tx_access_file, cnv_file, "-o", tx_out_file, "--avg-size", str(anti_bin)] do.run(_prep_cmd(cmd, tx_out_file), "CNVkit antitarget") return target_file, anti_file
python
def targets_w_bins(cnv_file, access_file, target_anti_fn, work_dir, data): """Calculate target and anti-target files with pre-determined bins. """ target_file = os.path.join(work_dir, "%s-target.bed" % dd.get_sample_name(data)) anti_file = os.path.join(work_dir, "%s-antitarget.bed" % dd.get_sample_name(data)) if not utils.file_exists(target_file): target_bin, _ = target_anti_fn() with file_transaction(data, target_file) as tx_out_file: cmd = [_get_cmd(), "target", cnv_file, "--split", "-o", tx_out_file, "--avg-size", str(target_bin)] do.run(_prep_cmd(cmd, tx_out_file), "CNVkit target") if not os.path.exists(anti_file): _, anti_bin = target_anti_fn() with file_transaction(data, anti_file) as tx_out_file: # Create access file without targets to avoid overlap # antitarget in cnvkit is meant to do this but appears to not always happen # after chromosome 1 tx_access_file = os.path.join(os.path.dirname(tx_out_file), os.path.basename(access_file)) pybedtools.BedTool(access_file).subtract(cnv_file).saveas(tx_access_file) cmd = [_get_cmd(), "antitarget", "-g", tx_access_file, cnv_file, "-o", tx_out_file, "--avg-size", str(anti_bin)] do.run(_prep_cmd(cmd, tx_out_file), "CNVkit antitarget") return target_file, anti_file
[ "def", "targets_w_bins", "(", "cnv_file", ",", "access_file", ",", "target_anti_fn", ",", "work_dir", ",", "data", ")", ":", "target_file", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"%s-target.bed\"", "%", "dd", ".", "get_sample_name", "("...
Calculate target and anti-target files with pre-determined bins.
[ "Calculate", "target", "and", "anti", "-", "target", "files", "with", "pre", "-", "determined", "bins", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L453-L475
223,279
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
targets_from_background
def targets_from_background(back_cnn, work_dir, data): """Retrieve target and antitarget BEDs from background CNN file. """ target_file = os.path.join(work_dir, "%s.target.bed" % dd.get_sample_name(data)) anti_file = os.path.join(work_dir, "%s.antitarget.bed" % dd.get_sample_name(data)) if not utils.file_exists(target_file): with file_transaction(data, target_file) as tx_out_file: out_base = tx_out_file.replace(".target.bed", "") cmd = [_get_cmd("reference2targets.py"), "-o", out_base, back_cnn] do.run(_prep_cmd(cmd, tx_out_file), "CNVkit targets from background") shutil.copy(out_base + ".antitarget.bed", anti_file) return target_file, anti_file
python
def targets_from_background(back_cnn, work_dir, data): """Retrieve target and antitarget BEDs from background CNN file. """ target_file = os.path.join(work_dir, "%s.target.bed" % dd.get_sample_name(data)) anti_file = os.path.join(work_dir, "%s.antitarget.bed" % dd.get_sample_name(data)) if not utils.file_exists(target_file): with file_transaction(data, target_file) as tx_out_file: out_base = tx_out_file.replace(".target.bed", "") cmd = [_get_cmd("reference2targets.py"), "-o", out_base, back_cnn] do.run(_prep_cmd(cmd, tx_out_file), "CNVkit targets from background") shutil.copy(out_base + ".antitarget.bed", anti_file) return target_file, anti_file
[ "def", "targets_from_background", "(", "back_cnn", ",", "work_dir", ",", "data", ")", ":", "target_file", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"%s.target.bed\"", "%", "dd", ".", "get_sample_name", "(", "data", ")", ")", "anti_file", ...
Retrieve target and antitarget BEDs from background CNN file.
[ "Retrieve", "target", "and", "antitarget", "BEDs", "from", "background", "CNN", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L477-L488
223,280
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_add_seg_to_output
def _add_seg_to_output(out, data, enumerate_chroms=False): """Export outputs to 'seg' format compatible with IGV and GenePattern. """ out_file = "%s.seg" % os.path.splitext(out["cns"])[0] if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "export", "seg"] if enumerate_chroms: cmd += ["--enumerate-chroms"] cmd += ["-o", tx_out_file, out["cns"]] do.run(cmd, "CNVkit export seg") out["seg"] = out_file return out
python
def _add_seg_to_output(out, data, enumerate_chroms=False): """Export outputs to 'seg' format compatible with IGV and GenePattern. """ out_file = "%s.seg" % os.path.splitext(out["cns"])[0] if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "export", "seg"] if enumerate_chroms: cmd += ["--enumerate-chroms"] cmd += ["-o", tx_out_file, out["cns"]] do.run(cmd, "CNVkit export seg") out["seg"] = out_file return out
[ "def", "_add_seg_to_output", "(", "out", ",", "data", ",", "enumerate_chroms", "=", "False", ")", ":", "out_file", "=", "\"%s.seg\"", "%", "os", ".", "path", ".", "splitext", "(", "out", "[", "\"cns\"", "]", ")", "[", "0", "]", "if", "not", "utils", ...
Export outputs to 'seg' format compatible with IGV and GenePattern.
[ "Export", "outputs", "to", "seg", "format", "compatible", "with", "IGV", "and", "GenePattern", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L490-L503
223,281
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_add_variantcalls_to_output
def _add_variantcalls_to_output(out, data, items, is_somatic=False): """Call ploidy and convert into VCF and BED representations. """ call_file = "%s-call%s" % os.path.splitext(out["cns"]) if not utils.file_exists(call_file): with file_transaction(data, call_file) as tx_call_file: filters = ["--filter", "cn"] cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "call"] + \ filters + \ ["--ploidy", str(ploidy.get_ploidy([data])), "-o", tx_call_file, out["cns"]] small_vrn_files = _compatible_small_variants(data, items) if len(small_vrn_files) > 0 and _cna_has_values(out["cns"]): cmd += ["--vcf", small_vrn_files[0].name, "--sample-id", small_vrn_files[0].sample] if small_vrn_files[0].normal: cmd += ["--normal-id", small_vrn_files[0].normal] if not is_somatic: cmd += ["-m", "clonal"] gender = _get_batch_gender(items) if gender: cmd += ["--sample-sex", gender] do.run(cmd, "CNVkit call ploidy") calls = {} for outformat in ["bed", "vcf"]: out_file = "%s.%s" % (os.path.splitext(call_file)[0], outformat) calls[outformat] = out_file if not os.path.exists(out_file): with file_transaction(data, out_file) as tx_out_file: cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "export", outformat, "--sample-id", dd.get_sample_name(data), "--ploidy", str(ploidy.get_ploidy([data])), "-o", tx_out_file, call_file] do.run(cmd, "CNVkit export %s" % outformat) out["call_file"] = call_file out["vrn_bed"] = annotate.add_genes(calls["bed"], data) effects_vcf, _ = effects.add_to_vcf(calls["vcf"], data, "snpeff") out["vrn_file"] = effects_vcf or calls["vcf"] out["vrn_file"] = shared.annotate_with_depth(out["vrn_file"], items) return out
python
def _add_variantcalls_to_output(out, data, items, is_somatic=False): """Call ploidy and convert into VCF and BED representations. """ call_file = "%s-call%s" % os.path.splitext(out["cns"]) if not utils.file_exists(call_file): with file_transaction(data, call_file) as tx_call_file: filters = ["--filter", "cn"] cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "call"] + \ filters + \ ["--ploidy", str(ploidy.get_ploidy([data])), "-o", tx_call_file, out["cns"]] small_vrn_files = _compatible_small_variants(data, items) if len(small_vrn_files) > 0 and _cna_has_values(out["cns"]): cmd += ["--vcf", small_vrn_files[0].name, "--sample-id", small_vrn_files[0].sample] if small_vrn_files[0].normal: cmd += ["--normal-id", small_vrn_files[0].normal] if not is_somatic: cmd += ["-m", "clonal"] gender = _get_batch_gender(items) if gender: cmd += ["--sample-sex", gender] do.run(cmd, "CNVkit call ploidy") calls = {} for outformat in ["bed", "vcf"]: out_file = "%s.%s" % (os.path.splitext(call_file)[0], outformat) calls[outformat] = out_file if not os.path.exists(out_file): with file_transaction(data, out_file) as tx_out_file: cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "export", outformat, "--sample-id", dd.get_sample_name(data), "--ploidy", str(ploidy.get_ploidy([data])), "-o", tx_out_file, call_file] do.run(cmd, "CNVkit export %s" % outformat) out["call_file"] = call_file out["vrn_bed"] = annotate.add_genes(calls["bed"], data) effects_vcf, _ = effects.add_to_vcf(calls["vcf"], data, "snpeff") out["vrn_file"] = effects_vcf or calls["vcf"] out["vrn_file"] = shared.annotate_with_depth(out["vrn_file"], items) return out
[ "def", "_add_variantcalls_to_output", "(", "out", ",", "data", ",", "items", ",", "is_somatic", "=", "False", ")", ":", "call_file", "=", "\"%s-call%s\"", "%", "os", ".", "path", ".", "splitext", "(", "out", "[", "\"cns\"", "]", ")", "if", "not", "utils"...
Call ploidy and convert into VCF and BED representations.
[ "Call", "ploidy", "and", "convert", "into", "VCF", "and", "BED", "representations", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L538-L576
223,282
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_add_segmetrics_to_output
def _add_segmetrics_to_output(out, data): """Add metrics for measuring reliability of CNV estimates. """ out_file = "%s-segmetrics.txt" % os.path.splitext(out["cns"])[0] if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "segmetrics", "--median", "--iqr", "--ci", "--pi", "-s", out["cns"], "-o", tx_out_file, out["cnr"]] # Use less fine grained bootstrapping intervals for whole genome runs if dd.get_coverage_interval(data) == "genome": cmd += ["--alpha", "0.1", "--bootstrap", "50"] else: cmd += ["--alpha", "0.01", "--bootstrap", "500"] do.run(cmd, "CNVkit segmetrics") out["segmetrics"] = out_file return out
python
def _add_segmetrics_to_output(out, data): """Add metrics for measuring reliability of CNV estimates. """ out_file = "%s-segmetrics.txt" % os.path.splitext(out["cns"])[0] if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "segmetrics", "--median", "--iqr", "--ci", "--pi", "-s", out["cns"], "-o", tx_out_file, out["cnr"]] # Use less fine grained bootstrapping intervals for whole genome runs if dd.get_coverage_interval(data) == "genome": cmd += ["--alpha", "0.1", "--bootstrap", "50"] else: cmd += ["--alpha", "0.01", "--bootstrap", "500"] do.run(cmd, "CNVkit segmetrics") out["segmetrics"] = out_file return out
[ "def", "_add_segmetrics_to_output", "(", "out", ",", "data", ")", ":", "out_file", "=", "\"%s-segmetrics.txt\"", "%", "os", ".", "path", ".", "splitext", "(", "out", "[", "\"cns\"", "]", ")", "[", "0", "]", "if", "not", "utils", ".", "file_exists", "(", ...
Add metrics for measuring reliability of CNV estimates.
[ "Add", "metrics", "for", "measuring", "reliability", "of", "CNV", "estimates", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L578-L594
223,283
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_add_gainloss_to_output
def _add_gainloss_to_output(out, data): """Add gainloss based on genes, helpful for identifying changes in smaller genes. """ out_file = "%s-gainloss.txt" % os.path.splitext(out["cns"])[0] if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "gainloss", "-s", out["cns"], "-o", tx_out_file, out["cnr"]] gender = _get_batch_gender([data]) if gender: cmd += ["--sample-sex", gender] do.run(cmd, "CNVkit gainloss") out["gainloss"] = out_file return out
python
def _add_gainloss_to_output(out, data): """Add gainloss based on genes, helpful for identifying changes in smaller genes. """ out_file = "%s-gainloss.txt" % os.path.splitext(out["cns"])[0] if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "gainloss", "-s", out["cns"], "-o", tx_out_file, out["cnr"]] gender = _get_batch_gender([data]) if gender: cmd += ["--sample-sex", gender] do.run(cmd, "CNVkit gainloss") out["gainloss"] = out_file return out
[ "def", "_add_gainloss_to_output", "(", "out", ",", "data", ")", ":", "out_file", "=", "\"%s-gainloss.txt\"", "%", "os", ".", "path", ".", "splitext", "(", "out", "[", "\"cns\"", "]", ")", "[", "0", "]", "if", "not", "utils", ".", "file_exists", "(", "o...
Add gainloss based on genes, helpful for identifying changes in smaller genes.
[ "Add", "gainloss", "based", "on", "genes", "helpful", "for", "identifying", "changes", "in", "smaller", "genes", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L596-L609
223,284
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_add_coverage_bedgraph_to_output
def _add_coverage_bedgraph_to_output(out, data): """Add BedGraph representation of coverage to the output """ out_file = "%s.coverage.bedgraph" % os.path.splitext(out["cns"])[0] if utils.file_exists(out_file): out["bedgraph"] = out_file return out bam_file = dd.get_align_bam(data) bedtools = config_utils.get_program("bedtools", data["config"]) samtools = config_utils.get_program("samtools", data["config"]) cns_file = out["cns"] bed_file = tempfile.NamedTemporaryFile(suffix=".bed", delete=False).name with file_transaction(data, out_file) as tx_out_file: cmd = ("sed 1d {cns_file} | cut -f1,2,3 > {bed_file}; " "{samtools} view -b -L {bed_file} {bam_file} | " "{bedtools} genomecov -bg -ibam - -g {bed_file} >" "{tx_out_file}").format(**locals()) do.run(cmd, "CNVkit bedGraph conversion") os.remove(bed_file) out["bedgraph"] = out_file return out
python
def _add_coverage_bedgraph_to_output(out, data): """Add BedGraph representation of coverage to the output """ out_file = "%s.coverage.bedgraph" % os.path.splitext(out["cns"])[0] if utils.file_exists(out_file): out["bedgraph"] = out_file return out bam_file = dd.get_align_bam(data) bedtools = config_utils.get_program("bedtools", data["config"]) samtools = config_utils.get_program("samtools", data["config"]) cns_file = out["cns"] bed_file = tempfile.NamedTemporaryFile(suffix=".bed", delete=False).name with file_transaction(data, out_file) as tx_out_file: cmd = ("sed 1d {cns_file} | cut -f1,2,3 > {bed_file}; " "{samtools} view -b -L {bed_file} {bam_file} | " "{bedtools} genomecov -bg -ibam - -g {bed_file} >" "{tx_out_file}").format(**locals()) do.run(cmd, "CNVkit bedGraph conversion") os.remove(bed_file) out["bedgraph"] = out_file return out
[ "def", "_add_coverage_bedgraph_to_output", "(", "out", ",", "data", ")", ":", "out_file", "=", "\"%s.coverage.bedgraph\"", "%", "os", ".", "path", ".", "splitext", "(", "out", "[", "\"cns\"", "]", ")", "[", "0", "]", "if", "utils", ".", "file_exists", "(",...
Add BedGraph representation of coverage to the output
[ "Add", "BedGraph", "representation", "of", "coverage", "to", "the", "output" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L611-L631
223,285
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_add_plots_to_output
def _add_plots_to_output(out, data): """Add CNVkit plots summarizing called copy number values. """ out["plot"] = {} diagram_plot = _add_diagram_plot(out, data) if diagram_plot: out["plot"]["diagram"] = diagram_plot scatter = _add_scatter_plot(out, data) if scatter: out["plot"]["scatter"] = scatter scatter_global = _add_global_scatter_plot(out, data) if scatter_global: out["plot"]["scatter_global"] = scatter_global return out
python
def _add_plots_to_output(out, data): """Add CNVkit plots summarizing called copy number values. """ out["plot"] = {} diagram_plot = _add_diagram_plot(out, data) if diagram_plot: out["plot"]["diagram"] = diagram_plot scatter = _add_scatter_plot(out, data) if scatter: out["plot"]["scatter"] = scatter scatter_global = _add_global_scatter_plot(out, data) if scatter_global: out["plot"]["scatter_global"] = scatter_global return out
[ "def", "_add_plots_to_output", "(", "out", ",", "data", ")", ":", "out", "[", "\"plot\"", "]", "=", "{", "}", "diagram_plot", "=", "_add_diagram_plot", "(", "out", ",", "data", ")", "if", "diagram_plot", ":", "out", "[", "\"plot\"", "]", "[", "\"diagram\...
Add CNVkit plots summarizing called copy number values.
[ "Add", "CNVkit", "plots", "summarizing", "called", "copy", "number", "values", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L633-L646
223,286
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_get_larger_chroms
def _get_larger_chroms(ref_file): """Retrieve larger chromosomes, avoiding the smaller ones for plotting. """ from scipy.cluster.vq import kmeans, vq all_sizes = [] for c in ref.file_contigs(ref_file): all_sizes.append(float(c.size)) all_sizes.sort() if len(all_sizes) > 5: # separate out smaller chromosomes and haplotypes with kmeans centroids, _ = kmeans(np.array(all_sizes), 2) idx, _ = vq(np.array(all_sizes), centroids) little_sizes = tz.first(tz.partitionby(lambda xs: xs[0], zip(idx, all_sizes))) little_sizes = [x[1] for x in little_sizes] # create one more cluster with the smaller, removing the haplotypes centroids2, _ = kmeans(np.array(little_sizes), 2) idx2, _ = vq(np.array(little_sizes), centroids2) little_sizes2 = tz.first(tz.partitionby(lambda xs: xs[0], zip(idx2, little_sizes))) little_sizes2 = [x[1] for x in little_sizes2] # get any chromosomes not in haplotype/random bin thresh = max(little_sizes2) else: thresh = 0 larger_chroms = [] for c in ref.file_contigs(ref_file): if c.size > thresh: larger_chroms.append(c.name) return larger_chroms
python
def _get_larger_chroms(ref_file): """Retrieve larger chromosomes, avoiding the smaller ones for plotting. """ from scipy.cluster.vq import kmeans, vq all_sizes = [] for c in ref.file_contigs(ref_file): all_sizes.append(float(c.size)) all_sizes.sort() if len(all_sizes) > 5: # separate out smaller chromosomes and haplotypes with kmeans centroids, _ = kmeans(np.array(all_sizes), 2) idx, _ = vq(np.array(all_sizes), centroids) little_sizes = tz.first(tz.partitionby(lambda xs: xs[0], zip(idx, all_sizes))) little_sizes = [x[1] for x in little_sizes] # create one more cluster with the smaller, removing the haplotypes centroids2, _ = kmeans(np.array(little_sizes), 2) idx2, _ = vq(np.array(little_sizes), centroids2) little_sizes2 = tz.first(tz.partitionby(lambda xs: xs[0], zip(idx2, little_sizes))) little_sizes2 = [x[1] for x in little_sizes2] # get any chromosomes not in haplotype/random bin thresh = max(little_sizes2) else: thresh = 0 larger_chroms = [] for c in ref.file_contigs(ref_file): if c.size > thresh: larger_chroms.append(c.name) return larger_chroms
[ "def", "_get_larger_chroms", "(", "ref_file", ")", ":", "from", "scipy", ".", "cluster", ".", "vq", "import", "kmeans", ",", "vq", "all_sizes", "=", "[", "]", "for", "c", "in", "ref", ".", "file_contigs", "(", "ref_file", ")", ":", "all_sizes", ".", "a...
Retrieve larger chromosomes, avoiding the smaller ones for plotting.
[ "Retrieve", "larger", "chromosomes", "avoiding", "the", "smaller", "ones", "for", "plotting", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L648-L675
223,287
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
segment_from_cnr
def segment_from_cnr(cnr_file, data, out_base): """Provide segmentation on a cnr file, used in external PureCN integration. """ cns_file = _cnvkit_segment(cnr_file, dd.get_coverage_interval(data), data, [data], out_file="%s.cns" % out_base, detailed=True) out = _add_seg_to_output({"cns": cns_file}, data, enumerate_chroms=False) return out["seg"]
python
def segment_from_cnr(cnr_file, data, out_base): """Provide segmentation on a cnr file, used in external PureCN integration. """ cns_file = _cnvkit_segment(cnr_file, dd.get_coverage_interval(data), data, [data], out_file="%s.cns" % out_base, detailed=True) out = _add_seg_to_output({"cns": cns_file}, data, enumerate_chroms=False) return out["seg"]
[ "def", "segment_from_cnr", "(", "cnr_file", ",", "data", ",", "out_base", ")", ":", "cns_file", "=", "_cnvkit_segment", "(", "cnr_file", ",", "dd", ".", "get_coverage_interval", "(", "data", ")", ",", "data", ",", "[", "data", "]", ",", "out_file", "=", ...
Provide segmentation on a cnr file, used in external PureCN integration.
[ "Provide", "segmentation", "on", "a", "cnr", "file", "used", "in", "external", "PureCN", "integration", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L747-L753
223,288
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
export_theta
def export_theta(ckout, data): """Provide updated set of data with export information for TheTA2 input. """ cns_file = chromhacks.bed_to_standardonly(ckout["cns"], data, headers="chromosome") cnr_file = chromhacks.bed_to_standardonly(ckout["cnr"], data, headers="chromosome") out_file = "%s-theta.input" % utils.splitext_plus(cns_file)[0] if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: cmd = [_get_cmd(), "export", "theta", cns_file, cnr_file, "-o", tx_out_file] do.run(_prep_cmd(cmd, tx_out_file), "Export CNVkit calls as inputs for TheTA2") ckout["theta_input"] = out_file return ckout
python
def export_theta(ckout, data): """Provide updated set of data with export information for TheTA2 input. """ cns_file = chromhacks.bed_to_standardonly(ckout["cns"], data, headers="chromosome") cnr_file = chromhacks.bed_to_standardonly(ckout["cnr"], data, headers="chromosome") out_file = "%s-theta.input" % utils.splitext_plus(cns_file)[0] if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: cmd = [_get_cmd(), "export", "theta", cns_file, cnr_file, "-o", tx_out_file] do.run(_prep_cmd(cmd, tx_out_file), "Export CNVkit calls as inputs for TheTA2") ckout["theta_input"] = out_file return ckout
[ "def", "export_theta", "(", "ckout", ",", "data", ")", ":", "cns_file", "=", "chromhacks", ".", "bed_to_standardonly", "(", "ckout", "[", "\"cns\"", "]", ",", "data", ",", "headers", "=", "\"chromosome\"", ")", "cnr_file", "=", "chromhacks", ".", "bed_to_sta...
Provide updated set of data with export information for TheTA2 input.
[ "Provide", "updated", "set", "of", "data", "with", "export", "information", "for", "TheTA2", "input", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L757-L768
223,289
bcbio/bcbio-nextgen
bcbio/rnaseq/stringtie.py
_stringtie_expression
def _stringtie_expression(bam, data, out_dir="."): """ only estimate expression the Stringtie, do not assemble new transcripts """ gtf_file = dd.get_gtf_file(data) num_cores = dd.get_num_cores(data) error_message = "The %s file for %s is missing. StringTie has an error." stringtie = config_utils.get_program("stringtie", data, default="stringtie") # don't assemble transcripts unless asked exp_flag = ("-e" if "stringtie" not in dd.get_transcript_assembler(data) else "") base_cmd = ("{stringtie} {exp_flag} -b {out_dir} -p {num_cores} -G {gtf_file} " "-o {out_gtf} {bam}") transcript_file = os.path.join(out_dir, "t_data.ctab") exon_file = os.path.join(out_dir, "e_data.ctab") out_gtf = os.path.join(out_dir, "stringtie-assembly.gtf") if file_exists(transcript_file): return exon_file, transcript_file, out_gtf cmd = base_cmd.format(**locals()) do.run(cmd, "Running Stringtie on %s." % bam) assert file_exists(exon_file), error_message % ("exon", exon_file) assert file_exists(transcript_file), error_message % ("transcript", transcript_file) return transcript_file
python
def _stringtie_expression(bam, data, out_dir="."): """ only estimate expression the Stringtie, do not assemble new transcripts """ gtf_file = dd.get_gtf_file(data) num_cores = dd.get_num_cores(data) error_message = "The %s file for %s is missing. StringTie has an error." stringtie = config_utils.get_program("stringtie", data, default="stringtie") # don't assemble transcripts unless asked exp_flag = ("-e" if "stringtie" not in dd.get_transcript_assembler(data) else "") base_cmd = ("{stringtie} {exp_flag} -b {out_dir} -p {num_cores} -G {gtf_file} " "-o {out_gtf} {bam}") transcript_file = os.path.join(out_dir, "t_data.ctab") exon_file = os.path.join(out_dir, "e_data.ctab") out_gtf = os.path.join(out_dir, "stringtie-assembly.gtf") if file_exists(transcript_file): return exon_file, transcript_file, out_gtf cmd = base_cmd.format(**locals()) do.run(cmd, "Running Stringtie on %s." % bam) assert file_exists(exon_file), error_message % ("exon", exon_file) assert file_exists(transcript_file), error_message % ("transcript", transcript_file) return transcript_file
[ "def", "_stringtie_expression", "(", "bam", ",", "data", ",", "out_dir", "=", "\".\"", ")", ":", "gtf_file", "=", "dd", ".", "get_gtf_file", "(", "data", ")", "num_cores", "=", "dd", ".", "get_num_cores", "(", "data", ")", "error_message", "=", "\"The %s f...
only estimate expression the Stringtie, do not assemble new transcripts
[ "only", "estimate", "expression", "the", "Stringtie", "do", "not", "assemble", "new", "transcripts" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/stringtie.py#L21-L43
223,290
bcbio/bcbio-nextgen
bcbio/rnaseq/stringtie.py
run_stringtie_expression
def run_stringtie_expression(data): """ estimate expression from Stringtie, using the bcbio datadict does not do transcriptome assembly """ bam = dd.get_work_bam(data) sample_name = dd.get_sample_name(data) out_dir = os.path.join("stringtie", sample_name) isoform_fpkm = os.path.join(out_dir, sample_name + ".isoform.fpkm") gene_fpkm = os.path.join(out_dir, sample_name + ".fpkm") assembly = os.path.abspath(os.path.join(out_dir, "stringtie-assembly.gtf")) if file_exists(isoform_fpkm) and file_exists(gene_fpkm): data = dd.set_stringtie_dir(data, out_dir) data = dd.set_fpkm(data, gene_fpkm) data = dd.set_fpkm_isoform(data, isoform_fpkm) if "stringtie" in dd.get_transcript_assembler(data): assembled_gtfs = dd.get_assembled_gtf(data) assembled_gtfs.append(assembly) data = dd.set_assembled_gtf(data, assembled_gtfs) return data with file_transaction(data, out_dir) as tx_out_dir: transcript_file = _stringtie_expression(bam, data, tx_out_dir) df = _parse_ballgown(transcript_file) _write_fpkms(df, tx_out_dir, sample_name) data = dd.set_stringtie_dir(data, out_dir) data = dd.set_fpkm(data, gene_fpkm) data = dd.set_fpkm_isoform(data, isoform_fpkm) if "stringtie" in dd.get_transcript_assembler(data): assembled_gtfs = dd.get_assembled_gtf(data) assembled_gtfs.append(assembly) data = dd.set_assembled_gtf(data, assembled_gtfs) return data
python
def run_stringtie_expression(data): """ estimate expression from Stringtie, using the bcbio datadict does not do transcriptome assembly """ bam = dd.get_work_bam(data) sample_name = dd.get_sample_name(data) out_dir = os.path.join("stringtie", sample_name) isoform_fpkm = os.path.join(out_dir, sample_name + ".isoform.fpkm") gene_fpkm = os.path.join(out_dir, sample_name + ".fpkm") assembly = os.path.abspath(os.path.join(out_dir, "stringtie-assembly.gtf")) if file_exists(isoform_fpkm) and file_exists(gene_fpkm): data = dd.set_stringtie_dir(data, out_dir) data = dd.set_fpkm(data, gene_fpkm) data = dd.set_fpkm_isoform(data, isoform_fpkm) if "stringtie" in dd.get_transcript_assembler(data): assembled_gtfs = dd.get_assembled_gtf(data) assembled_gtfs.append(assembly) data = dd.set_assembled_gtf(data, assembled_gtfs) return data with file_transaction(data, out_dir) as tx_out_dir: transcript_file = _stringtie_expression(bam, data, tx_out_dir) df = _parse_ballgown(transcript_file) _write_fpkms(df, tx_out_dir, sample_name) data = dd.set_stringtie_dir(data, out_dir) data = dd.set_fpkm(data, gene_fpkm) data = dd.set_fpkm_isoform(data, isoform_fpkm) if "stringtie" in dd.get_transcript_assembler(data): assembled_gtfs = dd.get_assembled_gtf(data) assembled_gtfs.append(assembly) data = dd.set_assembled_gtf(data, assembled_gtfs) return data
[ "def", "run_stringtie_expression", "(", "data", ")", ":", "bam", "=", "dd", ".", "get_work_bam", "(", "data", ")", "sample_name", "=", "dd", ".", "get_sample_name", "(", "data", ")", "out_dir", "=", "os", ".", "path", ".", "join", "(", "\"stringtie\"", "...
estimate expression from Stringtie, using the bcbio datadict does not do transcriptome assembly
[ "estimate", "expression", "from", "Stringtie", "using", "the", "bcbio", "datadict", "does", "not", "do", "transcriptome", "assembly" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/stringtie.py#L45-L76
223,291
bcbio/bcbio-nextgen
bcbio/upload/filesystem.py
update_file
def update_file(finfo, sample_info, config, pass_uptodate=False): """Update the file in local filesystem storage. """ storage_dir = utils.safe_makedir(_get_storage_dir(finfo, config)) if finfo.get("type") == "directory": return _copy_finfo_directory(finfo, storage_dir) else: return _copy_finfo(finfo, storage_dir, pass_uptodate=pass_uptodate)
python
def update_file(finfo, sample_info, config, pass_uptodate=False): """Update the file in local filesystem storage. """ storage_dir = utils.safe_makedir(_get_storage_dir(finfo, config)) if finfo.get("type") == "directory": return _copy_finfo_directory(finfo, storage_dir) else: return _copy_finfo(finfo, storage_dir, pass_uptodate=pass_uptodate)
[ "def", "update_file", "(", "finfo", ",", "sample_info", ",", "config", ",", "pass_uptodate", "=", "False", ")", ":", "storage_dir", "=", "utils", ".", "safe_makedir", "(", "_get_storage_dir", "(", "finfo", ",", "config", ")", ")", "if", "finfo", ".", "get"...
Update the file in local filesystem storage.
[ "Update", "the", "file", "in", "local", "filesystem", "storage", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/filesystem.py#L10-L17
223,292
bcbio/bcbio-nextgen
bcbio/upload/filesystem.py
_copy_finfo
def _copy_finfo(finfo, storage_dir, pass_uptodate=False): """Copy a file into the output storage directory. """ out_file = _get_file_upload_path(finfo, storage_dir) if not shared.up_to_date(out_file, finfo): logger.info("Storing in local filesystem: %s" % out_file) shutil.copy(finfo["path"], out_file) return out_file if pass_uptodate: return out_file
python
def _copy_finfo(finfo, storage_dir, pass_uptodate=False): """Copy a file into the output storage directory. """ out_file = _get_file_upload_path(finfo, storage_dir) if not shared.up_to_date(out_file, finfo): logger.info("Storing in local filesystem: %s" % out_file) shutil.copy(finfo["path"], out_file) return out_file if pass_uptodate: return out_file
[ "def", "_copy_finfo", "(", "finfo", ",", "storage_dir", ",", "pass_uptodate", "=", "False", ")", ":", "out_file", "=", "_get_file_upload_path", "(", "finfo", ",", "storage_dir", ")", "if", "not", "shared", ".", "up_to_date", "(", "out_file", ",", "finfo", ")...
Copy a file into the output storage directory.
[ "Copy", "a", "file", "into", "the", "output", "storage", "directory", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/filesystem.py#L63-L72
223,293
bcbio/bcbio-nextgen
bcbio/upload/filesystem.py
_copy_finfo_directory
def _copy_finfo_directory(finfo, out_dir): """Copy a directory into the final output directory. """ out_dir = _get_dir_upload_path(finfo, out_dir) if not shared.up_to_date(out_dir, finfo): logger.info("Storing directory in local filesystem: %s" % out_dir) if os.path.exists(out_dir): shutil.rmtree(out_dir) shutil.copytree(finfo["path"], out_dir) for tmpdir in ["tx", "tmp"]: if os.path.exists(os.path.join(out_dir, tmpdir)): shutil.rmtree(os.path.join(out_dir, tmpdir)) os.utime(out_dir, None) return out_dir
python
def _copy_finfo_directory(finfo, out_dir): """Copy a directory into the final output directory. """ out_dir = _get_dir_upload_path(finfo, out_dir) if not shared.up_to_date(out_dir, finfo): logger.info("Storing directory in local filesystem: %s" % out_dir) if os.path.exists(out_dir): shutil.rmtree(out_dir) shutil.copytree(finfo["path"], out_dir) for tmpdir in ["tx", "tmp"]: if os.path.exists(os.path.join(out_dir, tmpdir)): shutil.rmtree(os.path.join(out_dir, tmpdir)) os.utime(out_dir, None) return out_dir
[ "def", "_copy_finfo_directory", "(", "finfo", ",", "out_dir", ")", ":", "out_dir", "=", "_get_dir_upload_path", "(", "finfo", ",", "out_dir", ")", "if", "not", "shared", ".", "up_to_date", "(", "out_dir", ",", "finfo", ")", ":", "logger", ".", "info", "(",...
Copy a directory into the final output directory.
[ "Copy", "a", "directory", "into", "the", "final", "output", "directory", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/filesystem.py#L74-L87
223,294
bcbio/bcbio-nextgen
bcbio/variation/naming.py
handle_synonyms
def handle_synonyms(in_file, ref_file, genome_build, work_dir, data): """Potentially handle remapping synonymous chromosome names between builds. Handles tab delimited file formats like BED and VCF where the contig is in the first column. """ if genome_build in GMAP and ref_file: mappings = GMAP[genome_build] contigs = set([c.name for c in ref.file_contigs(ref_file)]) out_file = os.path.join(work_dir, "%s-fixed_contigs%s" % utils.splitext_plus(os.path.basename(in_file))) if not utils.file_exists(out_file): if out_file.endswith(".gz"): out_file = out_file.replace(".gz", "") needs_bgzip = True else: needs_bgzip = False checked_file = "%s.checked" % utils.splitext_plus(out_file)[0] if not _matches_contigs(in_file, contigs, checked_file): with file_transaction(data, out_file) as tx_out_file: _write_newname_file(in_file, tx_out_file, mappings) if needs_bgzip: out_file = vcfutils.bgzip_and_index(out_file, data["config"]) return out_file return in_file
python
def handle_synonyms(in_file, ref_file, genome_build, work_dir, data): """Potentially handle remapping synonymous chromosome names between builds. Handles tab delimited file formats like BED and VCF where the contig is in the first column. """ if genome_build in GMAP and ref_file: mappings = GMAP[genome_build] contigs = set([c.name for c in ref.file_contigs(ref_file)]) out_file = os.path.join(work_dir, "%s-fixed_contigs%s" % utils.splitext_plus(os.path.basename(in_file))) if not utils.file_exists(out_file): if out_file.endswith(".gz"): out_file = out_file.replace(".gz", "") needs_bgzip = True else: needs_bgzip = False checked_file = "%s.checked" % utils.splitext_plus(out_file)[0] if not _matches_contigs(in_file, contigs, checked_file): with file_transaction(data, out_file) as tx_out_file: _write_newname_file(in_file, tx_out_file, mappings) if needs_bgzip: out_file = vcfutils.bgzip_and_index(out_file, data["config"]) return out_file return in_file
[ "def", "handle_synonyms", "(", "in_file", ",", "ref_file", ",", "genome_build", ",", "work_dir", ",", "data", ")", ":", "if", "genome_build", "in", "GMAP", "and", "ref_file", ":", "mappings", "=", "GMAP", "[", "genome_build", "]", "contigs", "=", "set", "(...
Potentially handle remapping synonymous chromosome names between builds. Handles tab delimited file formats like BED and VCF where the contig is in the first column.
[ "Potentially", "handle", "remapping", "synonymous", "chromosome", "names", "between", "builds", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/naming.py#L101-L124
223,295
bcbio/bcbio-nextgen
bcbio/variation/naming.py
_write_newname_file
def _write_newname_file(in_file, out_file, mappings): """Re-write an input file with contigs matching the correct reference. """ with utils.open_gzipsafe(in_file) as in_handle: with open(out_file, "w") as out_handle: for line in in_handle: if line.startswith("#"): out_handle.write(line) else: parts = line.split("\t") new_contig = mappings.get(parts[0]) if new_contig: parts[0] = new_contig out_handle.write("\t".join(parts))
python
def _write_newname_file(in_file, out_file, mappings): """Re-write an input file with contigs matching the correct reference. """ with utils.open_gzipsafe(in_file) as in_handle: with open(out_file, "w") as out_handle: for line in in_handle: if line.startswith("#"): out_handle.write(line) else: parts = line.split("\t") new_contig = mappings.get(parts[0]) if new_contig: parts[0] = new_contig out_handle.write("\t".join(parts))
[ "def", "_write_newname_file", "(", "in_file", ",", "out_file", ",", "mappings", ")", ":", "with", "utils", ".", "open_gzipsafe", "(", "in_file", ")", "as", "in_handle", ":", "with", "open", "(", "out_file", ",", "\"w\"", ")", "as", "out_handle", ":", "for"...
Re-write an input file with contigs matching the correct reference.
[ "Re", "-", "write", "an", "input", "file", "with", "contigs", "matching", "the", "correct", "reference", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/naming.py#L126-L139
223,296
bcbio/bcbio-nextgen
bcbio/variation/naming.py
_matches_contigs
def _matches_contigs(in_file, contigs, checked_file): """Check if the contigs in the input file match the defined contigs in the reference genome. """ tocheck_contigs = 2 if utils.file_exists(checked_file): with open(checked_file) as in_handle: return in_handle.read().strip() == "match" else: with utils.open_gzipsafe(in_file) as in_handle: to_check = set([]) for line in in_handle: if not line.startswith("#"): to_check.add(line.split()[0]) if len(to_check) >= tocheck_contigs: break with open(checked_file, "w") as out_handle: if any([c not in contigs for c in to_check]): out_handle.write("different") return False else: out_handle.write("match") return True
python
def _matches_contigs(in_file, contigs, checked_file): """Check if the contigs in the input file match the defined contigs in the reference genome. """ tocheck_contigs = 2 if utils.file_exists(checked_file): with open(checked_file) as in_handle: return in_handle.read().strip() == "match" else: with utils.open_gzipsafe(in_file) as in_handle: to_check = set([]) for line in in_handle: if not line.startswith("#"): to_check.add(line.split()[0]) if len(to_check) >= tocheck_contigs: break with open(checked_file, "w") as out_handle: if any([c not in contigs for c in to_check]): out_handle.write("different") return False else: out_handle.write("match") return True
[ "def", "_matches_contigs", "(", "in_file", ",", "contigs", ",", "checked_file", ")", ":", "tocheck_contigs", "=", "2", "if", "utils", ".", "file_exists", "(", "checked_file", ")", ":", "with", "open", "(", "checked_file", ")", "as", "in_handle", ":", "return...
Check if the contigs in the input file match the defined contigs in the reference genome.
[ "Check", "if", "the", "contigs", "in", "the", "input", "file", "match", "the", "defined", "contigs", "in", "the", "reference", "genome", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/naming.py#L141-L162
223,297
bcbio/bcbio-nextgen
bcbio/ngsalign/novoalign.py
align_bam
def align_bam(in_bam, ref_file, names, align_dir, data): """Perform realignment of input BAM file; uses unix pipes for avoid IO. """ config = data["config"] out_file = os.path.join(align_dir, "{0}-sort.bam".format(names["lane"])) novoalign = config_utils.get_program("novoalign", config) samtools = config_utils.get_program("samtools", config) resources = config_utils.get_resources("novoalign", config) num_cores = config["algorithm"].get("num_cores", 1) max_mem = resources.get("memory", "4G").upper() extra_novo_args = " ".join(_novoalign_args_from_config(config, False)) if not file_exists(out_file): with tx_tmpdir(data, base_dir=align_dir) as work_dir: with postalign.tobam_cl(data, out_file, bam.is_paired(in_bam)) as (tobam_cl, tx_out_file): rg_info = get_rg_info(names) tx_out_prefix = os.path.splitext(tx_out_file)[0] prefix1 = "%s-in1" % tx_out_prefix cmd = ("unset JAVA_HOME && " "{samtools} sort -n -o -l 1 -@ {num_cores} -m {max_mem} {in_bam} {prefix1} " "| {novoalign} -o SAM '{rg_info}' -d {ref_file} -f /dev/stdin " " -F BAMPE -c {num_cores} {extra_novo_args} | ") cmd = (cmd + tobam_cl).format(**locals()) do.run(cmd, "Novoalign: %s" % names["sample"], None, [do.file_nonempty(tx_out_file), do.file_reasonable_size(tx_out_file, in_bam)]) return out_file
python
def align_bam(in_bam, ref_file, names, align_dir, data): """Perform realignment of input BAM file; uses unix pipes for avoid IO. """ config = data["config"] out_file = os.path.join(align_dir, "{0}-sort.bam".format(names["lane"])) novoalign = config_utils.get_program("novoalign", config) samtools = config_utils.get_program("samtools", config) resources = config_utils.get_resources("novoalign", config) num_cores = config["algorithm"].get("num_cores", 1) max_mem = resources.get("memory", "4G").upper() extra_novo_args = " ".join(_novoalign_args_from_config(config, False)) if not file_exists(out_file): with tx_tmpdir(data, base_dir=align_dir) as work_dir: with postalign.tobam_cl(data, out_file, bam.is_paired(in_bam)) as (tobam_cl, tx_out_file): rg_info = get_rg_info(names) tx_out_prefix = os.path.splitext(tx_out_file)[0] prefix1 = "%s-in1" % tx_out_prefix cmd = ("unset JAVA_HOME && " "{samtools} sort -n -o -l 1 -@ {num_cores} -m {max_mem} {in_bam} {prefix1} " "| {novoalign} -o SAM '{rg_info}' -d {ref_file} -f /dev/stdin " " -F BAMPE -c {num_cores} {extra_novo_args} | ") cmd = (cmd + tobam_cl).format(**locals()) do.run(cmd, "Novoalign: %s" % names["sample"], None, [do.file_nonempty(tx_out_file), do.file_reasonable_size(tx_out_file, in_bam)]) return out_file
[ "def", "align_bam", "(", "in_bam", ",", "ref_file", ",", "names", ",", "align_dir", ",", "data", ")", ":", "config", "=", "data", "[", "\"config\"", "]", "out_file", "=", "os", ".", "path", ".", "join", "(", "align_dir", ",", "\"{0}-sort.bam\"", ".", "...
Perform realignment of input BAM file; uses unix pipes for avoid IO.
[ "Perform", "realignment", "of", "input", "BAM", "file", ";", "uses", "unix", "pipes", "for", "avoid", "IO", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/novoalign.py#L29-L54
223,298
bcbio/bcbio-nextgen
bcbio/ngsalign/novoalign.py
_novoalign_args_from_config
def _novoalign_args_from_config(config, need_quality=True): """Select novoalign options based on configuration parameters. """ if need_quality: qual_format = config["algorithm"].get("quality_format", "").lower() qual_flags = ["-F", "ILMFQ" if qual_format == "illumina" else "STDFQ"] else: qual_flags = [] multi_mappers = config["algorithm"].get("multiple_mappers") if multi_mappers is True: multi_flag = "Random" elif isinstance(multi_mappers, six.string_types): multi_flag = multi_mappers else: multi_flag = "None" multi_flags = ["-r"] + multi_flag.split() resources = config_utils.get_resources("novoalign", config) # default arguments for improved variant calling based on # comparisons to reference materials: turn off soft clipping and recalibrate if resources.get("options") is None: extra_args = ["-o", "FullNW", "-k"] else: extra_args = [str(x) for x in resources.get("options", [])] return qual_flags + multi_flags + extra_args
python
def _novoalign_args_from_config(config, need_quality=True): """Select novoalign options based on configuration parameters. """ if need_quality: qual_format = config["algorithm"].get("quality_format", "").lower() qual_flags = ["-F", "ILMFQ" if qual_format == "illumina" else "STDFQ"] else: qual_flags = [] multi_mappers = config["algorithm"].get("multiple_mappers") if multi_mappers is True: multi_flag = "Random" elif isinstance(multi_mappers, six.string_types): multi_flag = multi_mappers else: multi_flag = "None" multi_flags = ["-r"] + multi_flag.split() resources = config_utils.get_resources("novoalign", config) # default arguments for improved variant calling based on # comparisons to reference materials: turn off soft clipping and recalibrate if resources.get("options") is None: extra_args = ["-o", "FullNW", "-k"] else: extra_args = [str(x) for x in resources.get("options", [])] return qual_flags + multi_flags + extra_args
[ "def", "_novoalign_args_from_config", "(", "config", ",", "need_quality", "=", "True", ")", ":", "if", "need_quality", ":", "qual_format", "=", "config", "[", "\"algorithm\"", "]", ".", "get", "(", "\"quality_format\"", ",", "\"\"", ")", ".", "lower", "(", "...
Select novoalign options based on configuration parameters.
[ "Select", "novoalign", "options", "based", "on", "configuration", "parameters", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/novoalign.py#L92-L115
223,299
bcbio/bcbio-nextgen
bcbio/ngsalign/novoalign.py
remap_index_fn
def remap_index_fn(ref_file): """Map sequence references to equivalent novoalign indexes. """ checks = [os.path.splitext(ref_file)[0].replace("/seq/", "/novoalign/"), os.path.splitext(ref_file)[0] + ".ndx", ref_file + ".bs.ndx", ref_file + ".ndx"] for check in checks: if os.path.exists(check): return check return checks[0]
python
def remap_index_fn(ref_file): """Map sequence references to equivalent novoalign indexes. """ checks = [os.path.splitext(ref_file)[0].replace("/seq/", "/novoalign/"), os.path.splitext(ref_file)[0] + ".ndx", ref_file + ".bs.ndx", ref_file + ".ndx"] for check in checks: if os.path.exists(check): return check return checks[0]
[ "def", "remap_index_fn", "(", "ref_file", ")", ":", "checks", "=", "[", "os", ".", "path", ".", "splitext", "(", "ref_file", ")", "[", "0", "]", ".", "replace", "(", "\"/seq/\"", ",", "\"/novoalign/\"", ")", ",", "os", ".", "path", ".", "splitext", "...
Map sequence references to equivalent novoalign indexes.
[ "Map", "sequence", "references", "to", "equivalent", "novoalign", "indexes", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/novoalign.py#L138-L148