id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
224,200
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
get_indelcaller
def get_indelcaller(d_or_c): """Retrieve string for indelcaller to use, or empty string if not specified. """ config = d_or_c if isinstance(d_or_c, dict) and "config" in d_or_c else d_or_c indelcaller = config["algorithm"].get("indelcaller", "") if not indelcaller: indelcaller = "" if isinstance(indelcaller, (list, tuple)): indelcaller = indelcaller[0] if (len(indelcaller) > 0) else "" return indelcaller
python
def get_indelcaller(d_or_c): """Retrieve string for indelcaller to use, or empty string if not specified. """ config = d_or_c if isinstance(d_or_c, dict) and "config" in d_or_c else d_or_c indelcaller = config["algorithm"].get("indelcaller", "") if not indelcaller: indelcaller = "" if isinstance(indelcaller, (list, tuple)): indelcaller = indelcaller[0] if (len(indelcaller) > 0) else "" return indelcaller
[ "def", "get_indelcaller", "(", "d_or_c", ")", ":", "config", "=", "d_or_c", "if", "isinstance", "(", "d_or_c", ",", "dict", ")", "and", "\"config\"", "in", "d_or_c", "else", "d_or_c", "indelcaller", "=", "config", "[", "\"algorithm\"", "]", ".", "get", "("...
Retrieve string for indelcaller to use, or empty string if not specified.
[ "Retrieve", "string", "for", "indelcaller", "to", "use", "or", "empty", "string", "if", "not", "specified", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L140-L149
224,201
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
split_snps_indels
def split_snps_indels(orig_file, ref_file, config): """Split a variant call file into SNPs and INDELs for processing. """ base, ext = utils.splitext_plus(orig_file) snp_file = "{base}-snp{ext}".format(base=base, ext=ext) indel_file = "{base}-indel{ext}".format(base=base, ext=ext) for out_file, select_arg in [(snp_file, "--types snps"), (indel_file, "--exclude-types snps")]: if not utils.file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: bcftools = config_utils.get_program("bcftools", config) output_type = "z" if out_file.endswith(".gz") else "v" cmd = "{bcftools} view -O {output_type} {orig_file} {select_arg} > {tx_out_file}" do.run(cmd.format(**locals()), "Subset to SNPs and indels") if out_file.endswith(".gz"): bgzip_and_index(out_file, config) return snp_file, indel_file
python
def split_snps_indels(orig_file, ref_file, config): """Split a variant call file into SNPs and INDELs for processing. """ base, ext = utils.splitext_plus(orig_file) snp_file = "{base}-snp{ext}".format(base=base, ext=ext) indel_file = "{base}-indel{ext}".format(base=base, ext=ext) for out_file, select_arg in [(snp_file, "--types snps"), (indel_file, "--exclude-types snps")]: if not utils.file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: bcftools = config_utils.get_program("bcftools", config) output_type = "z" if out_file.endswith(".gz") else "v" cmd = "{bcftools} view -O {output_type} {orig_file} {select_arg} > {tx_out_file}" do.run(cmd.format(**locals()), "Subset to SNPs and indels") if out_file.endswith(".gz"): bgzip_and_index(out_file, config) return snp_file, indel_file
[ "def", "split_snps_indels", "(", "orig_file", ",", "ref_file", ",", "config", ")", ":", "base", ",", "ext", "=", "utils", ".", "splitext_plus", "(", "orig_file", ")", "snp_file", "=", "\"{base}-snp{ext}\"", ".", "format", "(", "base", "=", "base", ",", "ex...
Split a variant call file into SNPs and INDELs for processing.
[ "Split", "a", "variant", "call", "file", "into", "SNPs", "and", "INDELs", "for", "processing", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L183-L199
224,202
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
get_samples
def get_samples(in_file): """Retrieve samples present in a VCF file """ with utils.open_gzipsafe(in_file) as in_handle: for line in in_handle: if line.startswith("#CHROM"): parts = line.strip().split("\t") return parts[9:] raise ValueError("Did not find sample header in VCF file %s" % in_file)
python
def get_samples(in_file): """Retrieve samples present in a VCF file """ with utils.open_gzipsafe(in_file) as in_handle: for line in in_handle: if line.startswith("#CHROM"): parts = line.strip().split("\t") return parts[9:] raise ValueError("Did not find sample header in VCF file %s" % in_file)
[ "def", "get_samples", "(", "in_file", ")", ":", "with", "utils", ".", "open_gzipsafe", "(", "in_file", ")", "as", "in_handle", ":", "for", "line", "in", "in_handle", ":", "if", "line", ".", "startswith", "(", "\"#CHROM\"", ")", ":", "parts", "=", "line",...
Retrieve samples present in a VCF file
[ "Retrieve", "samples", "present", "in", "a", "VCF", "file" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L210-L218
224,203
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
_get_exclude_samples
def _get_exclude_samples(in_file, to_exclude): """Identify samples in the exclusion list which are actually in the VCF. """ include, exclude = [], [] to_exclude = set(to_exclude) for s in get_samples(in_file): if s in to_exclude: exclude.append(s) else: include.append(s) return include, exclude
python
def _get_exclude_samples(in_file, to_exclude): """Identify samples in the exclusion list which are actually in the VCF. """ include, exclude = [], [] to_exclude = set(to_exclude) for s in get_samples(in_file): if s in to_exclude: exclude.append(s) else: include.append(s) return include, exclude
[ "def", "_get_exclude_samples", "(", "in_file", ",", "to_exclude", ")", ":", "include", ",", "exclude", "=", "[", "]", ",", "[", "]", "to_exclude", "=", "set", "(", "to_exclude", ")", "for", "s", "in", "get_samples", "(", "in_file", ")", ":", "if", "s",...
Identify samples in the exclusion list which are actually in the VCF.
[ "Identify", "samples", "in", "the", "exclusion", "list", "which", "are", "actually", "in", "the", "VCF", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L220-L230
224,204
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
exclude_samples
def exclude_samples(in_file, out_file, to_exclude, ref_file, config, filters=None): """Exclude specific samples from an input VCF file. """ include, exclude = _get_exclude_samples(in_file, to_exclude) # can use the input sample, all exclusions already gone if len(exclude) == 0: out_file = in_file elif not utils.file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: bcftools = config_utils.get_program("bcftools", config) output_type = "z" if out_file.endswith(".gz") else "v" include_str = ",".join(include) filter_str = "-f %s" % filters if filters is not None else "" # filters could be e.g. 'PASS,.' cmd = "{bcftools} view -O {output_type} -s {include_str} {filter_str} {in_file} > {tx_out_file}" do.run(cmd.format(**locals()), "Exclude samples: {}".format(to_exclude)) return out_file
python
def exclude_samples(in_file, out_file, to_exclude, ref_file, config, filters=None): """Exclude specific samples from an input VCF file. """ include, exclude = _get_exclude_samples(in_file, to_exclude) # can use the input sample, all exclusions already gone if len(exclude) == 0: out_file = in_file elif not utils.file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: bcftools = config_utils.get_program("bcftools", config) output_type = "z" if out_file.endswith(".gz") else "v" include_str = ",".join(include) filter_str = "-f %s" % filters if filters is not None else "" # filters could be e.g. 'PASS,.' cmd = "{bcftools} view -O {output_type} -s {include_str} {filter_str} {in_file} > {tx_out_file}" do.run(cmd.format(**locals()), "Exclude samples: {}".format(to_exclude)) return out_file
[ "def", "exclude_samples", "(", "in_file", ",", "out_file", ",", "to_exclude", ",", "ref_file", ",", "config", ",", "filters", "=", "None", ")", ":", "include", ",", "exclude", "=", "_get_exclude_samples", "(", "in_file", ",", "to_exclude", ")", "# can use the ...
Exclude specific samples from an input VCF file.
[ "Exclude", "specific", "samples", "from", "an", "input", "VCF", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L232-L247
224,205
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
select_sample
def select_sample(in_file, sample, out_file, config, filters=None): """Select a single sample from the supplied multisample VCF file. """ if not utils.file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: if len(get_samples(in_file)) == 1: shutil.copy(in_file, tx_out_file) else: if in_file.endswith(".gz"): bgzip_and_index(in_file, config) bcftools = config_utils.get_program("bcftools", config) output_type = "z" if out_file.endswith(".gz") else "v" filter_str = "-f %s" % filters if filters is not None else "" # filters could be e.g. 'PASS,.' cmd = "{bcftools} view -O {output_type} {filter_str} {in_file} -s {sample} > {tx_out_file}" do.run(cmd.format(**locals()), "Select sample: %s" % sample) if out_file.endswith(".gz"): bgzip_and_index(out_file, config) return out_file
python
def select_sample(in_file, sample, out_file, config, filters=None): """Select a single sample from the supplied multisample VCF file. """ if not utils.file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: if len(get_samples(in_file)) == 1: shutil.copy(in_file, tx_out_file) else: if in_file.endswith(".gz"): bgzip_and_index(in_file, config) bcftools = config_utils.get_program("bcftools", config) output_type = "z" if out_file.endswith(".gz") else "v" filter_str = "-f %s" % filters if filters is not None else "" # filters could be e.g. 'PASS,.' cmd = "{bcftools} view -O {output_type} {filter_str} {in_file} -s {sample} > {tx_out_file}" do.run(cmd.format(**locals()), "Select sample: %s" % sample) if out_file.endswith(".gz"): bgzip_and_index(out_file, config) return out_file
[ "def", "select_sample", "(", "in_file", ",", "sample", ",", "out_file", ",", "config", ",", "filters", "=", "None", ")", ":", "if", "not", "utils", ".", "file_exists", "(", "out_file", ")", ":", "with", "file_transaction", "(", "config", ",", "out_file", ...
Select a single sample from the supplied multisample VCF file.
[ "Select", "a", "single", "sample", "from", "the", "supplied", "multisample", "VCF", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L249-L266
224,206
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
merge_variant_files
def merge_variant_files(orig_files, out_file, ref_file, config, region=None): """Combine multiple VCF files with different samples into a single output file. Uses bcftools merge on bgzipped input files, handling both tricky merge and concatenation of files. Does not correctly handle files with the same sample (use combine_variant_files instead). """ in_pipeline = False if isinstance(orig_files, dict): file_key = config["file_key"] in_pipeline = True orig_files = orig_files[file_key] out_file = _do_merge(orig_files, out_file, config, region) if in_pipeline: return [{file_key: out_file, "region": region, "sam_ref": ref_file, "config": config}] else: return out_file
python
def merge_variant_files(orig_files, out_file, ref_file, config, region=None): """Combine multiple VCF files with different samples into a single output file. Uses bcftools merge on bgzipped input files, handling both tricky merge and concatenation of files. Does not correctly handle files with the same sample (use combine_variant_files instead). """ in_pipeline = False if isinstance(orig_files, dict): file_key = config["file_key"] in_pipeline = True orig_files = orig_files[file_key] out_file = _do_merge(orig_files, out_file, config, region) if in_pipeline: return [{file_key: out_file, "region": region, "sam_ref": ref_file, "config": config}] else: return out_file
[ "def", "merge_variant_files", "(", "orig_files", ",", "out_file", ",", "ref_file", ",", "config", ",", "region", "=", "None", ")", ":", "in_pipeline", "=", "False", "if", "isinstance", "(", "orig_files", ",", "dict", ")", ":", "file_key", "=", "config", "[...
Combine multiple VCF files with different samples into a single output file. Uses bcftools merge on bgzipped input files, handling both tricky merge and concatenation of files. Does not correctly handle files with the same sample (use combine_variant_files instead).
[ "Combine", "multiple", "VCF", "files", "with", "different", "samples", "into", "a", "single", "output", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L288-L304
224,207
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
_do_merge
def _do_merge(orig_files, out_file, config, region): """Do the actual work of merging with bcftools merge. """ if not utils.file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: _check_samples_nodups(orig_files) prep_files = run_multicore(p_bgzip_and_index, [[x, config] for x in orig_files], config) input_vcf_file = "%s-files.txt" % utils.splitext_plus(out_file)[0] with open(input_vcf_file, "w") as out_handle: for fname in prep_files: out_handle.write(fname + "\n") bcftools = config_utils.get_program("bcftools", config) output_type = "z" if out_file.endswith(".gz") else "v" region_str = "-r {}".format(region) if region else "" cmd = "{bcftools} merge -O {output_type} {region_str} `cat {input_vcf_file}` > {tx_out_file}" do.run(cmd.format(**locals()), "Merge variants") if out_file.endswith(".gz"): bgzip_and_index(out_file, config) return out_file
python
def _do_merge(orig_files, out_file, config, region): """Do the actual work of merging with bcftools merge. """ if not utils.file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: _check_samples_nodups(orig_files) prep_files = run_multicore(p_bgzip_and_index, [[x, config] for x in orig_files], config) input_vcf_file = "%s-files.txt" % utils.splitext_plus(out_file)[0] with open(input_vcf_file, "w") as out_handle: for fname in prep_files: out_handle.write(fname + "\n") bcftools = config_utils.get_program("bcftools", config) output_type = "z" if out_file.endswith(".gz") else "v" region_str = "-r {}".format(region) if region else "" cmd = "{bcftools} merge -O {output_type} {region_str} `cat {input_vcf_file}` > {tx_out_file}" do.run(cmd.format(**locals()), "Merge variants") if out_file.endswith(".gz"): bgzip_and_index(out_file, config) return out_file
[ "def", "_do_merge", "(", "orig_files", ",", "out_file", ",", "config", ",", "region", ")", ":", "if", "not", "utils", ".", "file_exists", "(", "out_file", ")", ":", "with", "file_transaction", "(", "config", ",", "out_file", ")", "as", "tx_out_file", ":", ...
Do the actual work of merging with bcftools merge.
[ "Do", "the", "actual", "work", "of", "merging", "with", "bcftools", "merge", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L306-L324
224,208
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
_check_samples_nodups
def _check_samples_nodups(fnames): """Ensure a set of input VCFs do not have duplicate samples. """ counts = defaultdict(int) for f in fnames: for s in get_samples(f): counts[s] += 1 duplicates = [s for s, c in counts.items() if c > 1] if duplicates: raise ValueError("Duplicate samples found in inputs %s: %s" % (duplicates, fnames))
python
def _check_samples_nodups(fnames): """Ensure a set of input VCFs do not have duplicate samples. """ counts = defaultdict(int) for f in fnames: for s in get_samples(f): counts[s] += 1 duplicates = [s for s, c in counts.items() if c > 1] if duplicates: raise ValueError("Duplicate samples found in inputs %s: %s" % (duplicates, fnames))
[ "def", "_check_samples_nodups", "(", "fnames", ")", ":", "counts", "=", "defaultdict", "(", "int", ")", "for", "f", "in", "fnames", ":", "for", "s", "in", "get_samples", "(", "f", ")", ":", "counts", "[", "s", "]", "+=", "1", "duplicates", "=", "[", ...
Ensure a set of input VCFs do not have duplicate samples.
[ "Ensure", "a", "set", "of", "input", "VCFs", "do", "not", "have", "duplicate", "samples", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L326-L335
224,209
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
_sort_by_region
def _sort_by_region(fnames, regions, ref_file, config): """Sort a set of regionally split files by region for ordered output. """ contig_order = {} for i, sq in enumerate(ref.file_contigs(ref_file, config)): contig_order[sq.name] = i sitems = [] assert len(regions) == len(fnames), (regions, fnames) added_fnames = set([]) for region, fname in zip(regions, fnames): if fname not in added_fnames: if isinstance(region, (list, tuple)): c, s, e = region elif isinstance(region, six.string_types) and region.find(":") >= 0: c, coords = region.split(":") s, e = [int(x) for x in coords.split("-")] else: c = region s, e = 0, 0 sitems.append(((contig_order[c], s, e), c, fname)) added_fnames.add(fname) sitems.sort() return [(x[1], x[2]) for x in sitems]
python
def _sort_by_region(fnames, regions, ref_file, config): """Sort a set of regionally split files by region for ordered output. """ contig_order = {} for i, sq in enumerate(ref.file_contigs(ref_file, config)): contig_order[sq.name] = i sitems = [] assert len(regions) == len(fnames), (regions, fnames) added_fnames = set([]) for region, fname in zip(regions, fnames): if fname not in added_fnames: if isinstance(region, (list, tuple)): c, s, e = region elif isinstance(region, six.string_types) and region.find(":") >= 0: c, coords = region.split(":") s, e = [int(x) for x in coords.split("-")] else: c = region s, e = 0, 0 sitems.append(((contig_order[c], s, e), c, fname)) added_fnames.add(fname) sitems.sort() return [(x[1], x[2]) for x in sitems]
[ "def", "_sort_by_region", "(", "fnames", ",", "regions", ",", "ref_file", ",", "config", ")", ":", "contig_order", "=", "{", "}", "for", "i", ",", "sq", "in", "enumerate", "(", "ref", ".", "file_contigs", "(", "ref_file", ",", "config", ")", ")", ":", ...
Sort a set of regionally split files by region for ordered output.
[ "Sort", "a", "set", "of", "regionally", "split", "files", "by", "region", "for", "ordered", "output", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L337-L359
224,210
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
concat_variant_files
def concat_variant_files(orig_files, out_file, regions, ref_file, config): """Concatenate multiple variant files from regions into a single output file. Uses GATK4's GatherVcfs, falling back to bcftools concat --naive if it fails. These both only combine samples and avoid parsing, allowing scaling to large file sizes. """ if not utils.file_exists(out_file): input_file_list = _get_file_list(orig_files, out_file, regions, ref_file, config) try: out_file = _run_concat_variant_files_gatk4(input_file_list, out_file, config) except subprocess.CalledProcessError as msg: if ("We require all VCFs to have complete VCF headers" in str(msg) or "Features added out of order" in str(msg) or "The reference allele cannot be missing" in str(msg)): out_file = _run_concat_variant_files_bcftools(input_file_list, out_file, config, naive=True) else: raise if out_file.endswith(".gz"): bgzip_and_index(out_file, config) return out_file
python
def concat_variant_files(orig_files, out_file, regions, ref_file, config): """Concatenate multiple variant files from regions into a single output file. Uses GATK4's GatherVcfs, falling back to bcftools concat --naive if it fails. These both only combine samples and avoid parsing, allowing scaling to large file sizes. """ if not utils.file_exists(out_file): input_file_list = _get_file_list(orig_files, out_file, regions, ref_file, config) try: out_file = _run_concat_variant_files_gatk4(input_file_list, out_file, config) except subprocess.CalledProcessError as msg: if ("We require all VCFs to have complete VCF headers" in str(msg) or "Features added out of order" in str(msg) or "The reference allele cannot be missing" in str(msg)): out_file = _run_concat_variant_files_bcftools(input_file_list, out_file, config, naive=True) else: raise if out_file.endswith(".gz"): bgzip_and_index(out_file, config) return out_file
[ "def", "concat_variant_files", "(", "orig_files", ",", "out_file", ",", "regions", ",", "ref_file", ",", "config", ")", ":", "if", "not", "utils", ".", "file_exists", "(", "out_file", ")", ":", "input_file_list", "=", "_get_file_list", "(", "orig_files", ",", ...
Concatenate multiple variant files from regions into a single output file. Uses GATK4's GatherVcfs, falling back to bcftools concat --naive if it fails. These both only combine samples and avoid parsing, allowing scaling to large file sizes.
[ "Concatenate", "multiple", "variant", "files", "from", "regions", "into", "a", "single", "output", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L361-L381
224,211
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
_run_concat_variant_files_gatk4
def _run_concat_variant_files_gatk4(input_file_list, out_file, config): """Use GATK4 GatherVcfs for concatenation of scattered VCFs. """ if not utils.file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: params = ["-T", "GatherVcfs", "-I", input_file_list, "-O", tx_out_file] # Use GATK4 for merging, tools_off: [gatk4] applies to variant calling config = utils.deepish_copy(config) if "gatk4" in dd.get_tools_off({"config": config}): config["algorithm"]["tools_off"].remove("gatk4") # Allow specification of verbosity in the unique style this tool uses resources = config_utils.get_resources("gatk", config) opts = [str(x) for x in resources.get("options", [])] if "--verbosity" in opts: params += ["--VERBOSITY:%s" % opts[opts.index("--verbosity") + 1]] broad_runner = broad.runner_from_config(config) broad_runner.run_gatk(params) return out_file
python
def _run_concat_variant_files_gatk4(input_file_list, out_file, config): """Use GATK4 GatherVcfs for concatenation of scattered VCFs. """ if not utils.file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: params = ["-T", "GatherVcfs", "-I", input_file_list, "-O", tx_out_file] # Use GATK4 for merging, tools_off: [gatk4] applies to variant calling config = utils.deepish_copy(config) if "gatk4" in dd.get_tools_off({"config": config}): config["algorithm"]["tools_off"].remove("gatk4") # Allow specification of verbosity in the unique style this tool uses resources = config_utils.get_resources("gatk", config) opts = [str(x) for x in resources.get("options", [])] if "--verbosity" in opts: params += ["--VERBOSITY:%s" % opts[opts.index("--verbosity") + 1]] broad_runner = broad.runner_from_config(config) broad_runner.run_gatk(params) return out_file
[ "def", "_run_concat_variant_files_gatk4", "(", "input_file_list", ",", "out_file", ",", "config", ")", ":", "if", "not", "utils", ".", "file_exists", "(", "out_file", ")", ":", "with", "file_transaction", "(", "config", ",", "out_file", ")", "as", "tx_out_file",...
Use GATK4 GatherVcfs for concatenation of scattered VCFs.
[ "Use", "GATK4", "GatherVcfs", "for", "concatenation", "of", "scattered", "VCFs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L383-L400
224,212
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
_get_file_list
def _get_file_list(orig_files, out_file, regions, ref_file, config): """Create file with region sorted list of non-empty VCFs for concatenating. """ sorted_files = _sort_by_region(orig_files, regions, ref_file, config) exist_files = [(c, x) for c, x in sorted_files if os.path.exists(x) and vcf_has_variants(x)] if len(exist_files) == 0: # no non-empty inputs, merge the empty ones exist_files = [x for c, x in sorted_files if os.path.exists(x)] elif len(exist_files) > 1: exist_files = _fix_gatk_header(exist_files, out_file, config) else: exist_files = [x for c, x in exist_files] ready_files = run_multicore(p_bgzip_and_index, [[x, config] for x in exist_files], config) input_file_list = "%s-files.list" % utils.splitext_plus(out_file)[0] with open(input_file_list, "w") as out_handle: for fname in ready_files: out_handle.write(fname + "\n") return input_file_list
python
def _get_file_list(orig_files, out_file, regions, ref_file, config): """Create file with region sorted list of non-empty VCFs for concatenating. """ sorted_files = _sort_by_region(orig_files, regions, ref_file, config) exist_files = [(c, x) for c, x in sorted_files if os.path.exists(x) and vcf_has_variants(x)] if len(exist_files) == 0: # no non-empty inputs, merge the empty ones exist_files = [x for c, x in sorted_files if os.path.exists(x)] elif len(exist_files) > 1: exist_files = _fix_gatk_header(exist_files, out_file, config) else: exist_files = [x for c, x in exist_files] ready_files = run_multicore(p_bgzip_and_index, [[x, config] for x in exist_files], config) input_file_list = "%s-files.list" % utils.splitext_plus(out_file)[0] with open(input_file_list, "w") as out_handle: for fname in ready_files: out_handle.write(fname + "\n") return input_file_list
[ "def", "_get_file_list", "(", "orig_files", ",", "out_file", ",", "regions", ",", "ref_file", ",", "config", ")", ":", "sorted_files", "=", "_sort_by_region", "(", "orig_files", ",", "regions", ",", "ref_file", ",", "config", ")", "exist_files", "=", "[", "(...
Create file with region sorted list of non-empty VCFs for concatenating.
[ "Create", "file", "with", "region", "sorted", "list", "of", "non", "-", "empty", "VCFs", "for", "concatenating", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L402-L418
224,213
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
_fix_gatk_header
def _fix_gatk_header(exist_files, out_file, config): """Ensure consistent headers for VCF concatenation. Fixes problems for genomes that start with chrM by reheadering the first file. These files do haploid variant calling which lack the PID phasing key/value pair in FORMAT, so initial chrM samples cause errors during concatenation due to the lack of header merging. This fixes this by updating the first header. """ from bcbio.variation import ploidy c, base_file = exist_files[0] replace_file = base_file items = [{"config": config}] if ploidy.get_ploidy(items, region=(c, 1, 2)) == 1: for c, x in exist_files[1:]: if ploidy.get_ploidy(items, (c, 1, 2)) > 1: replace_file = x break base_fix_file = os.path.join(os.path.dirname(out_file), "%s-fixheader%s" % utils.splitext_plus(os.path.basename(base_file))) with file_transaction(config, base_fix_file) as tx_out_file: header_file = "%s-header.vcf" % utils.splitext_plus(tx_out_file)[0] do.run("zgrep ^# %s > %s" % (replace_file, header_file), "Prepare header file for merging") resources = config_utils.get_resources("picard", config) ropts = [] if "options" in resources: ropts += [str(x) for x in resources.get("options", [])] do.run("%s && picard FixVcfHeader HEADER=%s INPUT=%s OUTPUT=%s %s" % (utils.get_java_clprep(), header_file, base_file, base_fix_file, " ".join(ropts)), "Reheader initial VCF file in merge") bgzip_and_index(base_fix_file, config) return [base_fix_file] + [x for (c, x) in exist_files[1:]]
python
def _fix_gatk_header(exist_files, out_file, config): """Ensure consistent headers for VCF concatenation. Fixes problems for genomes that start with chrM by reheadering the first file. These files do haploid variant calling which lack the PID phasing key/value pair in FORMAT, so initial chrM samples cause errors during concatenation due to the lack of header merging. This fixes this by updating the first header. """ from bcbio.variation import ploidy c, base_file = exist_files[0] replace_file = base_file items = [{"config": config}] if ploidy.get_ploidy(items, region=(c, 1, 2)) == 1: for c, x in exist_files[1:]: if ploidy.get_ploidy(items, (c, 1, 2)) > 1: replace_file = x break base_fix_file = os.path.join(os.path.dirname(out_file), "%s-fixheader%s" % utils.splitext_plus(os.path.basename(base_file))) with file_transaction(config, base_fix_file) as tx_out_file: header_file = "%s-header.vcf" % utils.splitext_plus(tx_out_file)[0] do.run("zgrep ^# %s > %s" % (replace_file, header_file), "Prepare header file for merging") resources = config_utils.get_resources("picard", config) ropts = [] if "options" in resources: ropts += [str(x) for x in resources.get("options", [])] do.run("%s && picard FixVcfHeader HEADER=%s INPUT=%s OUTPUT=%s %s" % (utils.get_java_clprep(), header_file, base_file, base_fix_file, " ".join(ropts)), "Reheader initial VCF file in merge") bgzip_and_index(base_fix_file, config) return [base_fix_file] + [x for (c, x) in exist_files[1:]]
[ "def", "_fix_gatk_header", "(", "exist_files", ",", "out_file", ",", "config", ")", ":", "from", "bcbio", ".", "variation", "import", "ploidy", "c", ",", "base_file", "=", "exist_files", "[", "0", "]", "replace_file", "=", "base_file", "items", "=", "[", "...
Ensure consistent headers for VCF concatenation. Fixes problems for genomes that start with chrM by reheadering the first file. These files do haploid variant calling which lack the PID phasing key/value pair in FORMAT, so initial chrM samples cause errors during concatenation due to the lack of header merging. This fixes this by updating the first header.
[ "Ensure", "consistent", "headers", "for", "VCF", "concatenation", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L420-L451
224,214
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
_run_concat_variant_files_bcftools
def _run_concat_variant_files_bcftools(in_list, out_file, config, naive=False): """Concatenate variant files using bcftools concat, potentially using the fast naive option. """ if not utils.file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: bcftools = config_utils.get_program("bcftools", config) output_type = "z" if out_file.endswith(".gz") else "v" if naive: args = "--naive" else: args = "--allow-overlaps" cmd = "{bcftools} concat {args} -O {output_type} --file-list {in_list} -o {tx_out_file}" do.run(cmd.format(**locals()), "bcftools concat variants") if out_file.endswith(".gz"): bgzip_and_index(out_file, config) return out_file
python
def _run_concat_variant_files_bcftools(in_list, out_file, config, naive=False): """Concatenate variant files using bcftools concat, potentially using the fast naive option. """ if not utils.file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: bcftools = config_utils.get_program("bcftools", config) output_type = "z" if out_file.endswith(".gz") else "v" if naive: args = "--naive" else: args = "--allow-overlaps" cmd = "{bcftools} concat {args} -O {output_type} --file-list {in_list} -o {tx_out_file}" do.run(cmd.format(**locals()), "bcftools concat variants") if out_file.endswith(".gz"): bgzip_and_index(out_file, config) return out_file
[ "def", "_run_concat_variant_files_bcftools", "(", "in_list", ",", "out_file", ",", "config", ",", "naive", "=", "False", ")", ":", "if", "not", "utils", ".", "file_exists", "(", "out_file", ")", ":", "with", "file_transaction", "(", "config", ",", "out_file", ...
Concatenate variant files using bcftools concat, potentially using the fast naive option.
[ "Concatenate", "variant", "files", "using", "bcftools", "concat", "potentially", "using", "the", "fast", "naive", "option", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L465-L480
224,215
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
combine_variant_files
def combine_variant_files(orig_files, out_file, ref_file, config, quiet_out=True, region=None): """Combine VCF files from the same sample into a single output file. Handles cases where we split files into SNPs/Indels for processing then need to merge back into a final file. """ in_pipeline = False if isinstance(orig_files, dict): file_key = config["file_key"] in_pipeline = True orig_files = orig_files[file_key] if not utils.file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: exist_files = [x for x in orig_files if os.path.exists(x)] ready_files = run_multicore(p_bgzip_and_index, [[x, config] for x in exist_files], config) dict_file = "%s.dict" % utils.splitext_plus(ref_file)[0] cores = dd.get_num_cores({"config": config}) memscale = {"magnitude": 0.9 * cores, "direction": "increase"} if cores > 1 else None cmd = ["picard"] + broad.get_picard_opts(config, memscale) + \ ["MergeVcfs", "D=%s" % dict_file, "O=%s" % tx_out_file] + \ ["I=%s" % f for f in ready_files] cmd = "%s && %s" % (utils.get_java_clprep(), " ".join(cmd)) do.run(cmd, "Combine variant files") if out_file.endswith(".gz"): bgzip_and_index(out_file, config) if in_pipeline: return [{file_key: out_file, "region": region, "sam_ref": ref_file, "config": config}] else: return out_file
python
def combine_variant_files(orig_files, out_file, ref_file, config, quiet_out=True, region=None): """Combine VCF files from the same sample into a single output file. Handles cases where we split files into SNPs/Indels for processing then need to merge back into a final file. """ in_pipeline = False if isinstance(orig_files, dict): file_key = config["file_key"] in_pipeline = True orig_files = orig_files[file_key] if not utils.file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: exist_files = [x for x in orig_files if os.path.exists(x)] ready_files = run_multicore(p_bgzip_and_index, [[x, config] for x in exist_files], config) dict_file = "%s.dict" % utils.splitext_plus(ref_file)[0] cores = dd.get_num_cores({"config": config}) memscale = {"magnitude": 0.9 * cores, "direction": "increase"} if cores > 1 else None cmd = ["picard"] + broad.get_picard_opts(config, memscale) + \ ["MergeVcfs", "D=%s" % dict_file, "O=%s" % tx_out_file] + \ ["I=%s" % f for f in ready_files] cmd = "%s && %s" % (utils.get_java_clprep(), " ".join(cmd)) do.run(cmd, "Combine variant files") if out_file.endswith(".gz"): bgzip_and_index(out_file, config) if in_pipeline: return [{file_key: out_file, "region": region, "sam_ref": ref_file, "config": config}] else: return out_file
[ "def", "combine_variant_files", "(", "orig_files", ",", "out_file", ",", "ref_file", ",", "config", ",", "quiet_out", "=", "True", ",", "region", "=", "None", ")", ":", "in_pipeline", "=", "False", "if", "isinstance", "(", "orig_files", ",", "dict", ")", "...
Combine VCF files from the same sample into a single output file. Handles cases where we split files into SNPs/Indels for processing then need to merge back into a final file.
[ "Combine", "VCF", "files", "from", "the", "same", "sample", "into", "a", "single", "output", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L482-L511
224,216
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
sort_by_ref
def sort_by_ref(vcf_file, data): """Sort a VCF file by genome reference and position, adding contig information. """ out_file = "%s-prep.vcf.gz" % utils.splitext_plus(vcf_file)[0] if not utils.file_uptodate(out_file, vcf_file): with file_transaction(data, out_file) as tx_out_file: header_file = "%s-header.txt" % utils.splitext_plus(tx_out_file)[0] with open(header_file, "w") as out_handle: for region in ref.file_contigs(dd.get_ref_file(data), data["config"]): out_handle.write("##contig=<ID=%s,length=%s>\n" % (region.name, region.size)) cat_cmd = "zcat" if vcf_file.endswith("vcf.gz") else "cat" cmd = ("{cat_cmd} {vcf_file} | grep -v ^##contig | bcftools annotate -h {header_file} | " "vt sort -m full -o {tx_out_file} -") with utils.chdir(os.path.dirname(tx_out_file)): do.run(cmd.format(**locals()), "Sort VCF by reference") return bgzip_and_index(out_file, data["config"])
python
def sort_by_ref(vcf_file, data): """Sort a VCF file by genome reference and position, adding contig information. """ out_file = "%s-prep.vcf.gz" % utils.splitext_plus(vcf_file)[0] if not utils.file_uptodate(out_file, vcf_file): with file_transaction(data, out_file) as tx_out_file: header_file = "%s-header.txt" % utils.splitext_plus(tx_out_file)[0] with open(header_file, "w") as out_handle: for region in ref.file_contigs(dd.get_ref_file(data), data["config"]): out_handle.write("##contig=<ID=%s,length=%s>\n" % (region.name, region.size)) cat_cmd = "zcat" if vcf_file.endswith("vcf.gz") else "cat" cmd = ("{cat_cmd} {vcf_file} | grep -v ^##contig | bcftools annotate -h {header_file} | " "vt sort -m full -o {tx_out_file} -") with utils.chdir(os.path.dirname(tx_out_file)): do.run(cmd.format(**locals()), "Sort VCF by reference") return bgzip_and_index(out_file, data["config"])
[ "def", "sort_by_ref", "(", "vcf_file", ",", "data", ")", ":", "out_file", "=", "\"%s-prep.vcf.gz\"", "%", "utils", ".", "splitext_plus", "(", "vcf_file", ")", "[", "0", "]", "if", "not", "utils", ".", "file_uptodate", "(", "out_file", ",", "vcf_file", ")",...
Sort a VCF file by genome reference and position, adding contig information.
[ "Sort", "a", "VCF", "file", "by", "genome", "reference", "and", "position", "adding", "contig", "information", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L513-L528
224,217
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
add_contig_to_header
def add_contig_to_header(line, ref_file): """Streaming target to add contigs to a VCF file header. """ if line.startswith("##fileformat=VCF"): out = [line] for region in ref.file_contigs(ref_file): out.append("##contig=<ID=%s,length=%s>" % (region.name, region.size)) return "\n".join(out) else: return line
python
def add_contig_to_header(line, ref_file): """Streaming target to add contigs to a VCF file header. """ if line.startswith("##fileformat=VCF"): out = [line] for region in ref.file_contigs(ref_file): out.append("##contig=<ID=%s,length=%s>" % (region.name, region.size)) return "\n".join(out) else: return line
[ "def", "add_contig_to_header", "(", "line", ",", "ref_file", ")", ":", "if", "line", ".", "startswith", "(", "\"##fileformat=VCF\"", ")", ":", "out", "=", "[", "line", "]", "for", "region", "in", "ref", ".", "file_contigs", "(", "ref_file", ")", ":", "ou...
Streaming target to add contigs to a VCF file header.
[ "Streaming", "target", "to", "add", "contigs", "to", "a", "VCF", "file", "header", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L539-L548
224,218
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
parallel_combine_variants
def parallel_combine_variants(orig_files, out_file, ref_file, config, run_parallel): """Combine variants in parallel by chromosome, concatenating final outputs. """ file_key = "vcf_files" def split_by_region(data): base, ext = utils.splitext_plus(os.path.basename(out_file)) args = [] for region in [x.name for x in ref.file_contigs(ref_file, config)]: region_out = os.path.join(os.path.dirname(out_file), "%s-regions" % base, "%s-%s%s" % (base, region, ext)) utils.safe_makedir(os.path.dirname(region_out)) args.append((region_out, ref_file, config, region)) return out_file, args config = copy.deepcopy(config) config["file_key"] = file_key prep_files = run_multicore(p_bgzip_and_index, [[x, config] for x in orig_files], config) items = [[{file_key: prep_files}]] parallel_split_combine(items, split_by_region, run_parallel, "merge_variant_files", "concat_variant_files", file_key, ["region", "sam_ref", "config"], split_outfile_i=0) return out_file
python
def parallel_combine_variants(orig_files, out_file, ref_file, config, run_parallel): """Combine variants in parallel by chromosome, concatenating final outputs. """ file_key = "vcf_files" def split_by_region(data): base, ext = utils.splitext_plus(os.path.basename(out_file)) args = [] for region in [x.name for x in ref.file_contigs(ref_file, config)]: region_out = os.path.join(os.path.dirname(out_file), "%s-regions" % base, "%s-%s%s" % (base, region, ext)) utils.safe_makedir(os.path.dirname(region_out)) args.append((region_out, ref_file, config, region)) return out_file, args config = copy.deepcopy(config) config["file_key"] = file_key prep_files = run_multicore(p_bgzip_and_index, [[x, config] for x in orig_files], config) items = [[{file_key: prep_files}]] parallel_split_combine(items, split_by_region, run_parallel, "merge_variant_files", "concat_variant_files", file_key, ["region", "sam_ref", "config"], split_outfile_i=0) return out_file
[ "def", "parallel_combine_variants", "(", "orig_files", ",", "out_file", ",", "ref_file", ",", "config", ",", "run_parallel", ")", ":", "file_key", "=", "\"vcf_files\"", "def", "split_by_region", "(", "data", ")", ":", "base", ",", "ext", "=", "utils", ".", "...
Combine variants in parallel by chromosome, concatenating final outputs.
[ "Combine", "variants", "in", "parallel", "by", "chromosome", "concatenating", "final", "outputs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L552-L572
224,219
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
move_vcf
def move_vcf(orig_file, new_file): """Move a VCF file with associated index. """ for ext in ["", ".idx", ".tbi"]: to_move = orig_file + ext if os.path.exists(to_move): shutil.move(to_move, new_file + ext)
python
def move_vcf(orig_file, new_file): """Move a VCF file with associated index. """ for ext in ["", ".idx", ".tbi"]: to_move = orig_file + ext if os.path.exists(to_move): shutil.move(to_move, new_file + ext)
[ "def", "move_vcf", "(", "orig_file", ",", "new_file", ")", ":", "for", "ext", "in", "[", "\"\"", ",", "\".idx\"", ",", "\".tbi\"", "]", ":", "to_move", "=", "orig_file", "+", "ext", "if", "os", ".", "path", ".", "exists", "(", "to_move", ")", ":", ...
Move a VCF file with associated index.
[ "Move", "a", "VCF", "file", "with", "associated", "index", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L576-L582
224,220
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
bgzip_and_index
def bgzip_and_index(in_file, config=None, remove_orig=True, prep_cmd="", tabix_args=None, out_dir=None): """bgzip and tabix index an input file, handling VCF and BED. """ if config is None: config = {} out_file = in_file if in_file.endswith(".gz") else in_file + ".gz" if out_dir: remove_orig = False out_file = os.path.join(out_dir, os.path.basename(out_file)) if (not utils.file_exists(out_file) or not os.path.lexists(out_file) or (utils.file_exists(in_file) and not utils.file_uptodate(out_file, in_file))): assert not in_file == out_file, "Input file is bgzipped but not found: %s" % in_file assert os.path.exists(in_file), "Input file %s not found" % in_file if not utils.file_uptodate(out_file, in_file): with file_transaction(config, out_file) as tx_out_file: bgzip = tools.get_bgzip_cmd(config) cat_cmd = "zcat" if in_file.endswith(".gz") else "cat" if prep_cmd: prep_cmd = "| %s " % prep_cmd cmd = "{cat_cmd} {in_file} {prep_cmd} | {bgzip} -c > {tx_out_file}" try: do.run(cmd.format(**locals()), "bgzip %s" % os.path.basename(in_file)) except subprocess.CalledProcessError: # Race conditions: ignore errors where file has been deleted by another if os.path.exists(in_file) and not os.path.exists(out_file): raise if remove_orig: try: os.remove(in_file) except OSError: # Handle cases where run in parallel and file has been deleted pass tabix_index(out_file, config, tabix_args=tabix_args) return out_file
python
def bgzip_and_index(in_file, config=None, remove_orig=True, prep_cmd="", tabix_args=None, out_dir=None): """bgzip and tabix index an input file, handling VCF and BED. """ if config is None: config = {} out_file = in_file if in_file.endswith(".gz") else in_file + ".gz" if out_dir: remove_orig = False out_file = os.path.join(out_dir, os.path.basename(out_file)) if (not utils.file_exists(out_file) or not os.path.lexists(out_file) or (utils.file_exists(in_file) and not utils.file_uptodate(out_file, in_file))): assert not in_file == out_file, "Input file is bgzipped but not found: %s" % in_file assert os.path.exists(in_file), "Input file %s not found" % in_file if not utils.file_uptodate(out_file, in_file): with file_transaction(config, out_file) as tx_out_file: bgzip = tools.get_bgzip_cmd(config) cat_cmd = "zcat" if in_file.endswith(".gz") else "cat" if prep_cmd: prep_cmd = "| %s " % prep_cmd cmd = "{cat_cmd} {in_file} {prep_cmd} | {bgzip} -c > {tx_out_file}" try: do.run(cmd.format(**locals()), "bgzip %s" % os.path.basename(in_file)) except subprocess.CalledProcessError: # Race conditions: ignore errors where file has been deleted by another if os.path.exists(in_file) and not os.path.exists(out_file): raise if remove_orig: try: os.remove(in_file) except OSError: # Handle cases where run in parallel and file has been deleted pass tabix_index(out_file, config, tabix_args=tabix_args) return out_file
[ "def", "bgzip_and_index", "(", "in_file", ",", "config", "=", "None", ",", "remove_orig", "=", "True", ",", "prep_cmd", "=", "\"\"", ",", "tabix_args", "=", "None", ",", "out_dir", "=", "None", ")", ":", "if", "config", "is", "None", ":", "config", "="...
bgzip and tabix index an input file, handling VCF and BED.
[ "bgzip", "and", "tabix", "index", "an", "input", "file", "handling", "VCF", "and", "BED", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L584-L616
224,221
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
tabix_index
def tabix_index(in_file, config, preset=None, tabix_args=None): """Index a file using tabix. """ in_file = os.path.abspath(in_file) out_file = in_file + ".tbi" if not utils.file_exists(out_file) or not utils.file_uptodate(out_file, in_file): # Remove old index files to prevent linking into tx directory utils.remove_safe(out_file) with file_transaction(config, out_file) as tx_out_file: tabix = tools.get_tabix_cmd(config) tx_in_file = os.path.splitext(tx_out_file)[0] utils.symlink_plus(in_file, tx_in_file) if tabix_args: cmd = "{tabix} -f {tabix_args} {tx_in_file}" else: preset = _guess_preset(in_file) if preset is None else preset cmd = "{tabix} -f -p {preset} {tx_in_file}" do.run(cmd.format(**locals()), "tabix index %s" % os.path.basename(in_file)) return out_file
python
def tabix_index(in_file, config, preset=None, tabix_args=None): """Index a file using tabix. """ in_file = os.path.abspath(in_file) out_file = in_file + ".tbi" if not utils.file_exists(out_file) or not utils.file_uptodate(out_file, in_file): # Remove old index files to prevent linking into tx directory utils.remove_safe(out_file) with file_transaction(config, out_file) as tx_out_file: tabix = tools.get_tabix_cmd(config) tx_in_file = os.path.splitext(tx_out_file)[0] utils.symlink_plus(in_file, tx_in_file) if tabix_args: cmd = "{tabix} -f {tabix_args} {tx_in_file}" else: preset = _guess_preset(in_file) if preset is None else preset cmd = "{tabix} -f -p {preset} {tx_in_file}" do.run(cmd.format(**locals()), "tabix index %s" % os.path.basename(in_file)) return out_file
[ "def", "tabix_index", "(", "in_file", ",", "config", ",", "preset", "=", "None", ",", "tabix_args", "=", "None", ")", ":", "in_file", "=", "os", ".", "path", ".", "abspath", "(", "in_file", ")", "out_file", "=", "in_file", "+", "\".tbi\"", "if", "not",...
Index a file using tabix.
[ "Index", "a", "file", "using", "tabix", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L635-L653
224,222
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
is_gvcf_file
def is_gvcf_file(in_file): """Check if an input file is raw gVCF """ to_check = 100 n = 0 with utils.open_gzipsafe(in_file) as in_handle: for line in in_handle: if not line.startswith("##"): if n > to_check: break n += 1 parts = line.split("\t") # GATK if parts[4] == "<NON_REF>": return True # strelka2 if parts[4] == "." and parts[7].startswith("BLOCKAVG"): return True # freebayes if parts[4] == "<*>": return True # platypue if parts[4] == "N" and parts[6] == "REFCALL": return True
python
def is_gvcf_file(in_file): """Check if an input file is raw gVCF """ to_check = 100 n = 0 with utils.open_gzipsafe(in_file) as in_handle: for line in in_handle: if not line.startswith("##"): if n > to_check: break n += 1 parts = line.split("\t") # GATK if parts[4] == "<NON_REF>": return True # strelka2 if parts[4] == "." and parts[7].startswith("BLOCKAVG"): return True # freebayes if parts[4] == "<*>": return True # platypue if parts[4] == "N" and parts[6] == "REFCALL": return True
[ "def", "is_gvcf_file", "(", "in_file", ")", ":", "to_check", "=", "100", "n", "=", "0", "with", "utils", ".", "open_gzipsafe", "(", "in_file", ")", "as", "in_handle", ":", "for", "line", "in", "in_handle", ":", "if", "not", "line", ".", "startswith", "...
Check if an input file is raw gVCF
[ "Check", "if", "an", "input", "file", "is", "raw", "gVCF" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L655-L678
224,223
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
cyvcf_add_filter
def cyvcf_add_filter(rec, name): """Add a FILTER value to a cyvcf2 record """ if rec.FILTER: filters = rec.FILTER.split(";") else: filters = [] if name not in filters: filters.append(name) rec.FILTER = filters return rec
python
def cyvcf_add_filter(rec, name): """Add a FILTER value to a cyvcf2 record """ if rec.FILTER: filters = rec.FILTER.split(";") else: filters = [] if name not in filters: filters.append(name) rec.FILTER = filters return rec
[ "def", "cyvcf_add_filter", "(", "rec", ",", "name", ")", ":", "if", "rec", ".", "FILTER", ":", "filters", "=", "rec", ".", "FILTER", ".", "split", "(", "\";\"", ")", "else", ":", "filters", "=", "[", "]", "if", "name", "not", "in", "filters", ":", ...
Add a FILTER value to a cyvcf2 record
[ "Add", "a", "FILTER", "value", "to", "a", "cyvcf2", "record" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L680-L690
224,224
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
cyvcf_remove_filter
def cyvcf_remove_filter(rec, name): """Remove filter with the given name from a cyvcf2 record """ if rec.FILTER: filters = rec.FILTER.split(";") else: filters = [] new_filters = [x for x in filters if not str(x) == name] if len(new_filters) == 0: new_filters = ["PASS"] rec.FILTER = new_filters return rec
python
def cyvcf_remove_filter(rec, name): """Remove filter with the given name from a cyvcf2 record """ if rec.FILTER: filters = rec.FILTER.split(";") else: filters = [] new_filters = [x for x in filters if not str(x) == name] if len(new_filters) == 0: new_filters = ["PASS"] rec.FILTER = new_filters return rec
[ "def", "cyvcf_remove_filter", "(", "rec", ",", "name", ")", ":", "if", "rec", ".", "FILTER", ":", "filters", "=", "rec", ".", "FILTER", ".", "split", "(", "\";\"", ")", "else", ":", "filters", "=", "[", "]", "new_filters", "=", "[", "x", "for", "x"...
Remove filter with the given name from a cyvcf2 record
[ "Remove", "filter", "with", "the", "given", "name", "from", "a", "cyvcf2", "record" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L692-L703
224,225
bcbio/bcbio-nextgen
bcbio/pipeline/alignment.py
organize_noalign
def organize_noalign(data): """CWL target to skip alignment and organize input data. """ data = utils.to_single_data(data[0]) work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "align", dd.get_sample_name(data))) work_bam = os.path.join(work_dir, "%s-input.bam" % dd.get_sample_name(data)) if data.get("files"): if data["files"][0].endswith(".cram"): work_bam = cram.to_bam(data["files"][0], work_bam, data) else: assert data["files"][0].endswith(".bam"), data["files"][0] utils.copy_plus(data["files"][0], work_bam) bam.index(work_bam, data["config"]) else: work_bam = None data["align_bam"] = work_bam return data
python
def organize_noalign(data): """CWL target to skip alignment and organize input data. """ data = utils.to_single_data(data[0]) work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "align", dd.get_sample_name(data))) work_bam = os.path.join(work_dir, "%s-input.bam" % dd.get_sample_name(data)) if data.get("files"): if data["files"][0].endswith(".cram"): work_bam = cram.to_bam(data["files"][0], work_bam, data) else: assert data["files"][0].endswith(".bam"), data["files"][0] utils.copy_plus(data["files"][0], work_bam) bam.index(work_bam, data["config"]) else: work_bam = None data["align_bam"] = work_bam return data
[ "def", "organize_noalign", "(", "data", ")", ":", "data", "=", "utils", ".", "to_single_data", "(", "data", "[", "0", "]", ")", "work_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "dd", ".", "get_work_dir", "(", "...
CWL target to skip alignment and organize input data.
[ "CWL", "target", "to", "skip", "alignment", "and", "organize", "input", "data", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/alignment.py#L53-L69
224,226
bcbio/bcbio-nextgen
bcbio/pipeline/alignment.py
align_to_sort_bam
def align_to_sort_bam(fastq1, fastq2, aligner, data): """Align to the named genome build, returning a sorted BAM file. """ names = data["rgnames"] align_dir_parts = [data["dirs"]["work"], "align", names["sample"]] if data.get("disambiguate"): align_dir_parts.append(data["disambiguate"]["genome_build"]) aligner_index = _get_aligner_index(aligner, data) align_dir = utils.safe_makedir(os.path.join(*align_dir_parts)) ref_file = tz.get_in(("reference", "fasta", "base"), data) if fastq1.endswith(".bam"): data = _align_from_bam(fastq1, aligner, aligner_index, ref_file, names, align_dir, data) else: data = _align_from_fastq(fastq1, fastq2, aligner, aligner_index, ref_file, names, align_dir, data) if data["work_bam"] and utils.file_exists(data["work_bam"]): if data.get("align_split") and dd.get_mark_duplicates(data): # If merging later with with bamsormadup need query sorted inputs # but CWL requires a bai file. Create a fake one to make it happy. bam.fake_index(data["work_bam"], data) else: bam.index(data["work_bam"], data["config"]) for extra in ["-sr", "-disc"]: extra_bam = utils.append_stem(data['work_bam'], extra) if utils.file_exists(extra_bam): bam.index(extra_bam, data["config"]) return data
python
def align_to_sort_bam(fastq1, fastq2, aligner, data): """Align to the named genome build, returning a sorted BAM file. """ names = data["rgnames"] align_dir_parts = [data["dirs"]["work"], "align", names["sample"]] if data.get("disambiguate"): align_dir_parts.append(data["disambiguate"]["genome_build"]) aligner_index = _get_aligner_index(aligner, data) align_dir = utils.safe_makedir(os.path.join(*align_dir_parts)) ref_file = tz.get_in(("reference", "fasta", "base"), data) if fastq1.endswith(".bam"): data = _align_from_bam(fastq1, aligner, aligner_index, ref_file, names, align_dir, data) else: data = _align_from_fastq(fastq1, fastq2, aligner, aligner_index, ref_file, names, align_dir, data) if data["work_bam"] and utils.file_exists(data["work_bam"]): if data.get("align_split") and dd.get_mark_duplicates(data): # If merging later with with bamsormadup need query sorted inputs # but CWL requires a bai file. Create a fake one to make it happy. bam.fake_index(data["work_bam"], data) else: bam.index(data["work_bam"], data["config"]) for extra in ["-sr", "-disc"]: extra_bam = utils.append_stem(data['work_bam'], extra) if utils.file_exists(extra_bam): bam.index(extra_bam, data["config"]) return data
[ "def", "align_to_sort_bam", "(", "fastq1", ",", "fastq2", ",", "aligner", ",", "data", ")", ":", "names", "=", "data", "[", "\"rgnames\"", "]", "align_dir_parts", "=", "[", "data", "[", "\"dirs\"", "]", "[", "\"work\"", "]", ",", "\"align\"", ",", "names...
Align to the named genome build, returning a sorted BAM file.
[ "Align", "to", "the", "named", "genome", "build", "returning", "a", "sorted", "BAM", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/alignment.py#L71-L98
224,227
bcbio/bcbio-nextgen
bcbio/pipeline/alignment.py
get_aligner_with_aliases
def get_aligner_with_aliases(aligner, data): """Retrieve aligner index retriever, including aliases for shared. Handles tricky cases like gridss where we need bwa indices even with no aligner specified since they're used internally within GRIDSS. """ aligner_aliases = {"sentieon-bwa": "bwa"} from bcbio import structural if not aligner and "gridss" in structural.get_svcallers(data): aligner = "bwa" return aligner_aliases.get(aligner) or aligner
python
def get_aligner_with_aliases(aligner, data): """Retrieve aligner index retriever, including aliases for shared. Handles tricky cases like gridss where we need bwa indices even with no aligner specified since they're used internally within GRIDSS. """ aligner_aliases = {"sentieon-bwa": "bwa"} from bcbio import structural if not aligner and "gridss" in structural.get_svcallers(data): aligner = "bwa" return aligner_aliases.get(aligner) or aligner
[ "def", "get_aligner_with_aliases", "(", "aligner", ",", "data", ")", ":", "aligner_aliases", "=", "{", "\"sentieon-bwa\"", ":", "\"bwa\"", "}", "from", "bcbio", "import", "structural", "if", "not", "aligner", "and", "\"gridss\"", "in", "structural", ".", "get_sv...
Retrieve aligner index retriever, including aliases for shared. Handles tricky cases like gridss where we need bwa indices even with no aligner specified since they're used internally within GRIDSS.
[ "Retrieve", "aligner", "index", "retriever", "including", "aliases", "for", "shared", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/alignment.py#L100-L110
224,228
bcbio/bcbio-nextgen
bcbio/pipeline/alignment.py
_get_aligner_index
def _get_aligner_index(aligner, data): """Handle multiple specifications of aligner indexes, returning value to pass to aligner. Original bcbio case -- a list of indices. CWL case: a single file with secondaryFiles staged in the same directory. """ aligner_indexes = tz.get_in(("reference", get_aligner_with_aliases(aligner, data), "indexes"), data) # standard bcbio case if aligner_indexes and isinstance(aligner_indexes, (list, tuple)): aligner_index = os.path.commonprefix(aligner_indexes) if aligner_index.endswith("."): aligner_index = aligner_index[:-1] return aligner_index # single file -- check for standard naming or directory elif aligner_indexes and os.path.exists(aligner_indexes): aligner_dir = os.path.dirname(aligner_indexes) aligner_prefix = os.path.splitext(aligner_indexes)[0] if len(glob.glob("%s.*" % aligner_prefix)) > 0: return aligner_prefix else: return aligner_dir if aligner not in allow_noindices(): raise ValueError("Did not find reference indices for aligner %s in genome: %s" % (aligner, data["reference"]))
python
def _get_aligner_index(aligner, data): """Handle multiple specifications of aligner indexes, returning value to pass to aligner. Original bcbio case -- a list of indices. CWL case: a single file with secondaryFiles staged in the same directory. """ aligner_indexes = tz.get_in(("reference", get_aligner_with_aliases(aligner, data), "indexes"), data) # standard bcbio case if aligner_indexes and isinstance(aligner_indexes, (list, tuple)): aligner_index = os.path.commonprefix(aligner_indexes) if aligner_index.endswith("."): aligner_index = aligner_index[:-1] return aligner_index # single file -- check for standard naming or directory elif aligner_indexes and os.path.exists(aligner_indexes): aligner_dir = os.path.dirname(aligner_indexes) aligner_prefix = os.path.splitext(aligner_indexes)[0] if len(glob.glob("%s.*" % aligner_prefix)) > 0: return aligner_prefix else: return aligner_dir if aligner not in allow_noindices(): raise ValueError("Did not find reference indices for aligner %s in genome: %s" % (aligner, data["reference"]))
[ "def", "_get_aligner_index", "(", "aligner", ",", "data", ")", ":", "aligner_indexes", "=", "tz", ".", "get_in", "(", "(", "\"reference\"", ",", "get_aligner_with_aliases", "(", "aligner", ",", "data", ")", ",", "\"indexes\"", ")", ",", "data", ")", "# stand...
Handle multiple specifications of aligner indexes, returning value to pass to aligner. Original bcbio case -- a list of indices. CWL case: a single file with secondaryFiles staged in the same directory.
[ "Handle", "multiple", "specifications", "of", "aligner", "indexes", "returning", "value", "to", "pass", "to", "aligner", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/alignment.py#L115-L139
224,229
bcbio/bcbio-nextgen
bcbio/pipeline/alignment.py
_align_from_fastq
def _align_from_fastq(fastq1, fastq2, aligner, align_ref, sam_ref, names, align_dir, data): """Align from fastq inputs, producing sorted BAM output. """ config = data["config"] align_fn = TOOLS[aligner].align_fn out = align_fn(fastq1, fastq2, align_ref, names, align_dir, data) # handle align functions that update the main data dictionary in place if isinstance(out, dict): assert out.get("work_bam"), (dd.get_sample_name(data), out.get("work_bam")) return out # handle output of raw SAM files that need to be converted to BAM else: work_bam = bam.sam_to_bam(out, config) data["work_bam"] = bam.sort(work_bam, config) return data
python
def _align_from_fastq(fastq1, fastq2, aligner, align_ref, sam_ref, names, align_dir, data): """Align from fastq inputs, producing sorted BAM output. """ config = data["config"] align_fn = TOOLS[aligner].align_fn out = align_fn(fastq1, fastq2, align_ref, names, align_dir, data) # handle align functions that update the main data dictionary in place if isinstance(out, dict): assert out.get("work_bam"), (dd.get_sample_name(data), out.get("work_bam")) return out # handle output of raw SAM files that need to be converted to BAM else: work_bam = bam.sam_to_bam(out, config) data["work_bam"] = bam.sort(work_bam, config) return data
[ "def", "_align_from_fastq", "(", "fastq1", ",", "fastq2", ",", "aligner", ",", "align_ref", ",", "sam_ref", ",", "names", ",", "align_dir", ",", "data", ")", ":", "config", "=", "data", "[", "\"config\"", "]", "align_fn", "=", "TOOLS", "[", "aligner", "]...
Align from fastq inputs, producing sorted BAM output.
[ "Align", "from", "fastq", "inputs", "producing", "sorted", "BAM", "output", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/alignment.py#L155-L170
224,230
bcbio/bcbio-nextgen
bcbio/structural/gridss.py
_finalize_memory
def _finalize_memory(jvm_opts): """GRIDSS does not recommend setting memory between 32 and 48Gb. https://github.com/PapenfussLab/gridss#memory-usage """ avoid_min = 32 avoid_max = 48 out_opts = [] for opt in jvm_opts: if opt.startswith("-Xmx"): spec = opt[4:] val = int(spec[:-1]) mod = spec[-1] if mod.upper() == "M": adjust = 1024 min_val = avoid_min * 1024 max_val = avoid_max * 1024 else: adjust = 1 min_val, max_val = avoid_min, avoid_max if val >= min_val and val < max_val: val = min_val - adjust opt = "%s%s%s" % (opt[:4], val, mod) out_opts.append(opt) return out_opts
python
def _finalize_memory(jvm_opts): """GRIDSS does not recommend setting memory between 32 and 48Gb. https://github.com/PapenfussLab/gridss#memory-usage """ avoid_min = 32 avoid_max = 48 out_opts = [] for opt in jvm_opts: if opt.startswith("-Xmx"): spec = opt[4:] val = int(spec[:-1]) mod = spec[-1] if mod.upper() == "M": adjust = 1024 min_val = avoid_min * 1024 max_val = avoid_max * 1024 else: adjust = 1 min_val, max_val = avoid_min, avoid_max if val >= min_val and val < max_val: val = min_val - adjust opt = "%s%s%s" % (opt[:4], val, mod) out_opts.append(opt) return out_opts
[ "def", "_finalize_memory", "(", "jvm_opts", ")", ":", "avoid_min", "=", "32", "avoid_max", "=", "48", "out_opts", "=", "[", "]", "for", "opt", "in", "jvm_opts", ":", "if", "opt", ".", "startswith", "(", "\"-Xmx\"", ")", ":", "spec", "=", "opt", "[", ...
GRIDSS does not recommend setting memory between 32 and 48Gb. https://github.com/PapenfussLab/gridss#memory-usage
[ "GRIDSS", "does", "not", "recommend", "setting", "memory", "between", "32", "and", "48Gb", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/gridss.py#L70-L94
224,231
bcbio/bcbio-nextgen
bcbio/structural/gridss.py
_setup_reference_files
def _setup_reference_files(data, tx_out_dir): """Create a reference directory with fasta and bwa indices. GRIDSS requires all files in a single directory, so setup with symlinks. This needs bwa aligner indices available, which we ensure with `get_aligner_with_aliases` during YAML sample setup. """ aligner = dd.get_aligner(data) or "bwa" out_dir = utils.safe_makedir(os.path.join(tx_out_dir, aligner)) ref_fasta = dd.get_ref_file(data) ref_files = ["%s%s" % (utils.splitext_plus(ref_fasta)[0], ext) for ext in [".fa", ".fa.fai", ".dict"]] for orig_file in ref_files + tz.get_in(("reference", aligner, "indexes"), data): utils.symlink_plus(orig_file, os.path.join(out_dir, os.path.basename(orig_file))) return os.path.join(out_dir, os.path.basename(ref_fasta))
python
def _setup_reference_files(data, tx_out_dir): """Create a reference directory with fasta and bwa indices. GRIDSS requires all files in a single directory, so setup with symlinks. This needs bwa aligner indices available, which we ensure with `get_aligner_with_aliases` during YAML sample setup. """ aligner = dd.get_aligner(data) or "bwa" out_dir = utils.safe_makedir(os.path.join(tx_out_dir, aligner)) ref_fasta = dd.get_ref_file(data) ref_files = ["%s%s" % (utils.splitext_plus(ref_fasta)[0], ext) for ext in [".fa", ".fa.fai", ".dict"]] for orig_file in ref_files + tz.get_in(("reference", aligner, "indexes"), data): utils.symlink_plus(orig_file, os.path.join(out_dir, os.path.basename(orig_file))) return os.path.join(out_dir, os.path.basename(ref_fasta))
[ "def", "_setup_reference_files", "(", "data", ",", "tx_out_dir", ")", ":", "aligner", "=", "dd", ".", "get_aligner", "(", "data", ")", "or", "\"bwa\"", "out_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "tx_out_dir", ...
Create a reference directory with fasta and bwa indices. GRIDSS requires all files in a single directory, so setup with symlinks. This needs bwa aligner indices available, which we ensure with `get_aligner_with_aliases` during YAML sample setup.
[ "Create", "a", "reference", "directory", "with", "fasta", "and", "bwa", "indices", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/gridss.py#L96-L109
224,232
bcbio/bcbio-nextgen
bcbio/qc/multiqc.py
_add_versions
def _add_versions(samples): """Add tool and data versions to the summary. """ samples[0]["versions"] = {"tools": programs.write_versions(samples[0]["dirs"], samples[0]["config"]), "data": provenancedata.write_versions(samples[0]["dirs"], samples)} return samples
python
def _add_versions(samples): """Add tool and data versions to the summary. """ samples[0]["versions"] = {"tools": programs.write_versions(samples[0]["dirs"], samples[0]["config"]), "data": provenancedata.write_versions(samples[0]["dirs"], samples)} return samples
[ "def", "_add_versions", "(", "samples", ")", ":", "samples", "[", "0", "]", "[", "\"versions\"", "]", "=", "{", "\"tools\"", ":", "programs", ".", "write_versions", "(", "samples", "[", "0", "]", "[", "\"dirs\"", "]", ",", "samples", "[", "0", "]", "...
Add tool and data versions to the summary.
[ "Add", "tool", "and", "data", "versions", "to", "the", "summary", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/multiqc.py#L109-L114
224,233
bcbio/bcbio-nextgen
bcbio/qc/multiqc.py
_summarize_inputs
def _summarize_inputs(samples, out_dir): """Summarize inputs for MultiQC reporting in display. """ logger.info("summarize target information") if samples[0].get("analysis", "").lower() in ["variant", "variant2"]: metrics_dir = utils.safe_makedir(os.path.join(out_dir, "report", "metrics")) samples = _merge_target_information(samples, metrics_dir) logger.info("summarize fastqc") out_dir = utils.safe_makedir(os.path.join(out_dir, "report", "fastqc")) with utils.chdir(out_dir): _merge_fastqc(samples) preseq_samples = [s for s in samples if tz.get_in(["config", "algorithm", "preseq"], s)] if preseq_samples: logger.info("summarize preseq") out_dir = utils.safe_makedir(os.path.join(out_dir, "report", "preseq")) with utils.chdir(out_dir): _merge_preseq(preseq_samples) return samples
python
def _summarize_inputs(samples, out_dir): """Summarize inputs for MultiQC reporting in display. """ logger.info("summarize target information") if samples[0].get("analysis", "").lower() in ["variant", "variant2"]: metrics_dir = utils.safe_makedir(os.path.join(out_dir, "report", "metrics")) samples = _merge_target_information(samples, metrics_dir) logger.info("summarize fastqc") out_dir = utils.safe_makedir(os.path.join(out_dir, "report", "fastqc")) with utils.chdir(out_dir): _merge_fastqc(samples) preseq_samples = [s for s in samples if tz.get_in(["config", "algorithm", "preseq"], s)] if preseq_samples: logger.info("summarize preseq") out_dir = utils.safe_makedir(os.path.join(out_dir, "report", "preseq")) with utils.chdir(out_dir): _merge_preseq(preseq_samples) return samples
[ "def", "_summarize_inputs", "(", "samples", ",", "out_dir", ")", ":", "logger", ".", "info", "(", "\"summarize target information\"", ")", "if", "samples", "[", "0", "]", ".", "get", "(", "\"analysis\"", ",", "\"\"", ")", ".", "lower", "(", ")", "in", "[...
Summarize inputs for MultiQC reporting in display.
[ "Summarize", "inputs", "for", "MultiQC", "reporting", "in", "display", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/multiqc.py#L116-L135
224,234
bcbio/bcbio-nextgen
bcbio/qc/multiqc.py
_work_path_to_rel_final_path
def _work_path_to_rel_final_path(path, upload_path_mapping, upload_base_dir): """ Check if `path` is a work-rooted path, and convert to a relative final-rooted path """ if not path or not isinstance(path, str): return path upload_path = None # First, check in the mapping: if it's there is a direct reference and # it's a file, we immediately return it (saves lots of iterations) if upload_path_mapping.get(path) is not None and os.path.isfile(path): upload_path = upload_path_mapping[path] else: # Not a file: check for elements in the mapping that contain # it paths_to_check = [key for key in upload_path_mapping if path.startswith(key)] if paths_to_check: for work_path in paths_to_check: if os.path.isdir(work_path): final_path = upload_path_mapping[work_path] upload_path = path.replace(work_path, final_path) break if upload_path is not None: return os.path.relpath(upload_path, upload_base_dir) else: return None
python
def _work_path_to_rel_final_path(path, upload_path_mapping, upload_base_dir): """ Check if `path` is a work-rooted path, and convert to a relative final-rooted path """ if not path or not isinstance(path, str): return path upload_path = None # First, check in the mapping: if it's there is a direct reference and # it's a file, we immediately return it (saves lots of iterations) if upload_path_mapping.get(path) is not None and os.path.isfile(path): upload_path = upload_path_mapping[path] else: # Not a file: check for elements in the mapping that contain # it paths_to_check = [key for key in upload_path_mapping if path.startswith(key)] if paths_to_check: for work_path in paths_to_check: if os.path.isdir(work_path): final_path = upload_path_mapping[work_path] upload_path = path.replace(work_path, final_path) break if upload_path is not None: return os.path.relpath(upload_path, upload_base_dir) else: return None
[ "def", "_work_path_to_rel_final_path", "(", "path", ",", "upload_path_mapping", ",", "upload_base_dir", ")", ":", "if", "not", "path", "or", "not", "isinstance", "(", "path", ",", "str", ")", ":", "return", "path", "upload_path", "=", "None", "# First, check in ...
Check if `path` is a work-rooted path, and convert to a relative final-rooted path
[ "Check", "if", "path", "is", "a", "work", "-", "rooted", "path", "and", "convert", "to", "a", "relative", "final", "-", "rooted", "path" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/multiqc.py#L195-L222
224,235
bcbio/bcbio-nextgen
bcbio/qc/multiqc.py
_one_exists
def _one_exists(input_files): """ at least one file must exist for multiqc to run properly """ for f in input_files: if os.path.exists(f): return True return False
python
def _one_exists(input_files): """ at least one file must exist for multiqc to run properly """ for f in input_files: if os.path.exists(f): return True return False
[ "def", "_one_exists", "(", "input_files", ")", ":", "for", "f", "in", "input_files", ":", "if", "os", ".", "path", ".", "exists", "(", "f", ")", ":", "return", "True", "return", "False" ]
at least one file must exist for multiqc to run properly
[ "at", "least", "one", "file", "must", "exist", "for", "multiqc", "to", "run", "properly" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/multiqc.py#L224-L231
224,236
bcbio/bcbio-nextgen
bcbio/qc/multiqc.py
_get_input_files
def _get_input_files(samples, base_dir, tx_out_dir): """Retrieve input files, keyed by sample and QC method name. Stages files into the work directory to ensure correct names for MultiQC sample assessment when running with CWL. """ in_files = collections.defaultdict(list) for data in samples: sum_qc = tz.get_in(["summary", "qc"], data, {}) if sum_qc in [None, "None"]: sum_qc = {} elif isinstance(sum_qc, six.string_types): sum_qc = {dd.get_algorithm_qc(data)[0]: sum_qc} elif not isinstance(sum_qc, dict): raise ValueError("Unexpected summary qc: %s" % sum_qc) for program, pfiles in sum_qc.items(): if isinstance(pfiles, dict): pfiles = [pfiles["base"]] + pfiles.get("secondary", []) # CWL: presents output files as single file plus associated secondary files elif isinstance(pfiles, six.string_types): if os.path.exists(pfiles): pfiles = [os.path.join(basedir, f) for basedir, subdir, filenames in os.walk(os.path.dirname(pfiles)) for f in filenames] else: pfiles = [] in_files[(dd.get_sample_name(data), program)].extend(pfiles) staged_files = [] for (sample, program), files in in_files.items(): cur_dir = utils.safe_makedir(os.path.join(base_dir, "inputs", sample, program)) for f in files: if _check_multiqc_input(f) and _is_good_file_for_multiqc(f): if _in_temp_directory(f) or any([cwlutils.is_cwl_run(d) for d in samples]): staged_f = os.path.join(cur_dir, os.path.basename(f)) shutil.copy(f, staged_f) staged_files.append(staged_f) else: staged_files.append(f) staged_files.extend(get_qsig_multiqc_files(samples)) # Back compatible -- to migrate to explicit specifications in input YAML if not any([cwlutils.is_cwl_run(d) for d in samples]): staged_files += ["trimmed", "htseq-count/*summary"] # Add in created target_info file if os.path.isfile(os.path.join(base_dir, "report", "metrics", "target_info.yaml")): staged_files += [os.path.join(base_dir, "report", "metrics", "target_info.yaml")] return sorted(list(set(staged_files)))
python
def _get_input_files(samples, base_dir, tx_out_dir): """Retrieve input files, keyed by sample and QC method name. Stages files into the work directory to ensure correct names for MultiQC sample assessment when running with CWL. """ in_files = collections.defaultdict(list) for data in samples: sum_qc = tz.get_in(["summary", "qc"], data, {}) if sum_qc in [None, "None"]: sum_qc = {} elif isinstance(sum_qc, six.string_types): sum_qc = {dd.get_algorithm_qc(data)[0]: sum_qc} elif not isinstance(sum_qc, dict): raise ValueError("Unexpected summary qc: %s" % sum_qc) for program, pfiles in sum_qc.items(): if isinstance(pfiles, dict): pfiles = [pfiles["base"]] + pfiles.get("secondary", []) # CWL: presents output files as single file plus associated secondary files elif isinstance(pfiles, six.string_types): if os.path.exists(pfiles): pfiles = [os.path.join(basedir, f) for basedir, subdir, filenames in os.walk(os.path.dirname(pfiles)) for f in filenames] else: pfiles = [] in_files[(dd.get_sample_name(data), program)].extend(pfiles) staged_files = [] for (sample, program), files in in_files.items(): cur_dir = utils.safe_makedir(os.path.join(base_dir, "inputs", sample, program)) for f in files: if _check_multiqc_input(f) and _is_good_file_for_multiqc(f): if _in_temp_directory(f) or any([cwlutils.is_cwl_run(d) for d in samples]): staged_f = os.path.join(cur_dir, os.path.basename(f)) shutil.copy(f, staged_f) staged_files.append(staged_f) else: staged_files.append(f) staged_files.extend(get_qsig_multiqc_files(samples)) # Back compatible -- to migrate to explicit specifications in input YAML if not any([cwlutils.is_cwl_run(d) for d in samples]): staged_files += ["trimmed", "htseq-count/*summary"] # Add in created target_info file if os.path.isfile(os.path.join(base_dir, "report", "metrics", "target_info.yaml")): staged_files += [os.path.join(base_dir, "report", "metrics", "target_info.yaml")] return sorted(list(set(staged_files)))
[ "def", "_get_input_files", "(", "samples", ",", "base_dir", ",", "tx_out_dir", ")", ":", "in_files", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "data", "in", "samples", ":", "sum_qc", "=", "tz", ".", "get_in", "(", "[", "\"summary\"",...
Retrieve input files, keyed by sample and QC method name. Stages files into the work directory to ensure correct names for MultiQC sample assessment when running with CWL.
[ "Retrieve", "input", "files", "keyed", "by", "sample", "and", "QC", "method", "name", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/multiqc.py#L233-L276
224,237
bcbio/bcbio-nextgen
bcbio/qc/multiqc.py
_group_by_sample_and_batch
def _group_by_sample_and_batch(samples): """Group samples split by QC method back one per sample-batch. """ out = collections.defaultdict(list) for data in samples: out[(dd.get_sample_name(data), dd.get_align_bam(data), tuple(_get_batches(data)))].append(data) return [xs[0] for xs in out.values()]
python
def _group_by_sample_and_batch(samples): """Group samples split by QC method back one per sample-batch. """ out = collections.defaultdict(list) for data in samples: out[(dd.get_sample_name(data), dd.get_align_bam(data), tuple(_get_batches(data)))].append(data) return [xs[0] for xs in out.values()]
[ "def", "_group_by_sample_and_batch", "(", "samples", ")", ":", "out", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "data", "in", "samples", ":", "out", "[", "(", "dd", ".", "get_sample_name", "(", "data", ")", ",", "dd", ".", "get_ali...
Group samples split by QC method back one per sample-batch.
[ "Group", "samples", "split", "by", "QC", "method", "back", "one", "per", "sample", "-", "batch", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/multiqc.py#L287-L293
224,238
bcbio/bcbio-nextgen
bcbio/qc/multiqc.py
_has_bcftools_germline_stats
def _has_bcftools_germline_stats(data): """Check for the presence of a germline stats file, CWL compatible. """ stats_file = tz.get_in(["summary", "qc"], data) if isinstance(stats_file, dict): stats_file = tz.get_in(["variants", "base"], stats_file) if not stats_file: stats_file = "" return stats_file.find("bcftools_stats_germline") > 0
python
def _has_bcftools_germline_stats(data): """Check for the presence of a germline stats file, CWL compatible. """ stats_file = tz.get_in(["summary", "qc"], data) if isinstance(stats_file, dict): stats_file = tz.get_in(["variants", "base"], stats_file) if not stats_file: stats_file = "" return stats_file.find("bcftools_stats_germline") > 0
[ "def", "_has_bcftools_germline_stats", "(", "data", ")", ":", "stats_file", "=", "tz", ".", "get_in", "(", "[", "\"summary\"", ",", "\"qc\"", "]", ",", "data", ")", "if", "isinstance", "(", "stats_file", ",", "dict", ")", ":", "stats_file", "=", "tz", "....
Check for the presence of a germline stats file, CWL compatible.
[ "Check", "for", "the", "presence", "of", "a", "germline", "stats", "file", "CWL", "compatible", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/multiqc.py#L397-L405
224,239
bcbio/bcbio-nextgen
bcbio/qc/multiqc.py
_is_good_file_for_multiqc
def _is_good_file_for_multiqc(fpath): """Returns False if the file is binary or image.""" # Use mimetypes to exclude binary files where possible (ftype, encoding) = mimetypes.guess_type(fpath) if encoding is not None: return False if ftype is not None and ftype.startswith('image'): return False return True
python
def _is_good_file_for_multiqc(fpath): """Returns False if the file is binary or image.""" # Use mimetypes to exclude binary files where possible (ftype, encoding) = mimetypes.guess_type(fpath) if encoding is not None: return False if ftype is not None and ftype.startswith('image'): return False return True
[ "def", "_is_good_file_for_multiqc", "(", "fpath", ")", ":", "# Use mimetypes to exclude binary files where possible", "(", "ftype", ",", "encoding", ")", "=", "mimetypes", ".", "guess_type", "(", "fpath", ")", "if", "encoding", "is", "not", "None", ":", "return", ...
Returns False if the file is binary or image.
[ "Returns", "False", "if", "the", "file", "is", "binary", "or", "image", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/multiqc.py#L414-L422
224,240
bcbio/bcbio-nextgen
bcbio/qc/multiqc.py
_parse_disambiguate
def _parse_disambiguate(disambiguatestatsfilename): """Parse disambiguation stats from given file. """ disambig_stats = [0, 0, 0] with open(disambiguatestatsfilename, "r") as in_handle: for i, line in enumerate(in_handle): fields = line.strip().split("\t") if i == 0: assert fields == ['sample', 'unique species A pairs', 'unique species B pairs', 'ambiguous pairs'] else: disambig_stats = [x + int(y) for x, y in zip(disambig_stats, fields[1:])] return disambig_stats
python
def _parse_disambiguate(disambiguatestatsfilename): """Parse disambiguation stats from given file. """ disambig_stats = [0, 0, 0] with open(disambiguatestatsfilename, "r") as in_handle: for i, line in enumerate(in_handle): fields = line.strip().split("\t") if i == 0: assert fields == ['sample', 'unique species A pairs', 'unique species B pairs', 'ambiguous pairs'] else: disambig_stats = [x + int(y) for x, y in zip(disambig_stats, fields[1:])] return disambig_stats
[ "def", "_parse_disambiguate", "(", "disambiguatestatsfilename", ")", ":", "disambig_stats", "=", "[", "0", ",", "0", ",", "0", "]", "with", "open", "(", "disambiguatestatsfilename", ",", "\"r\"", ")", "as", "in_handle", ":", "for", "i", ",", "line", "in", ...
Parse disambiguation stats from given file.
[ "Parse", "disambiguation", "stats", "from", "given", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/multiqc.py#L424-L435
224,241
bcbio/bcbio-nextgen
bcbio/qc/multiqc.py
_merge_metrics
def _merge_metrics(samples, out_dir): """Merge metrics from multiple QC steps """ logger.info("summarize metrics") out_dir = utils.safe_makedir(os.path.join(out_dir, "report", "metrics")) sample_metrics = collections.defaultdict(dict) for s in samples: s = _add_disambiguate(s) m = tz.get_in(['summary', 'metrics'], s) if isinstance(m, six.string_types): m = json.loads(m) if m: for me in m.keys(): if isinstance(m[me], list) or isinstance(m[me], dict) or isinstance(m[me], tuple): m.pop(me, None) sample_metrics[dd.get_sample_name(s)].update(m) out = [] for sample_name, m in sample_metrics.items(): sample_file = os.path.join(out_dir, "%s_bcbio.txt" % sample_name) with file_transaction(samples[0], sample_file) as tx_out_file: dt = pd.DataFrame(m, index=['1']) dt.columns = [k.replace(" ", "_").replace("(", "").replace(")", "") for k in dt.columns] dt['sample'] = sample_name dt['rRNA_rate'] = m.get('rRNA_rate', "NA") dt['RiP_pct'] = "%.3f" % (int(m.get("RiP", 0)) / float(m.get("Total_reads", 1)) * 100) dt = _fix_duplicated_rate(dt) dt.transpose().to_csv(tx_out_file, sep="\t", header=False) out.append(sample_file) return out
python
def _merge_metrics(samples, out_dir): """Merge metrics from multiple QC steps """ logger.info("summarize metrics") out_dir = utils.safe_makedir(os.path.join(out_dir, "report", "metrics")) sample_metrics = collections.defaultdict(dict) for s in samples: s = _add_disambiguate(s) m = tz.get_in(['summary', 'metrics'], s) if isinstance(m, six.string_types): m = json.loads(m) if m: for me in m.keys(): if isinstance(m[me], list) or isinstance(m[me], dict) or isinstance(m[me], tuple): m.pop(me, None) sample_metrics[dd.get_sample_name(s)].update(m) out = [] for sample_name, m in sample_metrics.items(): sample_file = os.path.join(out_dir, "%s_bcbio.txt" % sample_name) with file_transaction(samples[0], sample_file) as tx_out_file: dt = pd.DataFrame(m, index=['1']) dt.columns = [k.replace(" ", "_").replace("(", "").replace(")", "") for k in dt.columns] dt['sample'] = sample_name dt['rRNA_rate'] = m.get('rRNA_rate', "NA") dt['RiP_pct'] = "%.3f" % (int(m.get("RiP", 0)) / float(m.get("Total_reads", 1)) * 100) dt = _fix_duplicated_rate(dt) dt.transpose().to_csv(tx_out_file, sep="\t", header=False) out.append(sample_file) return out
[ "def", "_merge_metrics", "(", "samples", ",", "out_dir", ")", ":", "logger", ".", "info", "(", "\"summarize metrics\"", ")", "out_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "out_dir", ",", "\"report\"", ",", "\"metri...
Merge metrics from multiple QC steps
[ "Merge", "metrics", "from", "multiple", "QC", "steps" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/multiqc.py#L456-L484
224,242
bcbio/bcbio-nextgen
bcbio/qc/multiqc.py
_merge_fastqc
def _merge_fastqc(samples): """ merge all fastqc samples into one by module """ fastqc_list = collections.defaultdict(list) seen = set() for data in samples: name = dd.get_sample_name(data) if name in seen: continue seen.add(name) fns = glob.glob(os.path.join(dd.get_work_dir(data), "qc", dd.get_sample_name(data), "fastqc") + "/*") for fn in fns: if fn.endswith("tsv"): metric = os.path.basename(fn) fastqc_list[metric].append([name, fn]) for metric in fastqc_list: dt_by_sample = [] for fn in fastqc_list[metric]: dt = pd.read_csv(fn[1], sep="\t") dt['sample'] = fn[0] dt_by_sample.append(dt) dt = utils.rbind(dt_by_sample) dt.to_csv(metric, sep="\t", index=False, mode ='w') return samples
python
def _merge_fastqc(samples): """ merge all fastqc samples into one by module """ fastqc_list = collections.defaultdict(list) seen = set() for data in samples: name = dd.get_sample_name(data) if name in seen: continue seen.add(name) fns = glob.glob(os.path.join(dd.get_work_dir(data), "qc", dd.get_sample_name(data), "fastqc") + "/*") for fn in fns: if fn.endswith("tsv"): metric = os.path.basename(fn) fastqc_list[metric].append([name, fn]) for metric in fastqc_list: dt_by_sample = [] for fn in fastqc_list[metric]: dt = pd.read_csv(fn[1], sep="\t") dt['sample'] = fn[0] dt_by_sample.append(dt) dt = utils.rbind(dt_by_sample) dt.to_csv(metric, sep="\t", index=False, mode ='w') return samples
[ "def", "_merge_fastqc", "(", "samples", ")", ":", "fastqc_list", "=", "collections", ".", "defaultdict", "(", "list", ")", "seen", "=", "set", "(", ")", "for", "data", "in", "samples", ":", "name", "=", "dd", ".", "get_sample_name", "(", "data", ")", "...
merge all fastqc samples into one by module
[ "merge", "all", "fastqc", "samples", "into", "one", "by", "module" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/multiqc.py#L486-L511
224,243
bcbio/bcbio-nextgen
scripts/utils/hla_loh_comparison.py
_create_plot
def _create_plot(tumor, in_glob, out_ext, page=1): """Create an output plot for the given PDF in the images directory. """ out_dir = utils.safe_makedir("images") out_name = os.path.join(out_dir, "%s-%s" % (tumor, out_ext)) in_file = glob.glob(in_glob)[0] cmd = ["pdftoppm", in_file, out_name, "-png", "-f", page, "-singlefile"] if not os.path.exists(out_name + ".png"): subprocess.check_call([str(x) for x in cmd]) return out_name + ".png"
python
def _create_plot(tumor, in_glob, out_ext, page=1): """Create an output plot for the given PDF in the images directory. """ out_dir = utils.safe_makedir("images") out_name = os.path.join(out_dir, "%s-%s" % (tumor, out_ext)) in_file = glob.glob(in_glob)[0] cmd = ["pdftoppm", in_file, out_name, "-png", "-f", page, "-singlefile"] if not os.path.exists(out_name + ".png"): subprocess.check_call([str(x) for x in cmd]) return out_name + ".png"
[ "def", "_create_plot", "(", "tumor", ",", "in_glob", ",", "out_ext", ",", "page", "=", "1", ")", ":", "out_dir", "=", "utils", ".", "safe_makedir", "(", "\"images\"", ")", "out_name", "=", "os", ".", "path", ".", "join", "(", "out_dir", ",", "\"%s-%s\"...
Create an output plot for the given PDF in the images directory.
[ "Create", "an", "output", "plot", "for", "the", "given", "PDF", "in", "the", "images", "directory", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hla_loh_comparison.py#L64-L73
224,244
bcbio/bcbio-nextgen
scripts/utils/hla_loh_comparison.py
_get_cromwell_execution_dir
def _get_cromwell_execution_dir(base_dir, target_glob): """Retrieve the baseline directory with cromwell output files. Handles Cromwell restarts where there are multiple work directories and we traverse symlinks back to the original. """ cur_dir = glob.glob(os.path.join(base_dir, target_glob))[0] if os.path.exists(os.path.join(cur_dir, "cwl.output.json")): return base_dir else: symlink_dir = os.path.dirname(os.path.realpath(os.path.join(cur_dir, "script"))) ref_base = os.path.dirname(base_dir) new_guid = symlink_dir[symlink_dir.find(ref_base) + len(ref_base) + 1:].split("/")[0] return _get_cromwell_execution_dir(os.path.join(ref_base, new_guid), target_glob)
python
def _get_cromwell_execution_dir(base_dir, target_glob): """Retrieve the baseline directory with cromwell output files. Handles Cromwell restarts where there are multiple work directories and we traverse symlinks back to the original. """ cur_dir = glob.glob(os.path.join(base_dir, target_glob))[0] if os.path.exists(os.path.join(cur_dir, "cwl.output.json")): return base_dir else: symlink_dir = os.path.dirname(os.path.realpath(os.path.join(cur_dir, "script"))) ref_base = os.path.dirname(base_dir) new_guid = symlink_dir[symlink_dir.find(ref_base) + len(ref_base) + 1:].split("/")[0] return _get_cromwell_execution_dir(os.path.join(ref_base, new_guid), target_glob)
[ "def", "_get_cromwell_execution_dir", "(", "base_dir", ",", "target_glob", ")", ":", "cur_dir", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "base_dir", ",", "target_glob", ")", ")", "[", "0", "]", "if", "os", ".", "path", ".", ...
Retrieve the baseline directory with cromwell output files. Handles Cromwell restarts where there are multiple work directories and we traverse symlinks back to the original.
[ "Retrieve", "the", "baseline", "directory", "with", "cromwell", "output", "files", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hla_loh_comparison.py#L232-L245
224,245
bcbio/bcbio-nextgen
scripts/utils/hla_loh_comparison.py
prep_bam_inputs
def prep_bam_inputs(out_dir, sample, call_file, bam_file): """Prepare expected input BAM files from pre-aligned. """ base = utils.splitext_plus(os.path.basename(bam_file))[0] with open(call_file) as in_handle: for cur_hla in (x.strip() for x in in_handle): out_file = os.path.join(utils.safe_makedir(os.path.join(out_dir, base)), "%s.type.%s.filtered.bam" % (base, cur_hla)) if not os.path.exists(out_file): cmd = ["samtools", "view", "-b","-o", out_file, bam_file, cur_hla] subprocess.check_call(cmd)
python
def prep_bam_inputs(out_dir, sample, call_file, bam_file): """Prepare expected input BAM files from pre-aligned. """ base = utils.splitext_plus(os.path.basename(bam_file))[0] with open(call_file) as in_handle: for cur_hla in (x.strip() for x in in_handle): out_file = os.path.join(utils.safe_makedir(os.path.join(out_dir, base)), "%s.type.%s.filtered.bam" % (base, cur_hla)) if not os.path.exists(out_file): cmd = ["samtools", "view", "-b","-o", out_file, bam_file, cur_hla] subprocess.check_call(cmd)
[ "def", "prep_bam_inputs", "(", "out_dir", ",", "sample", ",", "call_file", ",", "bam_file", ")", ":", "base", "=", "utils", ".", "splitext_plus", "(", "os", ".", "path", ".", "basename", "(", "bam_file", ")", ")", "[", "0", "]", "with", "open", "(", ...
Prepare expected input BAM files from pre-aligned.
[ "Prepare", "expected", "input", "BAM", "files", "from", "pre", "-", "aligned", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hla_loh_comparison.py#L247-L257
224,246
bcbio/bcbio-nextgen
scripts/utils/hla_loh_comparison.py
get_hla
def get_hla(sample, cromwell_dir, hla_glob): """Retrieve HLA calls and input fastqs for a sample. """ hla_dir = glob.glob(os.path.join(cromwell_dir, hla_glob, "align", sample, "hla"))[0] fastq = os.path.join(hla_dir, "OptiType-HLA-A_B_C-input.fq") calls = os.path.join(hla_dir, "%s-optitype.csv" % sample) return fastq, calls
python
def get_hla(sample, cromwell_dir, hla_glob): """Retrieve HLA calls and input fastqs for a sample. """ hla_dir = glob.glob(os.path.join(cromwell_dir, hla_glob, "align", sample, "hla"))[0] fastq = os.path.join(hla_dir, "OptiType-HLA-A_B_C-input.fq") calls = os.path.join(hla_dir, "%s-optitype.csv" % sample) return fastq, calls
[ "def", "get_hla", "(", "sample", ",", "cromwell_dir", ",", "hla_glob", ")", ":", "hla_dir", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "cromwell_dir", ",", "hla_glob", ",", "\"align\"", ",", "sample", ",", "\"hla\"", ")", ")", ...
Retrieve HLA calls and input fastqs for a sample.
[ "Retrieve", "HLA", "calls", "and", "input", "fastqs", "for", "a", "sample", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hla_loh_comparison.py#L278-L284
224,247
bcbio/bcbio-nextgen
scripts/utils/hla_loh_comparison.py
name_to_absolute
def name_to_absolute(x): """Convert standard hg38 HLA name into ABSOLUTE naming. """ for c in ["-", "*", ":"]: x = x.replace(c, "_") x = x.lower() return x
python
def name_to_absolute(x): """Convert standard hg38 HLA name into ABSOLUTE naming. """ for c in ["-", "*", ":"]: x = x.replace(c, "_") x = x.lower() return x
[ "def", "name_to_absolute", "(", "x", ")", ":", "for", "c", "in", "[", "\"-\"", ",", "\"*\"", ",", "\":\"", "]", ":", "x", "=", "x", ".", "replace", "(", "c", ",", "\"_\"", ")", "x", "=", "x", ".", "lower", "(", ")", "return", "x" ]
Convert standard hg38 HLA name into ABSOLUTE naming.
[ "Convert", "standard", "hg38", "HLA", "name", "into", "ABSOLUTE", "naming", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hla_loh_comparison.py#L286-L292
224,248
bcbio/bcbio-nextgen
scripts/utils/hla_loh_comparison.py
get_hla_choice
def get_hla_choice(h, hlas, normal_bam, tumor_bam): """Retrieve matching HLA with best read support in both tumor and normal """ def get_counts(bam_file): counts = {} for line in subprocess.check_output(["samtools", "idxstats", bam_file]).split("\n"): if line.startswith(h): name, _, count, _ = line.split() counts[name] = int(count) return counts tcounts = get_counts(tumor_bam) ncounts = get_counts(normal_bam) check_hlas = [x for x in hlas if x.startswith(h) and tcounts.get(x, 0) > 0 and ncounts.get(x, 0) > 0] cur_hlas = sorted(check_hlas, key=lambda x: ncounts[x], reverse=True) #print(cur_hlas[0], tcounts.get(cur_hlas[0]), ncounts.get(cur_hlas[0])) return cur_hlas[0]
python
def get_hla_choice(h, hlas, normal_bam, tumor_bam): """Retrieve matching HLA with best read support in both tumor and normal """ def get_counts(bam_file): counts = {} for line in subprocess.check_output(["samtools", "idxstats", bam_file]).split("\n"): if line.startswith(h): name, _, count, _ = line.split() counts[name] = int(count) return counts tcounts = get_counts(tumor_bam) ncounts = get_counts(normal_bam) check_hlas = [x for x in hlas if x.startswith(h) and tcounts.get(x, 0) > 0 and ncounts.get(x, 0) > 0] cur_hlas = sorted(check_hlas, key=lambda x: ncounts[x], reverse=True) #print(cur_hlas[0], tcounts.get(cur_hlas[0]), ncounts.get(cur_hlas[0])) return cur_hlas[0]
[ "def", "get_hla_choice", "(", "h", ",", "hlas", ",", "normal_bam", ",", "tumor_bam", ")", ":", "def", "get_counts", "(", "bam_file", ")", ":", "counts", "=", "{", "}", "for", "line", "in", "subprocess", ".", "check_output", "(", "[", "\"samtools\"", ",",...
Retrieve matching HLA with best read support in both tumor and normal
[ "Retrieve", "matching", "HLA", "with", "best", "read", "support", "in", "both", "tumor", "and", "normal" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hla_loh_comparison.py#L294-L309
224,249
bcbio/bcbio-nextgen
scripts/utils/hla_loh_comparison.py
prep_hla
def prep_hla(work_dir, sample, calls, hlas, normal_bam, tumor_bam): """Convert HLAs into ABSOLUTE format for use with LOHHLA. LOHHLA hard codes names to hla_a, hla_b, hla_c so need to move """ work_dir = utils.safe_makedir(os.path.join(work_dir, sample, "inputs")) hla_file = os.path.join(work_dir, "%s-hlas.txt" % sample) with open(calls) as in_handle: with open(hla_file, "w") as out_handle: next(in_handle) # header for line in in_handle: _, _, a, _, _ = line.strip().split(",") a1, a2 = a.split(";") out_handle.write(get_hla_choice(name_to_absolute(a1), hlas, normal_bam, tumor_bam) + "\n") out_handle.write(get_hla_choice(name_to_absolute(a2), hlas, normal_bam, tumor_bam) + "\n") return hla_file
python
def prep_hla(work_dir, sample, calls, hlas, normal_bam, tumor_bam): """Convert HLAs into ABSOLUTE format for use with LOHHLA. LOHHLA hard codes names to hla_a, hla_b, hla_c so need to move """ work_dir = utils.safe_makedir(os.path.join(work_dir, sample, "inputs")) hla_file = os.path.join(work_dir, "%s-hlas.txt" % sample) with open(calls) as in_handle: with open(hla_file, "w") as out_handle: next(in_handle) # header for line in in_handle: _, _, a, _, _ = line.strip().split(",") a1, a2 = a.split(";") out_handle.write(get_hla_choice(name_to_absolute(a1), hlas, normal_bam, tumor_bam) + "\n") out_handle.write(get_hla_choice(name_to_absolute(a2), hlas, normal_bam, tumor_bam) + "\n") return hla_file
[ "def", "prep_hla", "(", "work_dir", ",", "sample", ",", "calls", ",", "hlas", ",", "normal_bam", ",", "tumor_bam", ")", ":", "work_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "sample", ",", "\"inp...
Convert HLAs into ABSOLUTE format for use with LOHHLA. LOHHLA hard codes names to hla_a, hla_b, hla_c so need to move
[ "Convert", "HLAs", "into", "ABSOLUTE", "format", "for", "use", "with", "LOHHLA", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hla_loh_comparison.py#L311-L326
224,250
bcbio/bcbio-nextgen
scripts/utils/hla_loh_comparison.py
prep_ploidy
def prep_ploidy(work_dir, sample, bam_file, cromwell_dir, sv_glob): """Create LOHHLA compatible input ploidy file from PureCN output. """ purecn_file = _get_cromwell_file(cromwell_dir, sv_glob, dict(sample=sample, method="purecn", ext="purecn.csv")) work_dir = utils.safe_makedir(os.path.join(work_dir, sample, "inputs")) out_file = os.path.join(work_dir, "%s-solutions.txt" % sample) with open(purecn_file) as in_handle: reader = csv.reader(in_handle) purecn_stats = dict(zip(next(reader), next(reader))) with open(out_file, "w") as out_handle: out_handle.write("Ploidy\ttumorPurity\ttumorPloidy\n") lohhla_name = utils.splitext_plus(os.path.basename(bam_file))[0] out_handle.write("%s\t%s\t%s\t%s\n" % (lohhla_name, purecn_stats["Ploidy"], purecn_stats["Purity"], purecn_stats["Ploidy"])) return out_file
python
def prep_ploidy(work_dir, sample, bam_file, cromwell_dir, sv_glob): """Create LOHHLA compatible input ploidy file from PureCN output. """ purecn_file = _get_cromwell_file(cromwell_dir, sv_glob, dict(sample=sample, method="purecn", ext="purecn.csv")) work_dir = utils.safe_makedir(os.path.join(work_dir, sample, "inputs")) out_file = os.path.join(work_dir, "%s-solutions.txt" % sample) with open(purecn_file) as in_handle: reader = csv.reader(in_handle) purecn_stats = dict(zip(next(reader), next(reader))) with open(out_file, "w") as out_handle: out_handle.write("Ploidy\ttumorPurity\ttumorPloidy\n") lohhla_name = utils.splitext_plus(os.path.basename(bam_file))[0] out_handle.write("%s\t%s\t%s\t%s\n" % (lohhla_name, purecn_stats["Ploidy"], purecn_stats["Purity"], purecn_stats["Ploidy"])) return out_file
[ "def", "prep_ploidy", "(", "work_dir", ",", "sample", ",", "bam_file", ",", "cromwell_dir", ",", "sv_glob", ")", ":", "purecn_file", "=", "_get_cromwell_file", "(", "cromwell_dir", ",", "sv_glob", ",", "dict", "(", "sample", "=", "sample", ",", "method", "="...
Create LOHHLA compatible input ploidy file from PureCN output.
[ "Create", "LOHHLA", "compatible", "input", "ploidy", "file", "from", "PureCN", "output", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hla_loh_comparison.py#L328-L342
224,251
bcbio/bcbio-nextgen
bcbio/ngsalign/bowtie.py
_bowtie_args_from_config
def _bowtie_args_from_config(data): """Configurable high level options for bowtie. """ config = data['config'] qual_format = config["algorithm"].get("quality_format", "") if qual_format.lower() == "illumina": qual_flags = ["--phred64-quals"] else: qual_flags = [] multi_mappers = config["algorithm"].get("multiple_mappers", True) multi_flags = ["-M", 1] if multi_mappers else ["-m", 1] multi_flags = [] if data["analysis"].lower().startswith("smallrna-seq") else multi_flags cores = config.get("resources", {}).get("bowtie", {}).get("cores", None) num_cores = config["algorithm"].get("num_cores", 1) core_flags = ["-p", str(num_cores)] if num_cores > 1 else [] return core_flags + qual_flags + multi_flags
python
def _bowtie_args_from_config(data): """Configurable high level options for bowtie. """ config = data['config'] qual_format = config["algorithm"].get("quality_format", "") if qual_format.lower() == "illumina": qual_flags = ["--phred64-quals"] else: qual_flags = [] multi_mappers = config["algorithm"].get("multiple_mappers", True) multi_flags = ["-M", 1] if multi_mappers else ["-m", 1] multi_flags = [] if data["analysis"].lower().startswith("smallrna-seq") else multi_flags cores = config.get("resources", {}).get("bowtie", {}).get("cores", None) num_cores = config["algorithm"].get("num_cores", 1) core_flags = ["-p", str(num_cores)] if num_cores > 1 else [] return core_flags + qual_flags + multi_flags
[ "def", "_bowtie_args_from_config", "(", "data", ")", ":", "config", "=", "data", "[", "'config'", "]", "qual_format", "=", "config", "[", "\"algorithm\"", "]", ".", "get", "(", "\"quality_format\"", ",", "\"\"", ")", "if", "qual_format", ".", "lower", "(", ...
Configurable high level options for bowtie.
[ "Configurable", "high", "level", "options", "for", "bowtie", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/bowtie.py#L13-L28
224,252
bcbio/bcbio-nextgen
bcbio/ngsalign/bowtie.py
align
def align(fastq_file, pair_file, ref_file, names, align_dir, data, extra_args=None): """Do standard or paired end alignment with bowtie. """ num_hits = 1 if data["analysis"].lower().startswith("smallrna-seq"): num_hits = 1000 config = data['config'] out_file = os.path.join(align_dir, "{0}-sort.bam".format(dd.get_sample_name(data))) if data.get("align_split"): final_file = out_file out_file, data = alignprep.setup_combine(final_file, data) fastq_file, pair_file = alignprep.split_namedpipe_cls(fastq_file, pair_file, data) else: final_file = None if fastq_file.endswith(".gz"): fastq_file = "<(gunzip -c %s)" % fastq_file if pair_file: pair_file = "<(gunzip -c %s)" % pair_file if not utils.file_exists(out_file) and (final_file is None or not utils.file_exists(final_file)): with postalign.tobam_cl(data, out_file, pair_file is not None) as (tobam_cl, tx_out_file): cl = [config_utils.get_program("bowtie", config)] cl += _bowtie_args_from_config(data) cl += extra_args if extra_args is not None else [] cl += ["-q", "-v", 2, "-k", num_hits, "-X", 2000, # default is too selective for most data "--best", "--strata", "--sam", ref_file] if pair_file: cl += ["-1", fastq_file, "-2", pair_file] else: cl += [fastq_file] cl = [str(i) for i in cl] fix_rg_cmd = r"samtools addreplacerg -r '%s' -" % novoalign.get_rg_info(data["rgnames"]) if fix_rg_cmd: cmd = " ".join(cl) + " | " + fix_rg_cmd + " | " + tobam_cl else: cmd = " ".join(cl) + " | " + tobam_cl do.run(cmd, "Running Bowtie on %s and %s." % (fastq_file, pair_file), data) return out_file
python
def align(fastq_file, pair_file, ref_file, names, align_dir, data, extra_args=None): """Do standard or paired end alignment with bowtie. """ num_hits = 1 if data["analysis"].lower().startswith("smallrna-seq"): num_hits = 1000 config = data['config'] out_file = os.path.join(align_dir, "{0}-sort.bam".format(dd.get_sample_name(data))) if data.get("align_split"): final_file = out_file out_file, data = alignprep.setup_combine(final_file, data) fastq_file, pair_file = alignprep.split_namedpipe_cls(fastq_file, pair_file, data) else: final_file = None if fastq_file.endswith(".gz"): fastq_file = "<(gunzip -c %s)" % fastq_file if pair_file: pair_file = "<(gunzip -c %s)" % pair_file if not utils.file_exists(out_file) and (final_file is None or not utils.file_exists(final_file)): with postalign.tobam_cl(data, out_file, pair_file is not None) as (tobam_cl, tx_out_file): cl = [config_utils.get_program("bowtie", config)] cl += _bowtie_args_from_config(data) cl += extra_args if extra_args is not None else [] cl += ["-q", "-v", 2, "-k", num_hits, "-X", 2000, # default is too selective for most data "--best", "--strata", "--sam", ref_file] if pair_file: cl += ["-1", fastq_file, "-2", pair_file] else: cl += [fastq_file] cl = [str(i) for i in cl] fix_rg_cmd = r"samtools addreplacerg -r '%s' -" % novoalign.get_rg_info(data["rgnames"]) if fix_rg_cmd: cmd = " ".join(cl) + " | " + fix_rg_cmd + " | " + tobam_cl else: cmd = " ".join(cl) + " | " + tobam_cl do.run(cmd, "Running Bowtie on %s and %s." % (fastq_file, pair_file), data) return out_file
[ "def", "align", "(", "fastq_file", ",", "pair_file", ",", "ref_file", ",", "names", ",", "align_dir", ",", "data", ",", "extra_args", "=", "None", ")", ":", "num_hits", "=", "1", "if", "data", "[", "\"analysis\"", "]", ".", "lower", "(", ")", ".", "s...
Do standard or paired end alignment with bowtie.
[ "Do", "standard", "or", "paired", "end", "alignment", "with", "bowtie", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/bowtie.py#L30-L74
224,253
bcbio/bcbio-nextgen
bcbio/heterogeneity/theta.py
subset_by_supported
def subset_by_supported(input_file, get_coords, calls_by_name, work_dir, data, headers=("#",)): """Limit CNVkit input to calls with support from another caller. get_coords is a function that return chrom, start, end from a line of the input_file, allowing handling of multiple input file types. """ support_files = [(c, tz.get_in([c, "vrn_file"], calls_by_name)) for c in convert.SUBSET_BY_SUPPORT["cnvkit"]] support_files = [(c, f) for (c, f) in support_files if f and vcfutils.vcf_has_variants(f)] if len(support_files) == 0: return input_file else: out_file = os.path.join(work_dir, "%s-havesupport%s" % utils.splitext_plus(os.path.basename(input_file))) if not utils.file_uptodate(out_file, input_file): input_bed = _input_to_bed(input_file, work_dir, get_coords, headers) pass_coords = set([]) with file_transaction(data, out_file) as tx_out_file: support_beds = " ".join([_sv_vcf_to_bed(f, c, out_file) for c, f in support_files]) tmp_cmp_bed = "%s-intersectwith.bed" % utils.splitext_plus(tx_out_file)[0] cmd = "bedtools intersect -wa -f 0.5 -r -a {input_bed} -b {support_beds} > {tmp_cmp_bed}" do.run(cmd.format(**locals()), "Intersect CNVs with support files") for r in pybedtools.BedTool(tmp_cmp_bed): pass_coords.add((str(r.chrom), str(r.start), str(r.stop))) with open(input_file) as in_handle: with open(tx_out_file, "w") as out_handle: for line in in_handle: passes = True if not line.startswith(headers): passes = get_coords(line) in pass_coords if passes: out_handle.write(line) return out_file
python
def subset_by_supported(input_file, get_coords, calls_by_name, work_dir, data, headers=("#",)): """Limit CNVkit input to calls with support from another caller. get_coords is a function that return chrom, start, end from a line of the input_file, allowing handling of multiple input file types. """ support_files = [(c, tz.get_in([c, "vrn_file"], calls_by_name)) for c in convert.SUBSET_BY_SUPPORT["cnvkit"]] support_files = [(c, f) for (c, f) in support_files if f and vcfutils.vcf_has_variants(f)] if len(support_files) == 0: return input_file else: out_file = os.path.join(work_dir, "%s-havesupport%s" % utils.splitext_plus(os.path.basename(input_file))) if not utils.file_uptodate(out_file, input_file): input_bed = _input_to_bed(input_file, work_dir, get_coords, headers) pass_coords = set([]) with file_transaction(data, out_file) as tx_out_file: support_beds = " ".join([_sv_vcf_to_bed(f, c, out_file) for c, f in support_files]) tmp_cmp_bed = "%s-intersectwith.bed" % utils.splitext_plus(tx_out_file)[0] cmd = "bedtools intersect -wa -f 0.5 -r -a {input_bed} -b {support_beds} > {tmp_cmp_bed}" do.run(cmd.format(**locals()), "Intersect CNVs with support files") for r in pybedtools.BedTool(tmp_cmp_bed): pass_coords.add((str(r.chrom), str(r.start), str(r.stop))) with open(input_file) as in_handle: with open(tx_out_file, "w") as out_handle: for line in in_handle: passes = True if not line.startswith(headers): passes = get_coords(line) in pass_coords if passes: out_handle.write(line) return out_file
[ "def", "subset_by_supported", "(", "input_file", ",", "get_coords", ",", "calls_by_name", ",", "work_dir", ",", "data", ",", "headers", "=", "(", "\"#\"", ",", ")", ")", ":", "support_files", "=", "[", "(", "c", ",", "tz", ".", "get_in", "(", "[", "c",...
Limit CNVkit input to calls with support from another caller. get_coords is a function that return chrom, start, end from a line of the input_file, allowing handling of multiple input file types.
[ "Limit", "CNVkit", "input", "to", "calls", "with", "support", "from", "another", "caller", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/theta.py#L43-L76
224,254
bcbio/bcbio-nextgen
bcbio/heterogeneity/theta.py
_input_to_bed
def _input_to_bed(theta_input, work_dir, get_coords, headers): """Convert input file to a BED file for comparisons """ theta_bed = os.path.join(work_dir, "%s.bed" % os.path.splitext(os.path.basename(theta_input))[0]) with open(theta_input) as in_handle: with open(theta_bed, "w") as out_handle: for line in in_handle: if not line.startswith(headers): chrom, start, end = get_coords(line) out_handle.write("\t".join([chrom, start, end]) + "\n") return theta_bed
python
def _input_to_bed(theta_input, work_dir, get_coords, headers): """Convert input file to a BED file for comparisons """ theta_bed = os.path.join(work_dir, "%s.bed" % os.path.splitext(os.path.basename(theta_input))[0]) with open(theta_input) as in_handle: with open(theta_bed, "w") as out_handle: for line in in_handle: if not line.startswith(headers): chrom, start, end = get_coords(line) out_handle.write("\t".join([chrom, start, end]) + "\n") return theta_bed
[ "def", "_input_to_bed", "(", "theta_input", ",", "work_dir", ",", "get_coords", ",", "headers", ")", ":", "theta_bed", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"%s.bed\"", "%", "os", ".", "path", ".", "splitext", "(", "os", ".", "pa...
Convert input file to a BED file for comparisons
[ "Convert", "input", "file", "to", "a", "BED", "file", "for", "comparisons" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/theta.py#L88-L98
224,255
bcbio/bcbio-nextgen
bcbio/heterogeneity/theta.py
_run_theta
def _run_theta(cnv_info, data, work_dir, run_n3=True): """Run theta, calculating subpopulations and normal contamination. """ out = {"caller": "theta"} max_normal = "0.9" opts = ["-m", max_normal] n2_result = _safe_run_theta(cnv_info["theta_input"], os.path.join(work_dir, "n2"), ".n2.results", ["-n", "2"] + opts, data) if n2_result: out["estimate"] = n2_result if run_n3: n2_bounds = "%s.withBounds" % os.path.splitext(n2_result)[0] n3_result = _safe_run_theta(n2_bounds, os.path.join(work_dir, "n3"), ".n3.results", ["-n", "3", "--RESULTS", n2_result] + opts, data) if n3_result: best_result = _select_model(n2_bounds, n2_result, n3_result, os.path.join(work_dir, "n3"), data) out["estimate"] = best_result out["cnvs"] = _merge_theta_calls(n2_bounds, best_result, cnv_info["vrn_file"], data) return out
python
def _run_theta(cnv_info, data, work_dir, run_n3=True): """Run theta, calculating subpopulations and normal contamination. """ out = {"caller": "theta"} max_normal = "0.9" opts = ["-m", max_normal] n2_result = _safe_run_theta(cnv_info["theta_input"], os.path.join(work_dir, "n2"), ".n2.results", ["-n", "2"] + opts, data) if n2_result: out["estimate"] = n2_result if run_n3: n2_bounds = "%s.withBounds" % os.path.splitext(n2_result)[0] n3_result = _safe_run_theta(n2_bounds, os.path.join(work_dir, "n3"), ".n3.results", ["-n", "3", "--RESULTS", n2_result] + opts, data) if n3_result: best_result = _select_model(n2_bounds, n2_result, n3_result, os.path.join(work_dir, "n3"), data) out["estimate"] = best_result out["cnvs"] = _merge_theta_calls(n2_bounds, best_result, cnv_info["vrn_file"], data) return out
[ "def", "_run_theta", "(", "cnv_info", ",", "data", ",", "work_dir", ",", "run_n3", "=", "True", ")", ":", "out", "=", "{", "\"caller\"", ":", "\"theta\"", "}", "max_normal", "=", "\"0.9\"", "opts", "=", "[", "\"-m\"", ",", "max_normal", "]", "n2_result",...
Run theta, calculating subpopulations and normal contamination.
[ "Run", "theta", "calculating", "subpopulations", "and", "normal", "contamination", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/theta.py#L100-L120
224,256
bcbio/bcbio-nextgen
bcbio/heterogeneity/theta.py
_update_with_calls
def _update_with_calls(result_file, cnv_file): """Update bounds with calls from CNVkit, inferred copy numbers and p-values from THetA. """ results = {} with open(result_file) as in_handle: in_handle.readline() # header _, _, cs, ps = in_handle.readline().strip().split() for i, (c, p) in enumerate(zip(cs.split(":"), ps.split(","))): results[i] = (c, p) cnvs = {} with open(cnv_file) as in_handle: for line in in_handle: chrom, start, end, _, count = line.rstrip().split()[:5] cnvs[(chrom, start, end)] = count def update(i, line): parts = line.rstrip().split("\t") chrom, start, end = parts[1:4] parts += cnvs.get((chrom, start, end), ".") parts += list(results[i]) return "\t".join(parts) + "\n" return update
python
def _update_with_calls(result_file, cnv_file): """Update bounds with calls from CNVkit, inferred copy numbers and p-values from THetA. """ results = {} with open(result_file) as in_handle: in_handle.readline() # header _, _, cs, ps = in_handle.readline().strip().split() for i, (c, p) in enumerate(zip(cs.split(":"), ps.split(","))): results[i] = (c, p) cnvs = {} with open(cnv_file) as in_handle: for line in in_handle: chrom, start, end, _, count = line.rstrip().split()[:5] cnvs[(chrom, start, end)] = count def update(i, line): parts = line.rstrip().split("\t") chrom, start, end = parts[1:4] parts += cnvs.get((chrom, start, end), ".") parts += list(results[i]) return "\t".join(parts) + "\n" return update
[ "def", "_update_with_calls", "(", "result_file", ",", "cnv_file", ")", ":", "results", "=", "{", "}", "with", "open", "(", "result_file", ")", "as", "in_handle", ":", "in_handle", ".", "readline", "(", ")", "# header", "_", ",", "_", ",", "cs", ",", "p...
Update bounds with calls from CNVkit, inferred copy numbers and p-values from THetA.
[ "Update", "bounds", "with", "calls", "from", "CNVkit", "inferred", "copy", "numbers", "and", "p", "-", "values", "from", "THetA", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/theta.py#L122-L142
224,257
bcbio/bcbio-nextgen
bcbio/heterogeneity/theta.py
_merge_theta_calls
def _merge_theta_calls(bounds_file, result_file, cnv_file, data): """Create a final output file with merged CNVkit and THetA copy and population estimates. """ out_file = "%s-merged.txt" % (result_file.replace(".BEST.results", "")) if not utils.file_uptodate(out_file, result_file): with file_transaction(data, out_file) as tx_out_file: updater = _update_with_calls(result_file, cnv_file) with open(bounds_file) as in_handle: with open(tx_out_file, "w") as out_handle: i = 0 for line in in_handle: if line.startswith("#"): parts = line.rstrip().split("\t") parts += ["cnv", "pop_cnvs", "pop_pvals"] out_handle.write("\t".join(parts) + "\n") else: out_handle.write(updater(i, line)) i += 1 return out_file
python
def _merge_theta_calls(bounds_file, result_file, cnv_file, data): """Create a final output file with merged CNVkit and THetA copy and population estimates. """ out_file = "%s-merged.txt" % (result_file.replace(".BEST.results", "")) if not utils.file_uptodate(out_file, result_file): with file_transaction(data, out_file) as tx_out_file: updater = _update_with_calls(result_file, cnv_file) with open(bounds_file) as in_handle: with open(tx_out_file, "w") as out_handle: i = 0 for line in in_handle: if line.startswith("#"): parts = line.rstrip().split("\t") parts += ["cnv", "pop_cnvs", "pop_pvals"] out_handle.write("\t".join(parts) + "\n") else: out_handle.write(updater(i, line)) i += 1 return out_file
[ "def", "_merge_theta_calls", "(", "bounds_file", ",", "result_file", ",", "cnv_file", ",", "data", ")", ":", "out_file", "=", "\"%s-merged.txt\"", "%", "(", "result_file", ".", "replace", "(", "\".BEST.results\"", ",", "\"\"", ")", ")", "if", "not", "utils", ...
Create a final output file with merged CNVkit and THetA copy and population estimates.
[ "Create", "a", "final", "output", "file", "with", "merged", "CNVkit", "and", "THetA", "copy", "and", "population", "estimates", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/theta.py#L144-L162
224,258
bcbio/bcbio-nextgen
bcbio/heterogeneity/theta.py
_select_model
def _select_model(n2_bounds, n2_result, n3_result, out_dir, data): """Run final model selection from n=2 and n=3 options. """ n2_out_file = n2_result.replace(".n2.results", ".BEST.results") n3_out_file = n3_result.replace(".n3.results", ".BEST.results") if not utils.file_exists(n2_out_file) and not utils.file_exists(n3_out_file): cmd = _get_cmd("ModelSelection.py") + [n2_bounds, n2_result, n3_result] do.run(cmd, "Select best THetA model") if utils.file_exists(n2_out_file): return n2_out_file else: assert utils.file_exists(n3_out_file) return n3_out_file
python
def _select_model(n2_bounds, n2_result, n3_result, out_dir, data): """Run final model selection from n=2 and n=3 options. """ n2_out_file = n2_result.replace(".n2.results", ".BEST.results") n3_out_file = n3_result.replace(".n3.results", ".BEST.results") if not utils.file_exists(n2_out_file) and not utils.file_exists(n3_out_file): cmd = _get_cmd("ModelSelection.py") + [n2_bounds, n2_result, n3_result] do.run(cmd, "Select best THetA model") if utils.file_exists(n2_out_file): return n2_out_file else: assert utils.file_exists(n3_out_file) return n3_out_file
[ "def", "_select_model", "(", "n2_bounds", ",", "n2_result", ",", "n3_result", ",", "out_dir", ",", "data", ")", ":", "n2_out_file", "=", "n2_result", ".", "replace", "(", "\".n2.results\"", ",", "\".BEST.results\"", ")", "n3_out_file", "=", "n3_result", ".", "...
Run final model selection from n=2 and n=3 options.
[ "Run", "final", "model", "selection", "from", "n", "=", "2", "and", "n", "=", "3", "options", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/theta.py#L164-L176
224,259
bcbio/bcbio-nextgen
bcbio/heterogeneity/theta.py
_safe_run_theta
def _safe_run_theta(input_file, out_dir, output_ext, args, data): """Run THetA, catching and continuing on any errors. """ out_file = os.path.join(out_dir, _split_theta_ext(input_file) + output_ext) skip_file = out_file + ".skipped" if utils.file_exists(skip_file): return None if not utils.file_exists(out_file): with file_transaction(data, out_dir) as tx_out_dir: utils.safe_makedir(tx_out_dir) cmd = _get_cmd("RunTHetA.py") + args + \ [input_file, "--NUM_PROCESSES", dd.get_cores(data), "--FORCE", "-d", tx_out_dir] try: do.run(cmd, "Run THetA to calculate purity", log_error=False) except subprocess.CalledProcessError as msg: if ("Number of intervals must be greater than 1" in str(msg) or "This sample isn't a good candidate for THetA analysis" in str(msg)): with open(os.path.join(tx_out_dir, os.path.basename(skip_file)), "w") as out_handle: out_handle.write("Expected TheTA failure, skipping") return None else: raise return out_file
python
def _safe_run_theta(input_file, out_dir, output_ext, args, data): """Run THetA, catching and continuing on any errors. """ out_file = os.path.join(out_dir, _split_theta_ext(input_file) + output_ext) skip_file = out_file + ".skipped" if utils.file_exists(skip_file): return None if not utils.file_exists(out_file): with file_transaction(data, out_dir) as tx_out_dir: utils.safe_makedir(tx_out_dir) cmd = _get_cmd("RunTHetA.py") + args + \ [input_file, "--NUM_PROCESSES", dd.get_cores(data), "--FORCE", "-d", tx_out_dir] try: do.run(cmd, "Run THetA to calculate purity", log_error=False) except subprocess.CalledProcessError as msg: if ("Number of intervals must be greater than 1" in str(msg) or "This sample isn't a good candidate for THetA analysis" in str(msg)): with open(os.path.join(tx_out_dir, os.path.basename(skip_file)), "w") as out_handle: out_handle.write("Expected TheTA failure, skipping") return None else: raise return out_file
[ "def", "_safe_run_theta", "(", "input_file", ",", "out_dir", ",", "output_ext", ",", "args", ",", "data", ")", ":", "out_file", "=", "os", ".", "path", ".", "join", "(", "out_dir", ",", "_split_theta_ext", "(", "input_file", ")", "+", "output_ext", ")", ...
Run THetA, catching and continuing on any errors.
[ "Run", "THetA", "catching", "and", "continuing", "on", "any", "errors", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/theta.py#L178-L201
224,260
bcbio/bcbio-nextgen
bcbio/heterogeneity/theta.py
_get_cmd
def _get_cmd(cmd): """Retrieve required commands for running THetA with our local bcbio python. """ check_cmd = "RunTHetA.py" try: local_cmd = subprocess.check_output(["which", check_cmd]).strip() except subprocess.CalledProcessError: return None return [sys.executable, "%s/%s" % (os.path.dirname(os.path.realpath(local_cmd)), cmd)]
python
def _get_cmd(cmd): """Retrieve required commands for running THetA with our local bcbio python. """ check_cmd = "RunTHetA.py" try: local_cmd = subprocess.check_output(["which", check_cmd]).strip() except subprocess.CalledProcessError: return None return [sys.executable, "%s/%s" % (os.path.dirname(os.path.realpath(local_cmd)), cmd)]
[ "def", "_get_cmd", "(", "cmd", ")", ":", "check_cmd", "=", "\"RunTHetA.py\"", "try", ":", "local_cmd", "=", "subprocess", ".", "check_output", "(", "[", "\"which\"", ",", "check_cmd", "]", ")", ".", "strip", "(", ")", "except", "subprocess", ".", "CalledPr...
Retrieve required commands for running THetA with our local bcbio python.
[ "Retrieve", "required", "commands", "for", "running", "THetA", "with", "our", "local", "bcbio", "python", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/theta.py#L213-L221
224,261
bcbio/bcbio-nextgen
bcbio/srna/mirge.py
run
def run(data): """Proxy function to run the tool""" sample = data[0][0] work_dir = dd.get_work_dir(sample) out_dir = os.path.join(work_dir, "mirge") lib = _find_lib(sample) mirge = _find_mirge(sample) bowtie = _find_bowtie(sample) sps = dd.get_species(sample) species = SPS.get(sps, "") if not species: raise ValueError("species not supported (hsa, mmu, rno, dre, cel, dme): %s" % sps) if not lib: raise ValueError("-lib option is not set up in resources for mirge tool." " Read above warnings lines.") if not utils.file_exists(out_dir): with tx_tmpdir() as tmp_dir: sample_file = _create_sample_file(data, tmp_dir) do.run(_cmd().format(**locals()), "Running miRge2.0.") shutil.move(tmp_dir, out_dir) return [os.path.abspath(fn) for fn in glob.glob(os.path.join(out_dir, "*", "*"))]
python
def run(data): """Proxy function to run the tool""" sample = data[0][0] work_dir = dd.get_work_dir(sample) out_dir = os.path.join(work_dir, "mirge") lib = _find_lib(sample) mirge = _find_mirge(sample) bowtie = _find_bowtie(sample) sps = dd.get_species(sample) species = SPS.get(sps, "") if not species: raise ValueError("species not supported (hsa, mmu, rno, dre, cel, dme): %s" % sps) if not lib: raise ValueError("-lib option is not set up in resources for mirge tool." " Read above warnings lines.") if not utils.file_exists(out_dir): with tx_tmpdir() as tmp_dir: sample_file = _create_sample_file(data, tmp_dir) do.run(_cmd().format(**locals()), "Running miRge2.0.") shutil.move(tmp_dir, out_dir) return [os.path.abspath(fn) for fn in glob.glob(os.path.join(out_dir, "*", "*"))]
[ "def", "run", "(", "data", ")", ":", "sample", "=", "data", "[", "0", "]", "[", "0", "]", "work_dir", "=", "dd", ".", "get_work_dir", "(", "sample", ")", "out_dir", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"mirge\"", ")", "lib...
Proxy function to run the tool
[ "Proxy", "function", "to", "run", "the", "tool" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/srna/mirge.py#L20-L41
224,262
bcbio/bcbio-nextgen
bcbio/srna/mirge.py
_create_sample_file
def _create_sample_file(data, out_dir): """from data list all the fastq files in a file""" sample_file = os.path.join(out_dir, "sample_file.txt") with open(sample_file, 'w') as outh: for sample in data: outh.write(sample[0]["clean_fastq"] + "\n") return sample_file
python
def _create_sample_file(data, out_dir): """from data list all the fastq files in a file""" sample_file = os.path.join(out_dir, "sample_file.txt") with open(sample_file, 'w') as outh: for sample in data: outh.write(sample[0]["clean_fastq"] + "\n") return sample_file
[ "def", "_create_sample_file", "(", "data", ",", "out_dir", ")", ":", "sample_file", "=", "os", ".", "path", ".", "join", "(", "out_dir", ",", "\"sample_file.txt\"", ")", "with", "open", "(", "sample_file", ",", "'w'", ")", "as", "outh", ":", "for", "samp...
from data list all the fastq files in a file
[ "from", "data", "list", "all", "the", "fastq", "files", "in", "a", "file" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/srna/mirge.py#L55-L61
224,263
bcbio/bcbio-nextgen
bcbio/srna/mirge.py
_find_lib
def _find_lib(data): """Find mirge libs""" options = " ".join(data.get('resources', {}).get('mirge', {}).get("options", "")) if options.find("-lib") > -1 and utils.file_exists(options.split()[1]): return options if not options: logger.warning("miRge libraries not found. Follow these instructions to install them:") logger.warning("https://github.com/mhalushka/miRge#download-libraries") logger.warning("Then, pass -lib LIB_PATH with resourcces:mirge:options:[...]") logger.warning("More information: https://bcbio-nextgen.readthedocs.io/en/latest/contents/pipelines.html#smallrna-seq")
python
def _find_lib(data): """Find mirge libs""" options = " ".join(data.get('resources', {}).get('mirge', {}).get("options", "")) if options.find("-lib") > -1 and utils.file_exists(options.split()[1]): return options if not options: logger.warning("miRge libraries not found. Follow these instructions to install them:") logger.warning("https://github.com/mhalushka/miRge#download-libraries") logger.warning("Then, pass -lib LIB_PATH with resourcces:mirge:options:[...]") logger.warning("More information: https://bcbio-nextgen.readthedocs.io/en/latest/contents/pipelines.html#smallrna-seq")
[ "def", "_find_lib", "(", "data", ")", ":", "options", "=", "\" \"", ".", "join", "(", "data", ".", "get", "(", "'resources'", ",", "{", "}", ")", ".", "get", "(", "'mirge'", ",", "{", "}", ")", ".", "get", "(", "\"options\"", ",", "\"\"", ")", ...
Find mirge libs
[ "Find", "mirge", "libs" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/srna/mirge.py#L71-L80
224,264
bcbio/bcbio-nextgen
bcbio/pipeline/datadict.py
get_input_sequence_files
def get_input_sequence_files(data, default=None): """ returns the input sequencing files, these can be single or paired FASTQ files or BAM files """ if "files" not in data or data.get("files") is None: file1, file2 = None, None elif len(data["files"]) == 2: file1, file2 = data["files"] else: assert len(data["files"]) == 1, data["files"] file1, file2 = data["files"][0], None return file1, file2
python
def get_input_sequence_files(data, default=None): """ returns the input sequencing files, these can be single or paired FASTQ files or BAM files """ if "files" not in data or data.get("files") is None: file1, file2 = None, None elif len(data["files"]) == 2: file1, file2 = data["files"] else: assert len(data["files"]) == 1, data["files"] file1, file2 = data["files"][0], None return file1, file2
[ "def", "get_input_sequence_files", "(", "data", ",", "default", "=", "None", ")", ":", "if", "\"files\"", "not", "in", "data", "or", "data", ".", "get", "(", "\"files\"", ")", "is", "None", ":", "file1", ",", "file2", "=", "None", ",", "None", "elif", ...
returns the input sequencing files, these can be single or paired FASTQ files or BAM files
[ "returns", "the", "input", "sequencing", "files", "these", "can", "be", "single", "or", "paired", "FASTQ", "files", "or", "BAM", "files" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/datadict.py#L223-L235
224,265
bcbio/bcbio-nextgen
bcbio/pipeline/datadict.py
get_umi_consensus
def get_umi_consensus(data): """Retrieve UMI for consensus based preparation. We specify this either as a separate fastq file or embedded in the read name as `fastq_name`.` """ consensus_choices = (["fastq_name"]) umi = tz.get_in(["config", "algorithm", "umi_type"], data) # don't run consensus UMI calling for scrna-seq if tz.get_in(["analysis"], data, "").lower() == "scrna-seq": return False if umi and (umi in consensus_choices or os.path.exists(umi)): assert tz.get_in(["config", "algorithm", "mark_duplicates"], data, True), \ "Using consensus UMI inputs requires marking duplicates" return umi
python
def get_umi_consensus(data): """Retrieve UMI for consensus based preparation. We specify this either as a separate fastq file or embedded in the read name as `fastq_name`.` """ consensus_choices = (["fastq_name"]) umi = tz.get_in(["config", "algorithm", "umi_type"], data) # don't run consensus UMI calling for scrna-seq if tz.get_in(["analysis"], data, "").lower() == "scrna-seq": return False if umi and (umi in consensus_choices or os.path.exists(umi)): assert tz.get_in(["config", "algorithm", "mark_duplicates"], data, True), \ "Using consensus UMI inputs requires marking duplicates" return umi
[ "def", "get_umi_consensus", "(", "data", ")", ":", "consensus_choices", "=", "(", "[", "\"fastq_name\"", "]", ")", "umi", "=", "tz", ".", "get_in", "(", "[", "\"config\"", ",", "\"algorithm\"", ",", "\"umi_type\"", "]", ",", "data", ")", "# don't run consens...
Retrieve UMI for consensus based preparation. We specify this either as a separate fastq file or embedded in the read name as `fastq_name`.`
[ "Retrieve", "UMI", "for", "consensus", "based", "preparation", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/datadict.py#L237-L251
224,266
bcbio/bcbio-nextgen
bcbio/pipeline/datadict.py
get_dexseq_gff
def get_dexseq_gff(config, default=None): """ some older versions of the genomes have the DEXseq gff file as gff instead of gff3, so this handles that by looking for either one """ dexseq_gff = tz.get_in(tz.get_in(['dexseq_gff', 'keys'], LOOKUPS, {}), config, None) if not dexseq_gff: return None gtf_file = get_gtf_file(config) if gtf_file: base_dir = os.path.dirname(gtf_file) else: base_dir = os.path.dirname(dexseq_gff) base, _ = os.path.splitext(dexseq_gff) gff_file = os.path.join(base_dir, base + ".gff") if file_exists(gff_file): return gff_file gtf_file = os.path.join(base_dir, base + ".gff3") if file_exists(gtf_file): return gtf_file else: return None
python
def get_dexseq_gff(config, default=None): """ some older versions of the genomes have the DEXseq gff file as gff instead of gff3, so this handles that by looking for either one """ dexseq_gff = tz.get_in(tz.get_in(['dexseq_gff', 'keys'], LOOKUPS, {}), config, None) if not dexseq_gff: return None gtf_file = get_gtf_file(config) if gtf_file: base_dir = os.path.dirname(gtf_file) else: base_dir = os.path.dirname(dexseq_gff) base, _ = os.path.splitext(dexseq_gff) gff_file = os.path.join(base_dir, base + ".gff") if file_exists(gff_file): return gff_file gtf_file = os.path.join(base_dir, base + ".gff3") if file_exists(gtf_file): return gtf_file else: return None
[ "def", "get_dexseq_gff", "(", "config", ",", "default", "=", "None", ")", ":", "dexseq_gff", "=", "tz", ".", "get_in", "(", "tz", ".", "get_in", "(", "[", "'dexseq_gff'", ",", "'keys'", "]", ",", "LOOKUPS", ",", "{", "}", ")", ",", "config", ",", "...
some older versions of the genomes have the DEXseq gff file as gff instead of gff3, so this handles that by looking for either one
[ "some", "older", "versions", "of", "the", "genomes", "have", "the", "DEXseq", "gff", "file", "as", "gff", "instead", "of", "gff3", "so", "this", "handles", "that", "by", "looking", "for", "either", "one" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/datadict.py#L253-L275
224,267
bcbio/bcbio-nextgen
bcbio/pipeline/datadict.py
get_in_samples
def get_in_samples(samples, fn): """ for a list of samples, return the value of a global option """ for sample in samples: sample = to_single_data(sample) if fn(sample, None): return fn(sample) return None
python
def get_in_samples(samples, fn): """ for a list of samples, return the value of a global option """ for sample in samples: sample = to_single_data(sample) if fn(sample, None): return fn(sample) return None
[ "def", "get_in_samples", "(", "samples", ",", "fn", ")", ":", "for", "sample", "in", "samples", ":", "sample", "=", "to_single_data", "(", "sample", ")", "if", "fn", "(", "sample", ",", "None", ")", ":", "return", "fn", "(", "sample", ")", "return", ...
for a list of samples, return the value of a global option
[ "for", "a", "list", "of", "samples", "return", "the", "value", "of", "a", "global", "option" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/datadict.py#L329-L337
224,268
bcbio/bcbio-nextgen
bcbio/pipeline/datadict.py
update_summary_qc
def update_summary_qc(data, key, base=None, secondary=None): """ updates summary_qc with a new section, keyed by key. stick files into summary_qc if you want them propagated forward and available for multiqc """ summary = get_summary_qc(data, {}) if base and secondary: summary[key] = {"base": base, "secondary": secondary} elif base: summary[key] = {"base": base} elif secondary: summary[key] = {"secondary": secondary} data = set_summary_qc(data, summary) return data
python
def update_summary_qc(data, key, base=None, secondary=None): """ updates summary_qc with a new section, keyed by key. stick files into summary_qc if you want them propagated forward and available for multiqc """ summary = get_summary_qc(data, {}) if base and secondary: summary[key] = {"base": base, "secondary": secondary} elif base: summary[key] = {"base": base} elif secondary: summary[key] = {"secondary": secondary} data = set_summary_qc(data, summary) return data
[ "def", "update_summary_qc", "(", "data", ",", "key", ",", "base", "=", "None", ",", "secondary", "=", "None", ")", ":", "summary", "=", "get_summary_qc", "(", "data", ",", "{", "}", ")", "if", "base", "and", "secondary", ":", "summary", "[", "key", "...
updates summary_qc with a new section, keyed by key. stick files into summary_qc if you want them propagated forward and available for multiqc
[ "updates", "summary_qc", "with", "a", "new", "section", "keyed", "by", "key", ".", "stick", "files", "into", "summary_qc", "if", "you", "want", "them", "propagated", "forward", "and", "available", "for", "multiqc" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/datadict.py#L351-L365
224,269
bcbio/bcbio-nextgen
bcbio/pipeline/datadict.py
has_variantcalls
def has_variantcalls(data): """ returns True if the data dictionary is configured for variant calling """ analysis = get_analysis(data).lower() variant_pipeline = analysis.startswith(("standard", "variant", "variant2")) variantcaller = get_variantcaller(data) return variant_pipeline or variantcaller
python
def has_variantcalls(data): """ returns True if the data dictionary is configured for variant calling """ analysis = get_analysis(data).lower() variant_pipeline = analysis.startswith(("standard", "variant", "variant2")) variantcaller = get_variantcaller(data) return variant_pipeline or variantcaller
[ "def", "has_variantcalls", "(", "data", ")", ":", "analysis", "=", "get_analysis", "(", "data", ")", ".", "lower", "(", ")", "variant_pipeline", "=", "analysis", ".", "startswith", "(", "(", "\"standard\"", ",", "\"variant\"", ",", "\"variant2\"", ")", ")", ...
returns True if the data dictionary is configured for variant calling
[ "returns", "True", "if", "the", "data", "dictionary", "is", "configured", "for", "variant", "calling" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/datadict.py#L367-L374
224,270
bcbio/bcbio-nextgen
bcbio/rnaseq/qc.py
estimate_library_complexity
def estimate_library_complexity(df, algorithm="RNA-seq"): """ estimate library complexity from the number of reads vs. number of unique start sites. returns "NA" if there are not enough data points to fit the line """ DEFAULT_CUTOFFS = {"RNA-seq": (0.25, 0.40)} cutoffs = DEFAULT_CUTOFFS[algorithm] if len(df) < 5: return {"unique_starts_per_read": 'nan', "complexity": "NA"} model = sm.ols(formula="starts ~ reads", data=df) fitted = model.fit() slope = fitted.params["reads"] if slope <= cutoffs[0]: complexity = "LOW" elif slope <= cutoffs[1]: complexity = "MEDIUM" else: complexity = "HIGH" # for now don't return the complexity flag return {"Unique Starts Per Read": float(slope)}
python
def estimate_library_complexity(df, algorithm="RNA-seq"): """ estimate library complexity from the number of reads vs. number of unique start sites. returns "NA" if there are not enough data points to fit the line """ DEFAULT_CUTOFFS = {"RNA-seq": (0.25, 0.40)} cutoffs = DEFAULT_CUTOFFS[algorithm] if len(df) < 5: return {"unique_starts_per_read": 'nan', "complexity": "NA"} model = sm.ols(formula="starts ~ reads", data=df) fitted = model.fit() slope = fitted.params["reads"] if slope <= cutoffs[0]: complexity = "LOW" elif slope <= cutoffs[1]: complexity = "MEDIUM" else: complexity = "HIGH" # for now don't return the complexity flag return {"Unique Starts Per Read": float(slope)}
[ "def", "estimate_library_complexity", "(", "df", ",", "algorithm", "=", "\"RNA-seq\"", ")", ":", "DEFAULT_CUTOFFS", "=", "{", "\"RNA-seq\"", ":", "(", "0.25", ",", "0.40", ")", "}", "cutoffs", "=", "DEFAULT_CUTOFFS", "[", "algorithm", "]", "if", "len", "(", ...
estimate library complexity from the number of reads vs. number of unique start sites. returns "NA" if there are not enough data points to fit the line
[ "estimate", "library", "complexity", "from", "the", "number", "of", "reads", "vs", ".", "number", "of", "unique", "start", "sites", ".", "returns", "NA", "if", "there", "are", "not", "enough", "data", "points", "to", "fit", "the", "line" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/qc.py#L42-L64
224,271
bcbio/bcbio-nextgen
bcbio/galaxy/api.py
GalaxyApiAccess.run_details
def run_details(self, run_bc, run_date=None): """Next Gen LIMS specific API functionality. """ try: details = self._get("/nglims/api_run_details", dict(run=run_bc)) except ValueError: raise ValueError("Could not find information in Galaxy for run: %s" % run_bc) if "error" in details and run_date is not None: try: details = self._get("/nglims/api_run_details", dict(run=run_date)) except ValueError: raise ValueError("Could not find information in Galaxy for run: %s" % run_date) return details
python
def run_details(self, run_bc, run_date=None): """Next Gen LIMS specific API functionality. """ try: details = self._get("/nglims/api_run_details", dict(run=run_bc)) except ValueError: raise ValueError("Could not find information in Galaxy for run: %s" % run_bc) if "error" in details and run_date is not None: try: details = self._get("/nglims/api_run_details", dict(run=run_date)) except ValueError: raise ValueError("Could not find information in Galaxy for run: %s" % run_date) return details
[ "def", "run_details", "(", "self", ",", "run_bc", ",", "run_date", "=", "None", ")", ":", "try", ":", "details", "=", "self", ".", "_get", "(", "\"/nglims/api_run_details\"", ",", "dict", "(", "run", "=", "run_bc", ")", ")", "except", "ValueError", ":", ...
Next Gen LIMS specific API functionality.
[ "Next", "Gen", "LIMS", "specific", "API", "functionality", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/galaxy/api.py#L52-L64
224,272
bcbio/bcbio-nextgen
bcbio/pipeline/cleanbam.py
fixrg
def fixrg(in_bam, names, ref_file, dirs, data): """Fix read group in a file, using samtools addreplacerg. addreplacerg does not remove the old read group, causing confusion when checking. We use reheader to work around this """ work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "bamclean", dd.get_sample_name(data))) out_file = os.path.join(work_dir, "%s-fixrg.bam" % utils.splitext_plus(os.path.basename(in_bam))[0]) if not utils.file_exists(out_file): out_file = os.path.join(work_dir, "%s-fixrg.bam" % dd.get_sample_name(data)) if not utils.file_uptodate(out_file, in_bam): with file_transaction(data, out_file) as tx_out_file: rg_info = novoalign.get_rg_info(names) new_header = "%s-header.txt" % os.path.splitext(out_file)[0] cores = dd.get_cores(data) do.run("samtools view -H {in_bam} | grep -v ^@RG > {new_header}".format(**locals()), "Create empty RG header: %s" % dd.get_sample_name(data)) cmd = ("samtools reheader {new_header} {in_bam} | " "samtools addreplacerg -@ {cores} -r '{rg_info}' -m overwrite_all -O bam -o {tx_out_file} -") do.run(cmd.format(**locals()), "Fix read groups: %s" % dd.get_sample_name(data)) return out_file
python
def fixrg(in_bam, names, ref_file, dirs, data): """Fix read group in a file, using samtools addreplacerg. addreplacerg does not remove the old read group, causing confusion when checking. We use reheader to work around this """ work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "bamclean", dd.get_sample_name(data))) out_file = os.path.join(work_dir, "%s-fixrg.bam" % utils.splitext_plus(os.path.basename(in_bam))[0]) if not utils.file_exists(out_file): out_file = os.path.join(work_dir, "%s-fixrg.bam" % dd.get_sample_name(data)) if not utils.file_uptodate(out_file, in_bam): with file_transaction(data, out_file) as tx_out_file: rg_info = novoalign.get_rg_info(names) new_header = "%s-header.txt" % os.path.splitext(out_file)[0] cores = dd.get_cores(data) do.run("samtools view -H {in_bam} | grep -v ^@RG > {new_header}".format(**locals()), "Create empty RG header: %s" % dd.get_sample_name(data)) cmd = ("samtools reheader {new_header} {in_bam} | " "samtools addreplacerg -@ {cores} -r '{rg_info}' -m overwrite_all -O bam -o {tx_out_file} -") do.run(cmd.format(**locals()), "Fix read groups: %s" % dd.get_sample_name(data)) return out_file
[ "def", "fixrg", "(", "in_bam", ",", "names", ",", "ref_file", ",", "dirs", ",", "data", ")", ":", "work_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "dd", ".", "get_work_dir", "(", "data", ")", ",", "\"bamclean\"...
Fix read group in a file, using samtools addreplacerg. addreplacerg does not remove the old read group, causing confusion when checking. We use reheader to work around this
[ "Fix", "read", "group", "in", "a", "file", "using", "samtools", "addreplacerg", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/cleanbam.py#L20-L40
224,273
bcbio/bcbio-nextgen
bcbio/pipeline/cleanbam.py
_target_chroms_and_header
def _target_chroms_and_header(bam_file, data): """Get a list of chromosomes to target and new updated ref_file header. Could potentially handle remapping from chr1 -> 1 but currently disabled due to speed issues. """ special_remaps = {"chrM": "MT", "MT": "chrM"} target_chroms = dict([(x.name, i) for i, x in enumerate(ref.file_contigs(dd.get_ref_file(data))) if chromhacks.is_autosomal_or_sex(x.name)]) out_chroms = [] with pysam.Samfile(bam_file, "rb") as bamfile: for bami, bam_contig in enumerate([c["SN"] for c in bamfile.header["SQ"]]): if bam_contig in target_chroms: target_chrom = bam_contig elif bam_contig in special_remaps and special_remaps[bam_contig] in target_chroms: target_chrom = special_remaps[bam_contig] elif bam_contig.startswith("chr") and bam_contig.replace("chr", "") in target_chroms: target_chrom = bam_contig.replace("chr", "") elif "chr%s" % bam_contig in target_chroms: target_chrom = "chr%s" % bam_contig else: target_chrom = None # target_chrom == bam_contig ensures we don't try chr1 -> 1 style remapping if target_chrom and target_chrom == bam_contig: # Order not required if dealing with SAM file header fixing #assert bami == target_chroms[target_chrom], \ # ("remove_extracontigs: Non-matching order of standard contig: %s %s (%s vs %s)" % # (bam_file, target_chrom, bami, target_chroms[target_chrom])) out_chroms.append(target_chrom) assert out_chroms, ("remove_extracontigs: Did not find any chromosomes in reference file: %s %s" % (bam_file, target_chroms)) return out_chroms
python
def _target_chroms_and_header(bam_file, data): """Get a list of chromosomes to target and new updated ref_file header. Could potentially handle remapping from chr1 -> 1 but currently disabled due to speed issues. """ special_remaps = {"chrM": "MT", "MT": "chrM"} target_chroms = dict([(x.name, i) for i, x in enumerate(ref.file_contigs(dd.get_ref_file(data))) if chromhacks.is_autosomal_or_sex(x.name)]) out_chroms = [] with pysam.Samfile(bam_file, "rb") as bamfile: for bami, bam_contig in enumerate([c["SN"] for c in bamfile.header["SQ"]]): if bam_contig in target_chroms: target_chrom = bam_contig elif bam_contig in special_remaps and special_remaps[bam_contig] in target_chroms: target_chrom = special_remaps[bam_contig] elif bam_contig.startswith("chr") and bam_contig.replace("chr", "") in target_chroms: target_chrom = bam_contig.replace("chr", "") elif "chr%s" % bam_contig in target_chroms: target_chrom = "chr%s" % bam_contig else: target_chrom = None # target_chrom == bam_contig ensures we don't try chr1 -> 1 style remapping if target_chrom and target_chrom == bam_contig: # Order not required if dealing with SAM file header fixing #assert bami == target_chroms[target_chrom], \ # ("remove_extracontigs: Non-matching order of standard contig: %s %s (%s vs %s)" % # (bam_file, target_chrom, bami, target_chroms[target_chrom])) out_chroms.append(target_chrom) assert out_chroms, ("remove_extracontigs: Did not find any chromosomes in reference file: %s %s" % (bam_file, target_chroms)) return out_chroms
[ "def", "_target_chroms_and_header", "(", "bam_file", ",", "data", ")", ":", "special_remaps", "=", "{", "\"chrM\"", ":", "\"MT\"", ",", "\"MT\"", ":", "\"chrM\"", "}", "target_chroms", "=", "dict", "(", "[", "(", "x", ".", "name", ",", "i", ")", "for", ...
Get a list of chromosomes to target and new updated ref_file header. Could potentially handle remapping from chr1 -> 1 but currently disabled due to speed issues.
[ "Get", "a", "list", "of", "chromosomes", "to", "target", "and", "new", "updated", "ref_file", "header", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/cleanbam.py#L75-L106
224,274
bcbio/bcbio-nextgen
bcbio/pipeline/cleanbam.py
picard_prep
def picard_prep(in_bam, names, ref_file, dirs, data): """Prepare input BAM using Picard and GATK cleaning tools. - ReorderSam to reorder file to reference - AddOrReplaceReadGroups to add read group information and coordinate sort - PrintReads to filters to remove problem records: - filterMBQ to remove reads with mismatching bases and base qualities """ runner = broad.runner_from_path("picard", data["config"]) work_dir = utils.safe_makedir(os.path.join(dirs["work"], "bamclean", names["sample"])) runner.run_fn("picard_index_ref", ref_file) reorder_bam = os.path.join(work_dir, "%s-reorder.bam" % os.path.splitext(os.path.basename(in_bam))[0]) if not utils.file_exists(reorder_bam): reorder_bam = os.path.join(work_dir, "%s-reorder.bam" % dd.get_sample_name(data)) reorder_bam = runner.run_fn("picard_reorder", in_bam, ref_file, reorder_bam) rg_bam = runner.run_fn("picard_fix_rgs", reorder_bam, names) return _filter_bad_reads(rg_bam, ref_file, data)
python
def picard_prep(in_bam, names, ref_file, dirs, data): """Prepare input BAM using Picard and GATK cleaning tools. - ReorderSam to reorder file to reference - AddOrReplaceReadGroups to add read group information and coordinate sort - PrintReads to filters to remove problem records: - filterMBQ to remove reads with mismatching bases and base qualities """ runner = broad.runner_from_path("picard", data["config"]) work_dir = utils.safe_makedir(os.path.join(dirs["work"], "bamclean", names["sample"])) runner.run_fn("picard_index_ref", ref_file) reorder_bam = os.path.join(work_dir, "%s-reorder.bam" % os.path.splitext(os.path.basename(in_bam))[0]) if not utils.file_exists(reorder_bam): reorder_bam = os.path.join(work_dir, "%s-reorder.bam" % dd.get_sample_name(data)) reorder_bam = runner.run_fn("picard_reorder", in_bam, ref_file, reorder_bam) rg_bam = runner.run_fn("picard_fix_rgs", reorder_bam, names) return _filter_bad_reads(rg_bam, ref_file, data)
[ "def", "picard_prep", "(", "in_bam", ",", "names", ",", "ref_file", ",", "dirs", ",", "data", ")", ":", "runner", "=", "broad", ".", "runner_from_path", "(", "\"picard\"", ",", "data", "[", "\"config\"", "]", ")", "work_dir", "=", "utils", ".", "safe_mak...
Prepare input BAM using Picard and GATK cleaning tools. - ReorderSam to reorder file to reference - AddOrReplaceReadGroups to add read group information and coordinate sort - PrintReads to filters to remove problem records: - filterMBQ to remove reads with mismatching bases and base qualities
[ "Prepare", "input", "BAM", "using", "Picard", "and", "GATK", "cleaning", "tools", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/cleanbam.py#L122-L139
224,275
bcbio/bcbio-nextgen
bcbio/pipeline/cleanbam.py
_filter_bad_reads
def _filter_bad_reads(in_bam, ref_file, data): """Use GATK filter to remove problem reads which choke GATK and Picard. """ bam.index(in_bam, data["config"]) out_file = "%s-gatkfilter.bam" % os.path.splitext(in_bam)[0] if not utils.file_exists(out_file): with tx_tmpdir(data) as tmp_dir: with file_transaction(data, out_file) as tx_out_file: params = [("FixMisencodedBaseQualityReads" if dd.get_quality_format(data, "").lower() == "illumina" else "PrintReads"), "-R", ref_file, "-I", in_bam, "-O", tx_out_file, "-RF", "MatchingBasesAndQualsReadFilter", "-RF", "SeqIsStoredReadFilter", "-RF", "CigarContainsNoNOperator"] jvm_opts = broad.get_gatk_opts(data["config"], tmp_dir) do.run(broad.gatk_cmd("gatk", jvm_opts, params), "Filter problem reads") bam.index(out_file, data["config"]) return out_file
python
def _filter_bad_reads(in_bam, ref_file, data): """Use GATK filter to remove problem reads which choke GATK and Picard. """ bam.index(in_bam, data["config"]) out_file = "%s-gatkfilter.bam" % os.path.splitext(in_bam)[0] if not utils.file_exists(out_file): with tx_tmpdir(data) as tmp_dir: with file_transaction(data, out_file) as tx_out_file: params = [("FixMisencodedBaseQualityReads" if dd.get_quality_format(data, "").lower() == "illumina" else "PrintReads"), "-R", ref_file, "-I", in_bam, "-O", tx_out_file, "-RF", "MatchingBasesAndQualsReadFilter", "-RF", "SeqIsStoredReadFilter", "-RF", "CigarContainsNoNOperator"] jvm_opts = broad.get_gatk_opts(data["config"], tmp_dir) do.run(broad.gatk_cmd("gatk", jvm_opts, params), "Filter problem reads") bam.index(out_file, data["config"]) return out_file
[ "def", "_filter_bad_reads", "(", "in_bam", ",", "ref_file", ",", "data", ")", ":", "bam", ".", "index", "(", "in_bam", ",", "data", "[", "\"config\"", "]", ")", "out_file", "=", "\"%s-gatkfilter.bam\"", "%", "os", ".", "path", ".", "splitext", "(", "in_b...
Use GATK filter to remove problem reads which choke GATK and Picard.
[ "Use", "GATK", "filter", "to", "remove", "problem", "reads", "which", "choke", "GATK", "and", "Picard", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/cleanbam.py#L141-L161
224,276
bcbio/bcbio-nextgen
bcbio/pipeline/qcsummary.py
generate_parallel
def generate_parallel(samples, run_parallel): """Provide parallel preparation of summary information for alignment and variant calling. """ to_analyze, extras = _split_samples_by_qc(samples) qced = run_parallel("pipeline_summary", to_analyze) samples = _combine_qc_samples(qced) + extras qsign_info = run_parallel("qsignature_summary", [samples]) metadata_file = _merge_metadata([samples]) summary_file = write_project_summary(samples, qsign_info) out = [] for data in samples: if "summary" not in data[0]: data[0]["summary"] = {} data[0]["summary"]["project"] = summary_file data[0]["summary"]["metadata"] = metadata_file if qsign_info: data[0]["summary"]["mixup_check"] = qsign_info[0]["out_dir"] out.append(data) out = _add_researcher_summary(out, summary_file) # MultiQC must be run after all file outputs are set: return [[utils.to_single_data(d)] for d in run_parallel("multiqc_summary", [out])]
python
def generate_parallel(samples, run_parallel): """Provide parallel preparation of summary information for alignment and variant calling. """ to_analyze, extras = _split_samples_by_qc(samples) qced = run_parallel("pipeline_summary", to_analyze) samples = _combine_qc_samples(qced) + extras qsign_info = run_parallel("qsignature_summary", [samples]) metadata_file = _merge_metadata([samples]) summary_file = write_project_summary(samples, qsign_info) out = [] for data in samples: if "summary" not in data[0]: data[0]["summary"] = {} data[0]["summary"]["project"] = summary_file data[0]["summary"]["metadata"] = metadata_file if qsign_info: data[0]["summary"]["mixup_check"] = qsign_info[0]["out_dir"] out.append(data) out = _add_researcher_summary(out, summary_file) # MultiQC must be run after all file outputs are set: return [[utils.to_single_data(d)] for d in run_parallel("multiqc_summary", [out])]
[ "def", "generate_parallel", "(", "samples", ",", "run_parallel", ")", ":", "to_analyze", ",", "extras", "=", "_split_samples_by_qc", "(", "samples", ")", "qced", "=", "run_parallel", "(", "\"pipeline_summary\"", ",", "to_analyze", ")", "samples", "=", "_combine_qc...
Provide parallel preparation of summary information for alignment and variant calling.
[ "Provide", "parallel", "preparation", "of", "summary", "information", "for", "alignment", "and", "variant", "calling", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/qcsummary.py#L38-L58
224,277
bcbio/bcbio-nextgen
bcbio/pipeline/qcsummary.py
pipeline_summary
def pipeline_summary(data): """Provide summary information on processing sample. Handles standard and CWL (single QC output) cases. """ data = utils.to_single_data(data) work_bam = dd.get_align_bam(data) or dd.get_work_bam(data) if not work_bam or not work_bam.endswith(".bam"): work_bam = None if dd.get_ref_file(data): if work_bam or (tz.get_in(["config", "algorithm", "kraken"], data)): # kraken doesn't need bam logger.info("QC: %s %s" % (dd.get_sample_name(data), ", ".join(dd.get_algorithm_qc(data)))) work_data = cwlutils.unpack_tarballs(utils.deepish_copy(data), data) data["summary"] = _run_qc_tools(work_bam, work_data) if (len(dd.get_algorithm_qc(data)) == 1 and "output_cwl_keys" in data): data["summary"]["qc"] = data["summary"]["qc"].get(dd.get_algorithm_qc(data)[0]) return [[data]]
python
def pipeline_summary(data): """Provide summary information on processing sample. Handles standard and CWL (single QC output) cases. """ data = utils.to_single_data(data) work_bam = dd.get_align_bam(data) or dd.get_work_bam(data) if not work_bam or not work_bam.endswith(".bam"): work_bam = None if dd.get_ref_file(data): if work_bam or (tz.get_in(["config", "algorithm", "kraken"], data)): # kraken doesn't need bam logger.info("QC: %s %s" % (dd.get_sample_name(data), ", ".join(dd.get_algorithm_qc(data)))) work_data = cwlutils.unpack_tarballs(utils.deepish_copy(data), data) data["summary"] = _run_qc_tools(work_bam, work_data) if (len(dd.get_algorithm_qc(data)) == 1 and "output_cwl_keys" in data): data["summary"]["qc"] = data["summary"]["qc"].get(dd.get_algorithm_qc(data)[0]) return [[data]]
[ "def", "pipeline_summary", "(", "data", ")", ":", "data", "=", "utils", ".", "to_single_data", "(", "data", ")", "work_bam", "=", "dd", ".", "get_align_bam", "(", "data", ")", "or", "dd", ".", "get_work_bam", "(", "data", ")", "if", "not", "work_bam", ...
Provide summary information on processing sample. Handles standard and CWL (single QC output) cases.
[ "Provide", "summary", "information", "on", "processing", "sample", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/qcsummary.py#L60-L76
224,278
bcbio/bcbio-nextgen
bcbio/pipeline/qcsummary.py
get_qc_tools
def get_qc_tools(data): """Retrieve a list of QC tools to use based on configuration and analysis type. Uses defaults if previously set. """ if dd.get_algorithm_qc(data): return dd.get_algorithm_qc(data) analysis = data["analysis"].lower() to_run = [] if tz.get_in(["config", "algorithm", "kraken"], data): to_run.append("kraken") if "fastqc" not in dd.get_tools_off(data): to_run.append("fastqc") if any([tool in dd.get_tools_on(data) for tool in ["qualimap", "qualimap_full"]]): to_run.append("qualimap") if analysis.startswith("rna-seq") or analysis == "smallrna-seq": if "qualimap" not in dd.get_tools_off(data): if gtf.is_qualimap_compatible(dd.get_gtf_file(data)): to_run.append("qualimap_rnaseq") else: logger.debug("GTF not compatible with Qualimap, skipping.") if analysis.startswith("chip-seq"): to_run.append("chipqc") if analysis.startswith("smallrna-seq"): to_run.append("small-rna") to_run.append("atropos") if "coverage_qc" not in dd.get_tools_off(data): to_run.append("samtools") if dd.has_variantcalls(data): if "coverage_qc" not in dd.get_tools_off(data): to_run += ["coverage", "picard"] to_run += ["qsignature", "variants"] if vcfanno.is_human(data): to_run += ["contamination", "peddy"] if vcfutils.get_paired_phenotype(data): to_run += ["viral"] if damage.should_filter([data]): to_run += ["damage"] if dd.get_umi_consensus(data): to_run += ["umi"] if tz.get_in(["config", "algorithm", "preseq"], data): to_run.append("preseq") to_run = [tool for tool in to_run if tool not in dd.get_tools_off(data)] to_run.sort() return to_run
python
def get_qc_tools(data): """Retrieve a list of QC tools to use based on configuration and analysis type. Uses defaults if previously set. """ if dd.get_algorithm_qc(data): return dd.get_algorithm_qc(data) analysis = data["analysis"].lower() to_run = [] if tz.get_in(["config", "algorithm", "kraken"], data): to_run.append("kraken") if "fastqc" not in dd.get_tools_off(data): to_run.append("fastqc") if any([tool in dd.get_tools_on(data) for tool in ["qualimap", "qualimap_full"]]): to_run.append("qualimap") if analysis.startswith("rna-seq") or analysis == "smallrna-seq": if "qualimap" not in dd.get_tools_off(data): if gtf.is_qualimap_compatible(dd.get_gtf_file(data)): to_run.append("qualimap_rnaseq") else: logger.debug("GTF not compatible with Qualimap, skipping.") if analysis.startswith("chip-seq"): to_run.append("chipqc") if analysis.startswith("smallrna-seq"): to_run.append("small-rna") to_run.append("atropos") if "coverage_qc" not in dd.get_tools_off(data): to_run.append("samtools") if dd.has_variantcalls(data): if "coverage_qc" not in dd.get_tools_off(data): to_run += ["coverage", "picard"] to_run += ["qsignature", "variants"] if vcfanno.is_human(data): to_run += ["contamination", "peddy"] if vcfutils.get_paired_phenotype(data): to_run += ["viral"] if damage.should_filter([data]): to_run += ["damage"] if dd.get_umi_consensus(data): to_run += ["umi"] if tz.get_in(["config", "algorithm", "preseq"], data): to_run.append("preseq") to_run = [tool for tool in to_run if tool not in dd.get_tools_off(data)] to_run.sort() return to_run
[ "def", "get_qc_tools", "(", "data", ")", ":", "if", "dd", ".", "get_algorithm_qc", "(", "data", ")", ":", "return", "dd", ".", "get_algorithm_qc", "(", "data", ")", "analysis", "=", "data", "[", "\"analysis\"", "]", ".", "lower", "(", ")", "to_run", "=...
Retrieve a list of QC tools to use based on configuration and analysis type. Uses defaults if previously set.
[ "Retrieve", "a", "list", "of", "QC", "tools", "to", "use", "based", "on", "configuration", "and", "analysis", "type", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/qcsummary.py#L78-L123
224,279
bcbio/bcbio-nextgen
bcbio/pipeline/qcsummary.py
_run_qc_tools
def _run_qc_tools(bam_file, data): """Run a set of third party quality control tools, returning QC directory and metrics. :param bam_file: alignments in bam format :param data: dict with all configuration information :returns: dict with output of different tools """ from bcbio.qc import (atropos, contamination, coverage, damage, fastqc, kraken, qsignature, qualimap, samtools, picard, srna, umi, variant, viral, preseq, chipseq) tools = {"fastqc": fastqc.run, "atropos": atropos.run, "small-rna": srna.run, "samtools": samtools.run, "qualimap": qualimap.run, "qualimap_rnaseq": qualimap.run_rnaseq, "qsignature": qsignature.run, "contamination": contamination.run, "coverage": coverage.run, "damage": damage.run, "variants": variant.run, "peddy": peddy.run_qc, "kraken": kraken.run, "picard": picard.run, "umi": umi.run, "viral": viral.run, "preseq": preseq.run, "chipqc": chipseq.run } qc_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "qc", data["description"])) metrics = {} qc_out = utils.deepish_copy(dd.get_summary_qc(data)) for program_name in dd.get_algorithm_qc(data): if not bam_file and program_name != "kraken": # kraken doesn't need bam continue if dd.get_phenotype(data) == "germline" and program_name != "variants": continue qc_fn = tools[program_name] cur_qc_dir = os.path.join(qc_dir, program_name) out = qc_fn(bam_file, data, cur_qc_dir) qc_files = None if out and isinstance(out, dict): # Check for metrics output, two cases: # 1. output with {"metrics"} and files ("base") if "metrics" in out: metrics.update(out.pop("metrics")) # 2. a dictionary of metrics elif "base" not in out: metrics.update(out) # Check for files only output if "base" in out: qc_files = out elif out and isinstance(out, six.string_types) and os.path.exists(out): qc_files = {"base": out, "secondary": []} if not qc_files: qc_files = _organize_qc_files(program_name, cur_qc_dir) if qc_files: qc_out[program_name] = qc_files metrics["Name"] = dd.get_sample_name(data) metrics["Quality format"] = dd.get_quality_format(data).lower() return {"qc": qc_out, "metrics": metrics}
python
def _run_qc_tools(bam_file, data): """Run a set of third party quality control tools, returning QC directory and metrics. :param bam_file: alignments in bam format :param data: dict with all configuration information :returns: dict with output of different tools """ from bcbio.qc import (atropos, contamination, coverage, damage, fastqc, kraken, qsignature, qualimap, samtools, picard, srna, umi, variant, viral, preseq, chipseq) tools = {"fastqc": fastqc.run, "atropos": atropos.run, "small-rna": srna.run, "samtools": samtools.run, "qualimap": qualimap.run, "qualimap_rnaseq": qualimap.run_rnaseq, "qsignature": qsignature.run, "contamination": contamination.run, "coverage": coverage.run, "damage": damage.run, "variants": variant.run, "peddy": peddy.run_qc, "kraken": kraken.run, "picard": picard.run, "umi": umi.run, "viral": viral.run, "preseq": preseq.run, "chipqc": chipseq.run } qc_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "qc", data["description"])) metrics = {} qc_out = utils.deepish_copy(dd.get_summary_qc(data)) for program_name in dd.get_algorithm_qc(data): if not bam_file and program_name != "kraken": # kraken doesn't need bam continue if dd.get_phenotype(data) == "germline" and program_name != "variants": continue qc_fn = tools[program_name] cur_qc_dir = os.path.join(qc_dir, program_name) out = qc_fn(bam_file, data, cur_qc_dir) qc_files = None if out and isinstance(out, dict): # Check for metrics output, two cases: # 1. output with {"metrics"} and files ("base") if "metrics" in out: metrics.update(out.pop("metrics")) # 2. a dictionary of metrics elif "base" not in out: metrics.update(out) # Check for files only output if "base" in out: qc_files = out elif out and isinstance(out, six.string_types) and os.path.exists(out): qc_files = {"base": out, "secondary": []} if not qc_files: qc_files = _organize_qc_files(program_name, cur_qc_dir) if qc_files: qc_out[program_name] = qc_files metrics["Name"] = dd.get_sample_name(data) metrics["Quality format"] = dd.get_quality_format(data).lower() return {"qc": qc_out, "metrics": metrics}
[ "def", "_run_qc_tools", "(", "bam_file", ",", "data", ")", ":", "from", "bcbio", ".", "qc", "import", "(", "atropos", ",", "contamination", ",", "coverage", ",", "damage", ",", "fastqc", ",", "kraken", ",", "qsignature", ",", "qualimap", ",", "samtools", ...
Run a set of third party quality control tools, returning QC directory and metrics. :param bam_file: alignments in bam format :param data: dict with all configuration information :returns: dict with output of different tools
[ "Run", "a", "set", "of", "third", "party", "quality", "control", "tools", "returning", "QC", "directory", "and", "metrics", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/qcsummary.py#L125-L187
224,280
bcbio/bcbio-nextgen
bcbio/pipeline/qcsummary.py
_organize_qc_files
def _organize_qc_files(program, qc_dir): """Organize outputs from quality control runs into a base file and secondary outputs. Provides compatibility with CWL output. Returns None if no files created during processing. """ base_files = {"fastqc": "fastqc_report.html", "qualimap_rnaseq": "qualimapReport.html", "qualimap": "qualimapReport.html"} if os.path.exists(qc_dir): out_files = [] for fname in [os.path.join(qc_dir, x) for x in os.listdir(qc_dir)]: if os.path.isfile(fname) and not fname.endswith(".bcbiotmp"): out_files.append(fname) elif os.path.isdir(fname) and not fname.endswith("tx"): for root, dirs, files in os.walk(fname): for f in files: if not f.endswith(".bcbiotmp"): out_files.append(os.path.join(root, f)) if len(out_files) > 0 and all([not f.endswith("-failed.log") for f in out_files]): if len(out_files) == 1: base = out_files[0] secondary = [] else: base = None if program in base_files: base_choices = [x for x in out_files if x.endswith("/%s" % base_files[program])] if len(base_choices) == 1: base = base_choices[0] if not base: base = out_files[0] secondary = [x for x in out_files if x != base] return {"base": base, "secondary": secondary}
python
def _organize_qc_files(program, qc_dir): """Organize outputs from quality control runs into a base file and secondary outputs. Provides compatibility with CWL output. Returns None if no files created during processing. """ base_files = {"fastqc": "fastqc_report.html", "qualimap_rnaseq": "qualimapReport.html", "qualimap": "qualimapReport.html"} if os.path.exists(qc_dir): out_files = [] for fname in [os.path.join(qc_dir, x) for x in os.listdir(qc_dir)]: if os.path.isfile(fname) and not fname.endswith(".bcbiotmp"): out_files.append(fname) elif os.path.isdir(fname) and not fname.endswith("tx"): for root, dirs, files in os.walk(fname): for f in files: if not f.endswith(".bcbiotmp"): out_files.append(os.path.join(root, f)) if len(out_files) > 0 and all([not f.endswith("-failed.log") for f in out_files]): if len(out_files) == 1: base = out_files[0] secondary = [] else: base = None if program in base_files: base_choices = [x for x in out_files if x.endswith("/%s" % base_files[program])] if len(base_choices) == 1: base = base_choices[0] if not base: base = out_files[0] secondary = [x for x in out_files if x != base] return {"base": base, "secondary": secondary}
[ "def", "_organize_qc_files", "(", "program", ",", "qc_dir", ")", ":", "base_files", "=", "{", "\"fastqc\"", ":", "\"fastqc_report.html\"", ",", "\"qualimap_rnaseq\"", ":", "\"qualimapReport.html\"", ",", "\"qualimap\"", ":", "\"qualimapReport.html\"", "}", "if", "os",...
Organize outputs from quality control runs into a base file and secondary outputs. Provides compatibility with CWL output. Returns None if no files created during processing.
[ "Organize", "outputs", "from", "quality", "control", "runs", "into", "a", "base", "file", "and", "secondary", "outputs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/qcsummary.py#L189-L220
224,281
bcbio/bcbio-nextgen
bcbio/pipeline/qcsummary.py
_split_samples_by_qc
def _split_samples_by_qc(samples): """Split data into individual quality control steps for a run. """ to_process = [] extras = [] for data in [utils.to_single_data(x) for x in samples]: qcs = dd.get_algorithm_qc(data) # kraken doesn't need bam if qcs and (dd.get_align_bam(data) or dd.get_work_bam(data) or tz.get_in(["config", "algorithm", "kraken"], data)): for qc in qcs: add = copy.deepcopy(data) add["config"]["algorithm"]["qc"] = [qc] to_process.append([add]) else: extras.append([data]) return to_process, extras
python
def _split_samples_by_qc(samples): """Split data into individual quality control steps for a run. """ to_process = [] extras = [] for data in [utils.to_single_data(x) for x in samples]: qcs = dd.get_algorithm_qc(data) # kraken doesn't need bam if qcs and (dd.get_align_bam(data) or dd.get_work_bam(data) or tz.get_in(["config", "algorithm", "kraken"], data)): for qc in qcs: add = copy.deepcopy(data) add["config"]["algorithm"]["qc"] = [qc] to_process.append([add]) else: extras.append([data]) return to_process, extras
[ "def", "_split_samples_by_qc", "(", "samples", ")", ":", "to_process", "=", "[", "]", "extras", "=", "[", "]", "for", "data", "in", "[", "utils", ".", "to_single_data", "(", "x", ")", "for", "x", "in", "samples", "]", ":", "qcs", "=", "dd", ".", "g...
Split data into individual quality control steps for a run.
[ "Split", "data", "into", "individual", "quality", "control", "steps", "for", "a", "run", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/qcsummary.py#L224-L240
224,282
bcbio/bcbio-nextgen
bcbio/pipeline/qcsummary.py
_combine_qc_samples
def _combine_qc_samples(samples): """Combine split QC analyses into single samples based on BAM files. """ by_bam = collections.defaultdict(list) for data in [utils.to_single_data(x) for x in samples]: batch = dd.get_batch(data) or dd.get_sample_name(data) if not isinstance(batch, (list, tuple)): batch = [batch] batch = tuple(batch) by_bam[(dd.get_align_bam(data) or dd.get_work_bam(data), batch)].append(data) out = [] for data_group in by_bam.values(): data = data_group[0] alg_qc = [] qc = {} metrics = {} for d in data_group: qc.update(dd.get_summary_qc(d)) metrics.update(dd.get_summary_metrics(d)) alg_qc.extend(dd.get_algorithm_qc(d)) data["config"]["algorithm"]["qc"] = alg_qc data["summary"]["qc"] = qc data["summary"]["metrics"] = metrics out.append([data]) return out
python
def _combine_qc_samples(samples): """Combine split QC analyses into single samples based on BAM files. """ by_bam = collections.defaultdict(list) for data in [utils.to_single_data(x) for x in samples]: batch = dd.get_batch(data) or dd.get_sample_name(data) if not isinstance(batch, (list, tuple)): batch = [batch] batch = tuple(batch) by_bam[(dd.get_align_bam(data) or dd.get_work_bam(data), batch)].append(data) out = [] for data_group in by_bam.values(): data = data_group[0] alg_qc = [] qc = {} metrics = {} for d in data_group: qc.update(dd.get_summary_qc(d)) metrics.update(dd.get_summary_metrics(d)) alg_qc.extend(dd.get_algorithm_qc(d)) data["config"]["algorithm"]["qc"] = alg_qc data["summary"]["qc"] = qc data["summary"]["metrics"] = metrics out.append([data]) return out
[ "def", "_combine_qc_samples", "(", "samples", ")", ":", "by_bam", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "data", "in", "[", "utils", ".", "to_single_data", "(", "x", ")", "for", "x", "in", "samples", "]", ":", "batch", "=", "d...
Combine split QC analyses into single samples based on BAM files.
[ "Combine", "split", "QC", "analyses", "into", "single", "samples", "based", "on", "BAM", "files", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/qcsummary.py#L242-L266
224,283
bcbio/bcbio-nextgen
bcbio/pipeline/qcsummary.py
write_project_summary
def write_project_summary(samples, qsign_info=None): """Write project summary information on the provided samples. write out dirs, genome resources, """ work_dir = samples[0][0]["dirs"]["work"] out_file = os.path.join(work_dir, "project-summary.yaml") upload_dir = (os.path.join(work_dir, samples[0][0]["upload"]["dir"]) if "dir" in samples[0][0]["upload"] else "") date = str(datetime.now()) prev_samples = _other_pipeline_samples(out_file, samples) with open(out_file, "w") as out_handle: yaml.safe_dump({"date": date}, out_handle, default_flow_style=False, allow_unicode=False) if qsign_info: qsign_out = utils.deepish_copy(qsign_info[0]) qsign_out.pop("out_dir", None) yaml.safe_dump({"qsignature": qsign_out}, out_handle, default_flow_style=False, allow_unicode=False) yaml.safe_dump({"upload": upload_dir}, out_handle, default_flow_style=False, allow_unicode=False) yaml.safe_dump({"bcbio_system": samples[0][0]["config"].get("bcbio_system", "")}, out_handle, default_flow_style=False, allow_unicode=False) yaml.safe_dump({"samples": prev_samples + [_save_fields(sample[0]) for sample in samples]}, out_handle, default_flow_style=False, allow_unicode=False) return out_file
python
def write_project_summary(samples, qsign_info=None): """Write project summary information on the provided samples. write out dirs, genome resources, """ work_dir = samples[0][0]["dirs"]["work"] out_file = os.path.join(work_dir, "project-summary.yaml") upload_dir = (os.path.join(work_dir, samples[0][0]["upload"]["dir"]) if "dir" in samples[0][0]["upload"] else "") date = str(datetime.now()) prev_samples = _other_pipeline_samples(out_file, samples) with open(out_file, "w") as out_handle: yaml.safe_dump({"date": date}, out_handle, default_flow_style=False, allow_unicode=False) if qsign_info: qsign_out = utils.deepish_copy(qsign_info[0]) qsign_out.pop("out_dir", None) yaml.safe_dump({"qsignature": qsign_out}, out_handle, default_flow_style=False, allow_unicode=False) yaml.safe_dump({"upload": upload_dir}, out_handle, default_flow_style=False, allow_unicode=False) yaml.safe_dump({"bcbio_system": samples[0][0]["config"].get("bcbio_system", "")}, out_handle, default_flow_style=False, allow_unicode=False) yaml.safe_dump({"samples": prev_samples + [_save_fields(sample[0]) for sample in samples]}, out_handle, default_flow_style=False, allow_unicode=False) return out_file
[ "def", "write_project_summary", "(", "samples", ",", "qsign_info", "=", "None", ")", ":", "work_dir", "=", "samples", "[", "0", "]", "[", "0", "]", "[", "\"dirs\"", "]", "[", "\"work\"", "]", "out_file", "=", "os", ".", "path", ".", "join", "(", "wor...
Write project summary information on the provided samples. write out dirs, genome resources,
[ "Write", "project", "summary", "information", "on", "the", "provided", "samples", ".", "write", "out", "dirs", "genome", "resources" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/qcsummary.py#L270-L295
224,284
bcbio/bcbio-nextgen
bcbio/pipeline/qcsummary.py
_merge_metadata
def _merge_metadata(samples): """Merge all metadata into CSV file""" samples = list(utils.flatten(samples)) out_dir = dd.get_work_dir(samples[0]) logger.info("summarize metadata") out_file = os.path.join(out_dir, "metadata.csv") sample_metrics = collections.defaultdict(dict) for s in samples: m = tz.get_in(['metadata'], s) if isinstance(m, six.string_types): m = json.loads(m) if m: for me in list(m.keys()): if isinstance(m[me], list) or isinstance(m[me], dict) or isinstance(m[me], tuple): m.pop(me, None) sample_metrics[dd.get_sample_name(s)].update(m) pd.DataFrame(sample_metrics).transpose().to_csv(out_file) return out_file
python
def _merge_metadata(samples): """Merge all metadata into CSV file""" samples = list(utils.flatten(samples)) out_dir = dd.get_work_dir(samples[0]) logger.info("summarize metadata") out_file = os.path.join(out_dir, "metadata.csv") sample_metrics = collections.defaultdict(dict) for s in samples: m = tz.get_in(['metadata'], s) if isinstance(m, six.string_types): m = json.loads(m) if m: for me in list(m.keys()): if isinstance(m[me], list) or isinstance(m[me], dict) or isinstance(m[me], tuple): m.pop(me, None) sample_metrics[dd.get_sample_name(s)].update(m) pd.DataFrame(sample_metrics).transpose().to_csv(out_file) return out_file
[ "def", "_merge_metadata", "(", "samples", ")", ":", "samples", "=", "list", "(", "utils", ".", "flatten", "(", "samples", ")", ")", "out_dir", "=", "dd", ".", "get_work_dir", "(", "samples", "[", "0", "]", ")", "logger", ".", "info", "(", "\"summarize ...
Merge all metadata into CSV file
[ "Merge", "all", "metadata", "into", "CSV", "file" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/qcsummary.py#L297-L314
224,285
bcbio/bcbio-nextgen
bcbio/pipeline/qcsummary.py
_other_pipeline_samples
def _other_pipeline_samples(summary_file, cur_samples): """Retrieve samples produced previously by another pipeline in the summary output. """ cur_descriptions = set([s[0]["description"] for s in cur_samples]) out = [] if utils.file_exists(summary_file): with open(summary_file) as in_handle: for s in yaml.safe_load(in_handle).get("samples", []): if s["description"] not in cur_descriptions: out.append(s) return out
python
def _other_pipeline_samples(summary_file, cur_samples): """Retrieve samples produced previously by another pipeline in the summary output. """ cur_descriptions = set([s[0]["description"] for s in cur_samples]) out = [] if utils.file_exists(summary_file): with open(summary_file) as in_handle: for s in yaml.safe_load(in_handle).get("samples", []): if s["description"] not in cur_descriptions: out.append(s) return out
[ "def", "_other_pipeline_samples", "(", "summary_file", ",", "cur_samples", ")", ":", "cur_descriptions", "=", "set", "(", "[", "s", "[", "0", "]", "[", "\"description\"", "]", "for", "s", "in", "cur_samples", "]", ")", "out", "=", "[", "]", "if", "utils"...
Retrieve samples produced previously by another pipeline in the summary output.
[ "Retrieve", "samples", "produced", "previously", "by", "another", "pipeline", "in", "the", "summary", "output", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/qcsummary.py#L316-L326
224,286
bcbio/bcbio-nextgen
bcbio/pipeline/qcsummary.py
_add_researcher_summary
def _add_researcher_summary(samples, summary_yaml): """Generate summary files per researcher if organized via a LIMS. """ by_researcher = collections.defaultdict(list) for data in (x[0] for x in samples): researcher = utils.get_in(data, ("upload", "researcher")) if researcher: by_researcher[researcher].append(data["description"]) out_by_researcher = {} for researcher, descrs in by_researcher.items(): out_by_researcher[researcher] = _summary_csv_by_researcher(summary_yaml, researcher, set(descrs), samples[0][0]) out = [] for data in (x[0] for x in samples): researcher = utils.get_in(data, ("upload", "researcher")) if researcher: data["summary"]["researcher"] = out_by_researcher[researcher] out.append([data]) return out
python
def _add_researcher_summary(samples, summary_yaml): """Generate summary files per researcher if organized via a LIMS. """ by_researcher = collections.defaultdict(list) for data in (x[0] for x in samples): researcher = utils.get_in(data, ("upload", "researcher")) if researcher: by_researcher[researcher].append(data["description"]) out_by_researcher = {} for researcher, descrs in by_researcher.items(): out_by_researcher[researcher] = _summary_csv_by_researcher(summary_yaml, researcher, set(descrs), samples[0][0]) out = [] for data in (x[0] for x in samples): researcher = utils.get_in(data, ("upload", "researcher")) if researcher: data["summary"]["researcher"] = out_by_researcher[researcher] out.append([data]) return out
[ "def", "_add_researcher_summary", "(", "samples", ",", "summary_yaml", ")", ":", "by_researcher", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "data", "in", "(", "x", "[", "0", "]", "for", "x", "in", "samples", ")", ":", "researcher", ...
Generate summary files per researcher if organized via a LIMS.
[ "Generate", "summary", "files", "per", "researcher", "if", "organized", "via", "a", "LIMS", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/qcsummary.py#L338-L356
224,287
bcbio/bcbio-nextgen
bcbio/pipeline/qcsummary.py
_summary_csv_by_researcher
def _summary_csv_by_researcher(summary_yaml, researcher, descrs, data): """Generate a CSV file with summary information for a researcher on this project. """ out_file = os.path.join(utils.safe_makedir(os.path.join(data["dirs"]["work"], "researcher")), "%s-summary.tsv" % run_info.clean_name(researcher)) metrics = ["Total_reads", "Mapped_reads", "Mapped_reads_pct", "Duplicates", "Duplicates_pct"] with open(summary_yaml) as in_handle: with open(out_file, "w") as out_handle: writer = csv.writer(out_handle, dialect="excel-tab") writer.writerow(["Name"] + metrics) for sample in yaml.safe_load(in_handle)["samples"]: if sample["description"] in descrs: row = [sample["description"]] + [utils.get_in(sample, ("summary", "metrics", x), "") for x in metrics] writer.writerow(row) return out_file
python
def _summary_csv_by_researcher(summary_yaml, researcher, descrs, data): """Generate a CSV file with summary information for a researcher on this project. """ out_file = os.path.join(utils.safe_makedir(os.path.join(data["dirs"]["work"], "researcher")), "%s-summary.tsv" % run_info.clean_name(researcher)) metrics = ["Total_reads", "Mapped_reads", "Mapped_reads_pct", "Duplicates", "Duplicates_pct"] with open(summary_yaml) as in_handle: with open(out_file, "w") as out_handle: writer = csv.writer(out_handle, dialect="excel-tab") writer.writerow(["Name"] + metrics) for sample in yaml.safe_load(in_handle)["samples"]: if sample["description"] in descrs: row = [sample["description"]] + [utils.get_in(sample, ("summary", "metrics", x), "") for x in metrics] writer.writerow(row) return out_file
[ "def", "_summary_csv_by_researcher", "(", "summary_yaml", ",", "researcher", ",", "descrs", ",", "data", ")", ":", "out_file", "=", "os", ".", "path", ".", "join", "(", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "data", "[", ...
Generate a CSV file with summary information for a researcher on this project.
[ "Generate", "a", "CSV", "file", "with", "summary", "information", "for", "a", "researcher", "on", "this", "project", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/qcsummary.py#L358-L373
224,288
bcbio/bcbio-nextgen
bcbio/pipeline/qcsummary.py
prep_pdf
def prep_pdf(qc_dir, config): """Create PDF from HTML summary outputs in QC directory. Requires wkhtmltopdf installed: http://www.msweet.org/projects.php?Z1 Thanks to: https://www.biostars.org/p/16991/ Works around issues with CSS conversion on CentOS by adjusting CSS. """ html_file = os.path.join(qc_dir, "fastqc", "fastqc_report.html") html_fixed = "%s-fixed%s" % os.path.splitext(html_file) try: topdf = config_utils.get_program("wkhtmltopdf", config) except config_utils.CmdNotFound: topdf = None if topdf and utils.file_exists(html_file): out_file = "%s.pdf" % os.path.splitext(html_file)[0] if not utils.file_exists(out_file): cmd = ("sed 's/div.summary/div.summary-no/' %s | sed 's/div.main/div.main-no/' > %s" % (html_file, html_fixed)) do.run(cmd, "Fix fastqc CSS to be compatible with wkhtmltopdf") cmd = [topdf, html_fixed, out_file] do.run(cmd, "Convert QC HTML to PDF") return out_file
python
def prep_pdf(qc_dir, config): """Create PDF from HTML summary outputs in QC directory. Requires wkhtmltopdf installed: http://www.msweet.org/projects.php?Z1 Thanks to: https://www.biostars.org/p/16991/ Works around issues with CSS conversion on CentOS by adjusting CSS. """ html_file = os.path.join(qc_dir, "fastqc", "fastqc_report.html") html_fixed = "%s-fixed%s" % os.path.splitext(html_file) try: topdf = config_utils.get_program("wkhtmltopdf", config) except config_utils.CmdNotFound: topdf = None if topdf and utils.file_exists(html_file): out_file = "%s.pdf" % os.path.splitext(html_file)[0] if not utils.file_exists(out_file): cmd = ("sed 's/div.summary/div.summary-no/' %s | sed 's/div.main/div.main-no/' > %s" % (html_file, html_fixed)) do.run(cmd, "Fix fastqc CSS to be compatible with wkhtmltopdf") cmd = [topdf, html_fixed, out_file] do.run(cmd, "Convert QC HTML to PDF") return out_file
[ "def", "prep_pdf", "(", "qc_dir", ",", "config", ")", ":", "html_file", "=", "os", ".", "path", ".", "join", "(", "qc_dir", ",", "\"fastqc\"", ",", "\"fastqc_report.html\"", ")", "html_fixed", "=", "\"%s-fixed%s\"", "%", "os", ".", "path", ".", "splitext",...
Create PDF from HTML summary outputs in QC directory. Requires wkhtmltopdf installed: http://www.msweet.org/projects.php?Z1 Thanks to: https://www.biostars.org/p/16991/ Works around issues with CSS conversion on CentOS by adjusting CSS.
[ "Create", "PDF", "from", "HTML", "summary", "outputs", "in", "QC", "directory", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/qcsummary.py#L377-L399
224,289
bcbio/bcbio-nextgen
bcbio/structural/purecn.py
_run_purecn_dx
def _run_purecn_dx(out, paired): """Extract signatures and mutational burdens from PureCN rds file. """ out_base, out, all_files = _get_purecn_dx_files(paired, out) if not utils.file_uptodate(out["mutation_burden"], out["rds"]): with file_transaction(paired.tumor_data, out_base) as tx_out_base: cmd = ["PureCN_Dx.R", "--rds", out["rds"], "--callable", dd.get_sample_callable(paired.tumor_data), "--signatures", "--out", tx_out_base] do.run(cmd, "PureCN Dx mutational burden and signatures") for f in all_files: if os.path.exists(os.path.join(os.path.dirname(tx_out_base), f)): shutil.move(os.path.join(os.path.dirname(tx_out_base), f), os.path.join(os.path.dirname(out_base), f)) return out
python
def _run_purecn_dx(out, paired): """Extract signatures and mutational burdens from PureCN rds file. """ out_base, out, all_files = _get_purecn_dx_files(paired, out) if not utils.file_uptodate(out["mutation_burden"], out["rds"]): with file_transaction(paired.tumor_data, out_base) as tx_out_base: cmd = ["PureCN_Dx.R", "--rds", out["rds"], "--callable", dd.get_sample_callable(paired.tumor_data), "--signatures", "--out", tx_out_base] do.run(cmd, "PureCN Dx mutational burden and signatures") for f in all_files: if os.path.exists(os.path.join(os.path.dirname(tx_out_base), f)): shutil.move(os.path.join(os.path.dirname(tx_out_base), f), os.path.join(os.path.dirname(out_base), f)) return out
[ "def", "_run_purecn_dx", "(", "out", ",", "paired", ")", ":", "out_base", ",", "out", ",", "all_files", "=", "_get_purecn_dx_files", "(", "paired", ",", "out", ")", "if", "not", "utils", ".", "file_uptodate", "(", "out", "[", "\"mutation_burden\"", "]", ",...
Extract signatures and mutational burdens from PureCN rds file.
[ "Extract", "signatures", "and", "mutational", "burdens", "from", "PureCN", "rds", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/purecn.py#L49-L62
224,290
bcbio/bcbio-nextgen
bcbio/structural/purecn.py
_get_purecn_dx_files
def _get_purecn_dx_files(paired, out): """Retrieve files generated by PureCN_Dx """ out_base = "%s-dx" % utils.splitext_plus(out["rds"])[0] all_files = [] for key, ext in [[("mutation_burden",), "_mutation_burden.csv"], [("plot", "signatures"), "_signatures.pdf"], [("signatures",), "_signatures.csv"]]: cur_file = "%s%s" % (out_base, ext) out = tz.update_in(out, key, lambda x: cur_file) all_files.append(os.path.basename(cur_file)) return out_base, out, all_files
python
def _get_purecn_dx_files(paired, out): """Retrieve files generated by PureCN_Dx """ out_base = "%s-dx" % utils.splitext_plus(out["rds"])[0] all_files = [] for key, ext in [[("mutation_burden",), "_mutation_burden.csv"], [("plot", "signatures"), "_signatures.pdf"], [("signatures",), "_signatures.csv"]]: cur_file = "%s%s" % (out_base, ext) out = tz.update_in(out, key, lambda x: cur_file) all_files.append(os.path.basename(cur_file)) return out_base, out, all_files
[ "def", "_get_purecn_dx_files", "(", "paired", ",", "out", ")", ":", "out_base", "=", "\"%s-dx\"", "%", "utils", ".", "splitext_plus", "(", "out", "[", "\"rds\"", "]", ")", "[", "0", "]", "all_files", "=", "[", "]", "for", "key", ",", "ext", "in", "["...
Retrieve files generated by PureCN_Dx
[ "Retrieve", "files", "generated", "by", "PureCN_Dx" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/purecn.py#L64-L75
224,291
bcbio/bcbio-nextgen
bcbio/structural/purecn.py
_run_purecn
def _run_purecn(paired, work_dir): """Run PureCN.R wrapper with pre-segmented CNVkit or GATK4 inputs. """ segfns = {"cnvkit": _segment_normalized_cnvkit, "gatk-cnv": _segment_normalized_gatk} out_base, out, all_files = _get_purecn_files(paired, work_dir) failed_file = out_base + "-failed.log" cnr_file = tz.get_in(["depth", "bins", "normalized"], paired.tumor_data) if not utils.file_uptodate(out["rds"], cnr_file) and not utils.file_exists(failed_file): cnr_file, seg_file = segfns[cnvkit.bin_approach(paired.tumor_data)](cnr_file, work_dir, paired) from bcbio import heterogeneity vcf_file = heterogeneity.get_variants(paired.tumor_data, include_germline=False)[0]["vrn_file"] vcf_file = germline.filter_to_pass_and_reject(vcf_file, paired, out_dir=work_dir) with file_transaction(paired.tumor_data, out_base) as tx_out_base: # Use UCSC style naming for human builds to support BSgenome genome = ("hg19" if dd.get_genome_build(paired.tumor_data) in ["GRCh37", "hg19"] else dd.get_genome_build(paired.tumor_data)) cmd = ["PureCN.R", "--seed", "42", "--out", tx_out_base, "--rds", "%s.rds" % tx_out_base, "--sampleid", dd.get_sample_name(paired.tumor_data), "--genome", genome, "--vcf", vcf_file, "--tumor", cnr_file, "--segfile", seg_file, "--funsegmentation", "Hclust", "--maxnonclonal", "0.3"] if dd.get_num_cores(paired.tumor_data) > 1: cmd += ["--cores", str(dd.get_num_cores(paired.tumor_data))] try: cmd = "export R_LIBS_USER=%s && %s && %s" % (utils.R_sitelib(), utils.get_R_exports(), " ".join([str(x) for x in cmd])) do.run(cmd, "PureCN copy number calling") except subprocess.CalledProcessError as msg: if _allowed_errors(str(msg)): logger.info("PureCN failed to find solution for %s: skipping" % dd.get_sample_name(paired.tumor_data)) with open(failed_file, "w") as out_handle: out_handle.write(str(msg)) else: logger.exception() raise for f in all_files: if os.path.exists(os.path.join(os.path.dirname(tx_out_base), f)): shutil.move(os.path.join(os.path.dirname(tx_out_base), f), os.path.join(os.path.dirname(out_base), f)) out = _get_purecn_files(paired, work_dir, require_exist=True)[1] return out if (out.get("rds") and os.path.exists(out["rds"])) else None
python
def _run_purecn(paired, work_dir): """Run PureCN.R wrapper with pre-segmented CNVkit or GATK4 inputs. """ segfns = {"cnvkit": _segment_normalized_cnvkit, "gatk-cnv": _segment_normalized_gatk} out_base, out, all_files = _get_purecn_files(paired, work_dir) failed_file = out_base + "-failed.log" cnr_file = tz.get_in(["depth", "bins", "normalized"], paired.tumor_data) if not utils.file_uptodate(out["rds"], cnr_file) and not utils.file_exists(failed_file): cnr_file, seg_file = segfns[cnvkit.bin_approach(paired.tumor_data)](cnr_file, work_dir, paired) from bcbio import heterogeneity vcf_file = heterogeneity.get_variants(paired.tumor_data, include_germline=False)[0]["vrn_file"] vcf_file = germline.filter_to_pass_and_reject(vcf_file, paired, out_dir=work_dir) with file_transaction(paired.tumor_data, out_base) as tx_out_base: # Use UCSC style naming for human builds to support BSgenome genome = ("hg19" if dd.get_genome_build(paired.tumor_data) in ["GRCh37", "hg19"] else dd.get_genome_build(paired.tumor_data)) cmd = ["PureCN.R", "--seed", "42", "--out", tx_out_base, "--rds", "%s.rds" % tx_out_base, "--sampleid", dd.get_sample_name(paired.tumor_data), "--genome", genome, "--vcf", vcf_file, "--tumor", cnr_file, "--segfile", seg_file, "--funsegmentation", "Hclust", "--maxnonclonal", "0.3"] if dd.get_num_cores(paired.tumor_data) > 1: cmd += ["--cores", str(dd.get_num_cores(paired.tumor_data))] try: cmd = "export R_LIBS_USER=%s && %s && %s" % (utils.R_sitelib(), utils.get_R_exports(), " ".join([str(x) for x in cmd])) do.run(cmd, "PureCN copy number calling") except subprocess.CalledProcessError as msg: if _allowed_errors(str(msg)): logger.info("PureCN failed to find solution for %s: skipping" % dd.get_sample_name(paired.tumor_data)) with open(failed_file, "w") as out_handle: out_handle.write(str(msg)) else: logger.exception() raise for f in all_files: if os.path.exists(os.path.join(os.path.dirname(tx_out_base), f)): shutil.move(os.path.join(os.path.dirname(tx_out_base), f), os.path.join(os.path.dirname(out_base), f)) out = _get_purecn_files(paired, work_dir, require_exist=True)[1] return out if (out.get("rds") and os.path.exists(out["rds"])) else None
[ "def", "_run_purecn", "(", "paired", ",", "work_dir", ")", ":", "segfns", "=", "{", "\"cnvkit\"", ":", "_segment_normalized_cnvkit", ",", "\"gatk-cnv\"", ":", "_segment_normalized_gatk", "}", "out_base", ",", "out", ",", "all_files", "=", "_get_purecn_files", "(",...
Run PureCN.R wrapper with pre-segmented CNVkit or GATK4 inputs.
[ "Run", "PureCN", ".", "R", "wrapper", "with", "pre", "-", "segmented", "CNVkit", "or", "GATK4", "inputs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/purecn.py#L77-L118
224,292
bcbio/bcbio-nextgen
bcbio/structural/purecn.py
_segment_normalized_gatk
def _segment_normalized_gatk(cnr_file, work_dir, paired): """Segmentation of normalized inputs using GATK4, converting into standard input formats. """ work_dir = utils.safe_makedir(os.path.join(work_dir, "gatk-cnv")) seg_file = gatkcnv.model_segments(cnr_file, work_dir, paired)["seg"] std_seg_file = seg_file.replace(".cr.seg", ".seg") if not utils.file_uptodate(std_seg_file, seg_file): with file_transaction(std_seg_file) as tx_out_file: df = pd.read_csv(seg_file, sep="\t", comment="@", header=0, names=["chrom", "loc.start", "loc.end", "num.mark", "seg.mean"]) df.insert(0, "ID", [dd.get_sample_name(paired.tumor_data)] * len(df)) df.to_csv(tx_out_file, sep="\t", header=True, index=False) std_cnr_file = os.path.join(work_dir, "%s.cnr" % dd.get_sample_name(paired.tumor_data)) if not utils.file_uptodate(std_cnr_file, cnr_file): with file_transaction(std_cnr_file) as tx_out_file: logdf = pd.read_csv(cnr_file, sep="\t", comment="@", header=0, names=["chrom", "start", "end", "log2"]) covdf = pd.read_csv(tz.get_in(["depth", "bins", "antitarget"], paired.tumor_data), sep="\t", header=None, names=["chrom", "start", "end", "orig.name", "depth", "gene"]) df = pd.merge(logdf, covdf, on=["chrom", "start", "end"]) del df["orig.name"] df = df[["chrom", "start", "end", "gene", "log2", "depth"]] df.insert(6, "weight", [1.0] * len(df)) df.to_csv(tx_out_file, sep="\t", header=True, index=False) return std_cnr_file, std_seg_file
python
def _segment_normalized_gatk(cnr_file, work_dir, paired): """Segmentation of normalized inputs using GATK4, converting into standard input formats. """ work_dir = utils.safe_makedir(os.path.join(work_dir, "gatk-cnv")) seg_file = gatkcnv.model_segments(cnr_file, work_dir, paired)["seg"] std_seg_file = seg_file.replace(".cr.seg", ".seg") if not utils.file_uptodate(std_seg_file, seg_file): with file_transaction(std_seg_file) as tx_out_file: df = pd.read_csv(seg_file, sep="\t", comment="@", header=0, names=["chrom", "loc.start", "loc.end", "num.mark", "seg.mean"]) df.insert(0, "ID", [dd.get_sample_name(paired.tumor_data)] * len(df)) df.to_csv(tx_out_file, sep="\t", header=True, index=False) std_cnr_file = os.path.join(work_dir, "%s.cnr" % dd.get_sample_name(paired.tumor_data)) if not utils.file_uptodate(std_cnr_file, cnr_file): with file_transaction(std_cnr_file) as tx_out_file: logdf = pd.read_csv(cnr_file, sep="\t", comment="@", header=0, names=["chrom", "start", "end", "log2"]) covdf = pd.read_csv(tz.get_in(["depth", "bins", "antitarget"], paired.tumor_data), sep="\t", header=None, names=["chrom", "start", "end", "orig.name", "depth", "gene"]) df = pd.merge(logdf, covdf, on=["chrom", "start", "end"]) del df["orig.name"] df = df[["chrom", "start", "end", "gene", "log2", "depth"]] df.insert(6, "weight", [1.0] * len(df)) df.to_csv(tx_out_file, sep="\t", header=True, index=False) return std_cnr_file, std_seg_file
[ "def", "_segment_normalized_gatk", "(", "cnr_file", ",", "work_dir", ",", "paired", ")", ":", "work_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"gatk-cnv\"", ")", ")", "seg_file", "=", "gatkcnv", "....
Segmentation of normalized inputs using GATK4, converting into standard input formats.
[ "Segmentation", "of", "normalized", "inputs", "using", "GATK4", "converting", "into", "standard", "input", "formats", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/purecn.py#L126-L151
224,293
bcbio/bcbio-nextgen
bcbio/structural/purecn.py
_segment_normalized_cnvkit
def _segment_normalized_cnvkit(cnr_file, work_dir, paired): """Segmentation of normalized inputs using CNVkit. """ cnvkit_base = os.path.join(utils.safe_makedir(os.path.join(work_dir, "cnvkit")), dd.get_sample_name(paired.tumor_data)) cnr_file = chromhacks.bed_to_standardonly(cnr_file, paired.tumor_data, headers="chromosome", include_sex_chroms=True, out_dir=os.path.dirname(cnvkit_base)) cnr_file = _remove_overlaps(cnr_file, os.path.dirname(cnvkit_base), paired.tumor_data) seg_file = cnvkit.segment_from_cnr(cnr_file, paired.tumor_data, cnvkit_base) return cnr_file, seg_file
python
def _segment_normalized_cnvkit(cnr_file, work_dir, paired): """Segmentation of normalized inputs using CNVkit. """ cnvkit_base = os.path.join(utils.safe_makedir(os.path.join(work_dir, "cnvkit")), dd.get_sample_name(paired.tumor_data)) cnr_file = chromhacks.bed_to_standardonly(cnr_file, paired.tumor_data, headers="chromosome", include_sex_chroms=True, out_dir=os.path.dirname(cnvkit_base)) cnr_file = _remove_overlaps(cnr_file, os.path.dirname(cnvkit_base), paired.tumor_data) seg_file = cnvkit.segment_from_cnr(cnr_file, paired.tumor_data, cnvkit_base) return cnr_file, seg_file
[ "def", "_segment_normalized_cnvkit", "(", "cnr_file", ",", "work_dir", ",", "paired", ")", ":", "cnvkit_base", "=", "os", ".", "path", ".", "join", "(", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"cnvkit\"", ...
Segmentation of normalized inputs using CNVkit.
[ "Segmentation", "of", "normalized", "inputs", "using", "CNVkit", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/purecn.py#L153-L163
224,294
bcbio/bcbio-nextgen
bcbio/structural/purecn.py
_remove_overlaps
def _remove_overlaps(in_file, out_dir, data): """Remove regions that overlap with next region, these result in issues with PureCN. """ out_file = os.path.join(out_dir, "%s-nooverlaps%s" % utils.splitext_plus(os.path.basename(in_file))) if not utils.file_uptodate(out_file, in_file): with file_transaction(data, out_file) as tx_out_file: with open(in_file) as in_handle: with open(tx_out_file, "w") as out_handle: prev_line = None for line in in_handle: if prev_line: pchrom, pstart, pend = prev_line.split("\t", 4)[:3] cchrom, cstart, cend = line.split("\t", 4)[:3] # Skip if chromosomes match and end overlaps start if pchrom == cchrom and int(pend) > int(cstart): pass else: out_handle.write(prev_line) prev_line = line out_handle.write(prev_line) return out_file
python
def _remove_overlaps(in_file, out_dir, data): """Remove regions that overlap with next region, these result in issues with PureCN. """ out_file = os.path.join(out_dir, "%s-nooverlaps%s" % utils.splitext_plus(os.path.basename(in_file))) if not utils.file_uptodate(out_file, in_file): with file_transaction(data, out_file) as tx_out_file: with open(in_file) as in_handle: with open(tx_out_file, "w") as out_handle: prev_line = None for line in in_handle: if prev_line: pchrom, pstart, pend = prev_line.split("\t", 4)[:3] cchrom, cstart, cend = line.split("\t", 4)[:3] # Skip if chromosomes match and end overlaps start if pchrom == cchrom and int(pend) > int(cstart): pass else: out_handle.write(prev_line) prev_line = line out_handle.write(prev_line) return out_file
[ "def", "_remove_overlaps", "(", "in_file", ",", "out_dir", ",", "data", ")", ":", "out_file", "=", "os", ".", "path", ".", "join", "(", "out_dir", ",", "\"%s-nooverlaps%s\"", "%", "utils", ".", "splitext_plus", "(", "os", ".", "path", ".", "basename", "(...
Remove regions that overlap with next region, these result in issues with PureCN.
[ "Remove", "regions", "that", "overlap", "with", "next", "region", "these", "result", "in", "issues", "with", "PureCN", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/purecn.py#L165-L185
224,295
bcbio/bcbio-nextgen
bcbio/structural/purecn.py
_get_purecn_files
def _get_purecn_files(paired, work_dir, require_exist=False): """Retrieve organized structure of PureCN output files. """ out_base = os.path.join(work_dir, "%s-purecn" % (dd.get_sample_name(paired.tumor_data))) out = {"plot": {}} all_files = [] for plot in ["chromosomes", "local_optima", "segmentation", "summary"]: if plot == "summary": cur_file = "%s.pdf" % out_base else: cur_file = "%s_%s.pdf" % (out_base, plot) if not require_exist or os.path.exists(cur_file): out["plot"][plot] = cur_file all_files.append(os.path.basename(cur_file)) for key, ext in [["hetsummary", ".csv"], ["dnacopy", "_dnacopy.seg"], ["genes", "_genes.csv"], ["log", ".log"], ["loh", "_loh.csv"], ["rds", ".rds"], ["variants", "_variants.csv"]]: cur_file = "%s%s" % (out_base, ext) if not require_exist or os.path.exists(cur_file): out[key] = cur_file all_files.append(os.path.basename(cur_file)) return out_base, out, all_files
python
def _get_purecn_files(paired, work_dir, require_exist=False): """Retrieve organized structure of PureCN output files. """ out_base = os.path.join(work_dir, "%s-purecn" % (dd.get_sample_name(paired.tumor_data))) out = {"plot": {}} all_files = [] for plot in ["chromosomes", "local_optima", "segmentation", "summary"]: if plot == "summary": cur_file = "%s.pdf" % out_base else: cur_file = "%s_%s.pdf" % (out_base, plot) if not require_exist or os.path.exists(cur_file): out["plot"][plot] = cur_file all_files.append(os.path.basename(cur_file)) for key, ext in [["hetsummary", ".csv"], ["dnacopy", "_dnacopy.seg"], ["genes", "_genes.csv"], ["log", ".log"], ["loh", "_loh.csv"], ["rds", ".rds"], ["variants", "_variants.csv"]]: cur_file = "%s%s" % (out_base, ext) if not require_exist or os.path.exists(cur_file): out[key] = cur_file all_files.append(os.path.basename(cur_file)) return out_base, out, all_files
[ "def", "_get_purecn_files", "(", "paired", ",", "work_dir", ",", "require_exist", "=", "False", ")", ":", "out_base", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"%s-purecn\"", "%", "(", "dd", ".", "get_sample_name", "(", "paired", ".", ...
Retrieve organized structure of PureCN output files.
[ "Retrieve", "organized", "structure", "of", "PureCN", "output", "files", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/purecn.py#L187-L208
224,296
bcbio/bcbio-nextgen
bcbio/structural/purecn.py
_loh_to_vcf
def _loh_to_vcf(cur): """Convert LOH output into standardized VCF. """ cn = int(float(cur["C"])) minor_cn = int(float(cur["M"])) if cur["type"].find("LOH"): svtype = "LOH" elif cn > 2: svtype = "DUP" elif cn < 1: svtype = "DEL" else: svtype = None if svtype: info = ["SVTYPE=%s" % svtype, "END=%s" % cur["end"], "SVLEN=%s" % (int(cur["end"]) - int(cur["start"])), "CN=%s" % cn, "MajorCN=%s" % (cn - minor_cn), "MinorCN=%s" % minor_cn] return [cur["chr"], cur["start"], ".", "N", "<%s>" % svtype, ".", ".", ";".join(info), "GT", "0/1"]
python
def _loh_to_vcf(cur): """Convert LOH output into standardized VCF. """ cn = int(float(cur["C"])) minor_cn = int(float(cur["M"])) if cur["type"].find("LOH"): svtype = "LOH" elif cn > 2: svtype = "DUP" elif cn < 1: svtype = "DEL" else: svtype = None if svtype: info = ["SVTYPE=%s" % svtype, "END=%s" % cur["end"], "SVLEN=%s" % (int(cur["end"]) - int(cur["start"])), "CN=%s" % cn, "MajorCN=%s" % (cn - minor_cn), "MinorCN=%s" % minor_cn] return [cur["chr"], cur["start"], ".", "N", "<%s>" % svtype, ".", ".", ";".join(info), "GT", "0/1"]
[ "def", "_loh_to_vcf", "(", "cur", ")", ":", "cn", "=", "int", "(", "float", "(", "cur", "[", "\"C\"", "]", ")", ")", "minor_cn", "=", "int", "(", "float", "(", "cur", "[", "\"M\"", "]", ")", ")", "if", "cur", "[", "\"type\"", "]", ".", "find", ...
Convert LOH output into standardized VCF.
[ "Convert", "LOH", "output", "into", "standardized", "VCF", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/purecn.py#L219-L237
224,297
bcbio/bcbio-nextgen
scripts/utils/collect_metrics_to_csv.py
_generate_metrics
def _generate_metrics(bam_fname, config_file, ref_file, bait_file, target_file): """Run Picard commands to generate metrics files when missing. """ with open(config_file) as in_handle: config = yaml.safe_load(in_handle) broad_runner = broad.runner_from_config(config) bam_fname = os.path.abspath(bam_fname) path = os.path.dirname(bam_fname) out_dir = os.path.join(path, "metrics") utils.safe_makedir(out_dir) with utils.chdir(out_dir): with tx_tmpdir() as tmp_dir: cur_bam = os.path.basename(bam_fname) if not os.path.exists(cur_bam): os.symlink(bam_fname, cur_bam) gen_metrics = PicardMetrics(broad_runner, tmp_dir) gen_metrics.report(cur_bam, ref_file, _bam_is_paired(bam_fname), bait_file, target_file) return out_dir
python
def _generate_metrics(bam_fname, config_file, ref_file, bait_file, target_file): """Run Picard commands to generate metrics files when missing. """ with open(config_file) as in_handle: config = yaml.safe_load(in_handle) broad_runner = broad.runner_from_config(config) bam_fname = os.path.abspath(bam_fname) path = os.path.dirname(bam_fname) out_dir = os.path.join(path, "metrics") utils.safe_makedir(out_dir) with utils.chdir(out_dir): with tx_tmpdir() as tmp_dir: cur_bam = os.path.basename(bam_fname) if not os.path.exists(cur_bam): os.symlink(bam_fname, cur_bam) gen_metrics = PicardMetrics(broad_runner, tmp_dir) gen_metrics.report(cur_bam, ref_file, _bam_is_paired(bam_fname), bait_file, target_file) return out_dir
[ "def", "_generate_metrics", "(", "bam_fname", ",", "config_file", ",", "ref_file", ",", "bait_file", ",", "target_file", ")", ":", "with", "open", "(", "config_file", ")", "as", "in_handle", ":", "config", "=", "yaml", ".", "safe_load", "(", "in_handle", ")"...
Run Picard commands to generate metrics files when missing.
[ "Run", "Picard", "commands", "to", "generate", "metrics", "files", "when", "missing", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/collect_metrics_to_csv.py#L135-L155
224,298
bcbio/bcbio-nextgen
bcbio/structural/gatkcnv.py
run
def run(items, background=None): """Detect copy number variations from batched set of samples using GATK4 CNV calling. TODO: implement germline calling with DetermineGermlineContigPloidy and GermlineCNVCaller """ if not background: background = [] paired = vcfutils.get_paired(items + background) if paired: out = _run_paired(paired) else: out = items logger.warn("GATK4 CNV calling currently only available for somatic samples: %s" % ", ".join([dd.get_sample_name(d) for d in items + background])) return out
python
def run(items, background=None): """Detect copy number variations from batched set of samples using GATK4 CNV calling. TODO: implement germline calling with DetermineGermlineContigPloidy and GermlineCNVCaller """ if not background: background = [] paired = vcfutils.get_paired(items + background) if paired: out = _run_paired(paired) else: out = items logger.warn("GATK4 CNV calling currently only available for somatic samples: %s" % ", ".join([dd.get_sample_name(d) for d in items + background])) return out
[ "def", "run", "(", "items", ",", "background", "=", "None", ")", ":", "if", "not", "background", ":", "background", "=", "[", "]", "paired", "=", "vcfutils", ".", "get_paired", "(", "items", "+", "background", ")", "if", "paired", ":", "out", "=", "_...
Detect copy number variations from batched set of samples using GATK4 CNV calling. TODO: implement germline calling with DetermineGermlineContigPloidy and GermlineCNVCaller
[ "Detect", "copy", "number", "variations", "from", "batched", "set", "of", "samples", "using", "GATK4", "CNV", "calling", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/gatkcnv.py#L19-L32
224,299
bcbio/bcbio-nextgen
bcbio/structural/gatkcnv.py
_run_paired
def _run_paired(paired): """Run somatic variant calling pipeline. """ from bcbio.structural import titancna work_dir = _sv_workdir(paired.tumor_data) seg_files = model_segments(tz.get_in(["depth", "bins", "normalized"], paired.tumor_data), work_dir, paired) call_file = call_copy_numbers(seg_files["seg"], work_dir, paired.tumor_data) out = [] if paired.normal_data: out.append(paired.normal_data) if "sv" not in paired.tumor_data: paired.tumor_data["sv"] = [] paired.tumor_data["sv"].append({"variantcaller": "gatk-cnv", "call_file": call_file, "vrn_file": titancna.to_vcf(call_file, "GATK4-CNV", _get_seg_header, _seg_to_vcf, paired.tumor_data), "seg": seg_files["seg"], "plot": plot_model_segments(seg_files, work_dir, paired.tumor_data)}) out.append(paired.tumor_data) return out
python
def _run_paired(paired): """Run somatic variant calling pipeline. """ from bcbio.structural import titancna work_dir = _sv_workdir(paired.tumor_data) seg_files = model_segments(tz.get_in(["depth", "bins", "normalized"], paired.tumor_data), work_dir, paired) call_file = call_copy_numbers(seg_files["seg"], work_dir, paired.tumor_data) out = [] if paired.normal_data: out.append(paired.normal_data) if "sv" not in paired.tumor_data: paired.tumor_data["sv"] = [] paired.tumor_data["sv"].append({"variantcaller": "gatk-cnv", "call_file": call_file, "vrn_file": titancna.to_vcf(call_file, "GATK4-CNV", _get_seg_header, _seg_to_vcf, paired.tumor_data), "seg": seg_files["seg"], "plot": plot_model_segments(seg_files, work_dir, paired.tumor_data)}) out.append(paired.tumor_data) return out
[ "def", "_run_paired", "(", "paired", ")", ":", "from", "bcbio", ".", "structural", "import", "titancna", "work_dir", "=", "_sv_workdir", "(", "paired", ".", "tumor_data", ")", "seg_files", "=", "model_segments", "(", "tz", ".", "get_in", "(", "[", "\"depth\"...
Run somatic variant calling pipeline.
[ "Run", "somatic", "variant", "calling", "pipeline", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/gatkcnv.py#L34-L54