id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
224,000
bcbio/bcbio-nextgen
bcbio/cwl/create.py
_samplejson_to_inputs
def _samplejson_to_inputs(svals): """Convert sample output into inputs for CWL configuration files, with types. """ out = [] for key, val in svals.items(): out.append(_add_suppl_info({"id": "%s" % key}, val)) return out
python
def _samplejson_to_inputs(svals): """Convert sample output into inputs for CWL configuration files, with types. """ out = [] for key, val in svals.items(): out.append(_add_suppl_info({"id": "%s" % key}, val)) return out
[ "def", "_samplejson_to_inputs", "(", "svals", ")", ":", "out", "=", "[", "]", "for", "key", ",", "val", "in", "svals", ".", "items", "(", ")", ":", "out", ".", "append", "(", "_add_suppl_info", "(", "{", "\"id\"", ":", "\"%s\"", "%", "key", "}", ",...
Convert sample output into inputs for CWL configuration files, with types.
[ "Convert", "sample", "output", "into", "inputs", "for", "CWL", "configuration", "files", "with", "types", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L655-L661
224,001
bcbio/bcbio-nextgen
bcbio/cwl/create.py
_to_cwldata
def _to_cwldata(key, val, get_retriever): """Convert nested dictionary into CWL data, flatening and marking up files. Moves file objects to the top level, enabling insertion in CWL inputs/outputs. """ out = [] if isinstance(val, dict): if len(val) == 2 and "base" in val and "indexes" in val: if len(val["indexes"]) > 0 and val["base"] == val["indexes"][0]: out.append(("%s__indexes" % key, _item_to_cwldata(val["base"], get_retriever))) else: out.append((key, _to_cwlfile_with_indexes(val, get_retriever))) # Dump shared nested keys like resources as a JSON string elif key in workflow.ALWAYS_AVAILABLE or key in workflow.STRING_DICT: out.append((key, _item_to_cwldata(json.dumps(val), get_retriever))) elif key in workflow.FLAT_DICT: flat = [] for k, vs in val.items(): if not isinstance(vs, (list, tuple)): vs = [vs] for v in vs: flat.append("%s:%s" % (k, v)) out.append((key, _item_to_cwldata(flat, get_retriever))) else: remain_val = {} for nkey, nval in val.items(): cur_nkey = "%s__%s" % (key, nkey) cwl_nval = _item_to_cwldata(nval, get_retriever) if isinstance(cwl_nval, dict): out.extend(_to_cwldata(cur_nkey, nval, get_retriever)) elif key in workflow.ALWAYS_AVAILABLE: remain_val[nkey] = nval else: out.append((cur_nkey, cwl_nval)) if remain_val: out.append((key, json.dumps(remain_val, sort_keys=True, separators=(',', ':')))) else: out.append((key, _item_to_cwldata(val, get_retriever))) return out
python
def _to_cwldata(key, val, get_retriever): """Convert nested dictionary into CWL data, flatening and marking up files. Moves file objects to the top level, enabling insertion in CWL inputs/outputs. """ out = [] if isinstance(val, dict): if len(val) == 2 and "base" in val and "indexes" in val: if len(val["indexes"]) > 0 and val["base"] == val["indexes"][0]: out.append(("%s__indexes" % key, _item_to_cwldata(val["base"], get_retriever))) else: out.append((key, _to_cwlfile_with_indexes(val, get_retriever))) # Dump shared nested keys like resources as a JSON string elif key in workflow.ALWAYS_AVAILABLE or key in workflow.STRING_DICT: out.append((key, _item_to_cwldata(json.dumps(val), get_retriever))) elif key in workflow.FLAT_DICT: flat = [] for k, vs in val.items(): if not isinstance(vs, (list, tuple)): vs = [vs] for v in vs: flat.append("%s:%s" % (k, v)) out.append((key, _item_to_cwldata(flat, get_retriever))) else: remain_val = {} for nkey, nval in val.items(): cur_nkey = "%s__%s" % (key, nkey) cwl_nval = _item_to_cwldata(nval, get_retriever) if isinstance(cwl_nval, dict): out.extend(_to_cwldata(cur_nkey, nval, get_retriever)) elif key in workflow.ALWAYS_AVAILABLE: remain_val[nkey] = nval else: out.append((cur_nkey, cwl_nval)) if remain_val: out.append((key, json.dumps(remain_val, sort_keys=True, separators=(',', ':')))) else: out.append((key, _item_to_cwldata(val, get_retriever))) return out
[ "def", "_to_cwldata", "(", "key", ",", "val", ",", "get_retriever", ")", ":", "out", "=", "[", "]", "if", "isinstance", "(", "val", ",", "dict", ")", ":", "if", "len", "(", "val", ")", "==", "2", "and", "\"base\"", "in", "val", "and", "\"indexes\""...
Convert nested dictionary into CWL data, flatening and marking up files. Moves file objects to the top level, enabling insertion in CWL inputs/outputs.
[ "Convert", "nested", "dictionary", "into", "CWL", "data", "flatening", "and", "marking", "up", "files", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L663-L701
224,002
bcbio/bcbio-nextgen
bcbio/cwl/create.py
_to_cwlfile_with_indexes
def _to_cwlfile_with_indexes(val, get_retriever): """Convert reads with ready to go indexes into the right CWL object. Identifies the top level directory and creates a tarball, avoiding trying to handle complex secondary setups which are not cross platform. Skips doing this for reference files and standard setups like bwa, which take up too much time and space to unpack multiple times. """ val["indexes"] = _index_blacklist(val["indexes"]) tval = {"base": _remove_remote_prefix(val["base"]), "indexes": [_remove_remote_prefix(f) for f in val["indexes"]]} # Standard named set of indices, like bwa # Do not include snpEff, which we need to isolate inside a nested directory # hisat2 indices do also not localize cleanly due to compilicated naming cp_dir, cp_base = os.path.split(os.path.commonprefix([tval["base"]] + tval["indexes"])) if (cp_base and cp_dir == os.path.dirname(tval["base"]) and not ("/snpeff/" in cp_dir or "/hisat2" in cp_dir)): return _item_to_cwldata(val["base"], get_retriever, val["indexes"]) else: dirname = os.path.dirname(tval["base"]) assert all([x.startswith(dirname) for x in tval["indexes"]]) return {"class": "File", "path": directory_tarball(dirname)}
python
def _to_cwlfile_with_indexes(val, get_retriever): """Convert reads with ready to go indexes into the right CWL object. Identifies the top level directory and creates a tarball, avoiding trying to handle complex secondary setups which are not cross platform. Skips doing this for reference files and standard setups like bwa, which take up too much time and space to unpack multiple times. """ val["indexes"] = _index_blacklist(val["indexes"]) tval = {"base": _remove_remote_prefix(val["base"]), "indexes": [_remove_remote_prefix(f) for f in val["indexes"]]} # Standard named set of indices, like bwa # Do not include snpEff, which we need to isolate inside a nested directory # hisat2 indices do also not localize cleanly due to compilicated naming cp_dir, cp_base = os.path.split(os.path.commonprefix([tval["base"]] + tval["indexes"])) if (cp_base and cp_dir == os.path.dirname(tval["base"]) and not ("/snpeff/" in cp_dir or "/hisat2" in cp_dir)): return _item_to_cwldata(val["base"], get_retriever, val["indexes"]) else: dirname = os.path.dirname(tval["base"]) assert all([x.startswith(dirname) for x in tval["indexes"]]) return {"class": "File", "path": directory_tarball(dirname)}
[ "def", "_to_cwlfile_with_indexes", "(", "val", ",", "get_retriever", ")", ":", "val", "[", "\"indexes\"", "]", "=", "_index_blacklist", "(", "val", "[", "\"indexes\"", "]", ")", "tval", "=", "{", "\"base\"", ":", "_remove_remote_prefix", "(", "val", "[", "\"...
Convert reads with ready to go indexes into the right CWL object. Identifies the top level directory and creates a tarball, avoiding trying to handle complex secondary setups which are not cross platform. Skips doing this for reference files and standard setups like bwa, which take up too much time and space to unpack multiple times.
[ "Convert", "reads", "with", "ready", "to", "go", "indexes", "into", "the", "right", "CWL", "object", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L712-L734
224,003
bcbio/bcbio-nextgen
bcbio/cwl/create.py
_add_secondary_if_exists
def _add_secondary_if_exists(secondary, out, get_retriever): """Add secondary files only if present locally or remotely. """ secondary = [_file_local_or_remote(y, get_retriever) for y in secondary] secondary = [z for z in secondary if z] if secondary: out["secondaryFiles"] = [{"class": "File", "path": f} for f in secondary] return out
python
def _add_secondary_if_exists(secondary, out, get_retriever): """Add secondary files only if present locally or remotely. """ secondary = [_file_local_or_remote(y, get_retriever) for y in secondary] secondary = [z for z in secondary if z] if secondary: out["secondaryFiles"] = [{"class": "File", "path": f} for f in secondary] return out
[ "def", "_add_secondary_if_exists", "(", "secondary", ",", "out", ",", "get_retriever", ")", ":", "secondary", "=", "[", "_file_local_or_remote", "(", "y", ",", "get_retriever", ")", "for", "y", "in", "secondary", "]", "secondary", "=", "[", "z", "for", "z", ...
Add secondary files only if present locally or remotely.
[ "Add", "secondary", "files", "only", "if", "present", "locally", "or", "remotely", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L736-L743
224,004
bcbio/bcbio-nextgen
bcbio/cwl/create.py
_item_to_cwldata
def _item_to_cwldata(x, get_retriever, indexes=None): """"Markup an item with CWL specific metadata. """ if isinstance(x, (list, tuple)): return [_item_to_cwldata(subx, get_retriever) for subx in x] elif (x and isinstance(x, six.string_types) and (((os.path.isfile(x) or os.path.isdir(x)) and os.path.exists(x)) or objectstore.is_remote(x))): if _file_local_or_remote(x, get_retriever): out = {"class": "File", "path": x} if indexes: out = _add_secondary_if_exists(indexes, out, get_retriever) elif x.endswith(".bam"): out = _add_secondary_if_exists([x + ".bai"], out, get_retriever) elif x.endswith(".cram"): out = _add_secondary_if_exists([x + ".crai"], out, get_retriever) elif x.endswith((".vcf.gz", ".bed.gz")): out = _add_secondary_if_exists([x + ".tbi"], out, get_retriever) elif x.endswith(".fa"): out = _add_secondary_if_exists([x + ".fai", os.path.splitext(x)[0] + ".dict"], out, get_retriever) elif x.endswith(".fa.gz"): out = _add_secondary_if_exists([x + ".fai", x + ".gzi", x.replace(".fa.gz", "") + ".dict"], out, get_retriever) elif x.endswith(".fq.gz") or x.endswith(".fastq.gz"): out = _add_secondary_if_exists([x + ".gbi"], out, get_retriever) elif x.endswith(".gtf"): out = _add_secondary_if_exists([x + ".db"], out, get_retriever) else: out = {"class": "File", "path": directory_tarball(x)} return out elif isinstance(x, bool): return str(x) else: return x
python
def _item_to_cwldata(x, get_retriever, indexes=None): """"Markup an item with CWL specific metadata. """ if isinstance(x, (list, tuple)): return [_item_to_cwldata(subx, get_retriever) for subx in x] elif (x and isinstance(x, six.string_types) and (((os.path.isfile(x) or os.path.isdir(x)) and os.path.exists(x)) or objectstore.is_remote(x))): if _file_local_or_remote(x, get_retriever): out = {"class": "File", "path": x} if indexes: out = _add_secondary_if_exists(indexes, out, get_retriever) elif x.endswith(".bam"): out = _add_secondary_if_exists([x + ".bai"], out, get_retriever) elif x.endswith(".cram"): out = _add_secondary_if_exists([x + ".crai"], out, get_retriever) elif x.endswith((".vcf.gz", ".bed.gz")): out = _add_secondary_if_exists([x + ".tbi"], out, get_retriever) elif x.endswith(".fa"): out = _add_secondary_if_exists([x + ".fai", os.path.splitext(x)[0] + ".dict"], out, get_retriever) elif x.endswith(".fa.gz"): out = _add_secondary_if_exists([x + ".fai", x + ".gzi", x.replace(".fa.gz", "") + ".dict"], out, get_retriever) elif x.endswith(".fq.gz") or x.endswith(".fastq.gz"): out = _add_secondary_if_exists([x + ".gbi"], out, get_retriever) elif x.endswith(".gtf"): out = _add_secondary_if_exists([x + ".db"], out, get_retriever) else: out = {"class": "File", "path": directory_tarball(x)} return out elif isinstance(x, bool): return str(x) else: return x
[ "def", "_item_to_cwldata", "(", "x", ",", "get_retriever", ",", "indexes", "=", "None", ")", ":", "if", "isinstance", "(", "x", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "[", "_item_to_cwldata", "(", "subx", ",", "get_retriever", ")", "f...
Markup an item with CWL specific metadata.
[ "Markup", "an", "item", "with", "CWL", "specific", "metadata", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L745-L778
224,005
bcbio/bcbio-nextgen
bcbio/cwl/create.py
_file_local_or_remote
def _file_local_or_remote(f, get_retriever): """Check for presence of a local or remote file. """ if os.path.exists(f): return f integration, config = get_retriever.integration_and_config(f) if integration: return integration.file_exists(f, config)
python
def _file_local_or_remote(f, get_retriever): """Check for presence of a local or remote file. """ if os.path.exists(f): return f integration, config = get_retriever.integration_and_config(f) if integration: return integration.file_exists(f, config)
[ "def", "_file_local_or_remote", "(", "f", ",", "get_retriever", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "f", ")", ":", "return", "f", "integration", ",", "config", "=", "get_retriever", ".", "integration_and_config", "(", "f", ")", "if", "...
Check for presence of a local or remote file.
[ "Check", "for", "presence", "of", "a", "local", "or", "remote", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L780-L787
224,006
bcbio/bcbio-nextgen
bcbio/cwl/create.py
directory_tarball
def directory_tarball(dirname): """Create a tarball of a complex directory, avoiding complex secondaryFiles. Complex secondary files do not work on multiple platforms and are not portable to WDL, so for now we create a tarball that workers will unpack. """ assert os.path.isdir(dirname), dirname base_dir, tarball_dir = os.path.split(dirname) while not os.path.exists(os.path.join(base_dir, "seq")) and base_dir and base_dir != "/": base_dir, extra_tarball = os.path.split(base_dir) tarball_dir = os.path.join(extra_tarball, tarball_dir) if base_dir == "/" and not os.path.exists(os.path.join(base_dir, "seq")): raise ValueError("Did not find relative directory to create tarball for %s" % dirname) tarball = os.path.join(base_dir, "%s-wf.tar.gz" % (tarball_dir.replace(os.path.sep, "--"))) if not utils.file_exists(tarball): print("Preparing CWL input tarball: %s" % tarball) with file_transaction({}, tarball) as tx_tarball: with utils.chdir(base_dir): with tarfile.open(tx_tarball, "w:gz") as tar: tar.add(tarball_dir) return tarball
python
def directory_tarball(dirname): """Create a tarball of a complex directory, avoiding complex secondaryFiles. Complex secondary files do not work on multiple platforms and are not portable to WDL, so for now we create a tarball that workers will unpack. """ assert os.path.isdir(dirname), dirname base_dir, tarball_dir = os.path.split(dirname) while not os.path.exists(os.path.join(base_dir, "seq")) and base_dir and base_dir != "/": base_dir, extra_tarball = os.path.split(base_dir) tarball_dir = os.path.join(extra_tarball, tarball_dir) if base_dir == "/" and not os.path.exists(os.path.join(base_dir, "seq")): raise ValueError("Did not find relative directory to create tarball for %s" % dirname) tarball = os.path.join(base_dir, "%s-wf.tar.gz" % (tarball_dir.replace(os.path.sep, "--"))) if not utils.file_exists(tarball): print("Preparing CWL input tarball: %s" % tarball) with file_transaction({}, tarball) as tx_tarball: with utils.chdir(base_dir): with tarfile.open(tx_tarball, "w:gz") as tar: tar.add(tarball_dir) return tarball
[ "def", "directory_tarball", "(", "dirname", ")", ":", "assert", "os", ".", "path", ".", "isdir", "(", "dirname", ")", ",", "dirname", "base_dir", ",", "tarball_dir", "=", "os", ".", "path", ".", "split", "(", "dirname", ")", "while", "not", "os", ".", ...
Create a tarball of a complex directory, avoiding complex secondaryFiles. Complex secondary files do not work on multiple platforms and are not portable to WDL, so for now we create a tarball that workers will unpack.
[ "Create", "a", "tarball", "of", "a", "complex", "directory", "avoiding", "complex", "secondaryFiles", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L789-L809
224,007
bcbio/bcbio-nextgen
bcbio/cwl/create.py
_calc_input_estimates
def _calc_input_estimates(keyvals, get_retriever): """Calculate estimations of input file sizes for disk usage approximation. These are current dominated by fastq/BAM sizes, so estimate based on that. """ out = {} for key, val in keyvals.items(): size = _calc_file_size(val, 0, get_retriever) if size: out[key] = size return out
python
def _calc_input_estimates(keyvals, get_retriever): """Calculate estimations of input file sizes for disk usage approximation. These are current dominated by fastq/BAM sizes, so estimate based on that. """ out = {} for key, val in keyvals.items(): size = _calc_file_size(val, 0, get_retriever) if size: out[key] = size return out
[ "def", "_calc_input_estimates", "(", "keyvals", ",", "get_retriever", ")", ":", "out", "=", "{", "}", "for", "key", ",", "val", "in", "keyvals", ".", "items", "(", ")", ":", "size", "=", "_calc_file_size", "(", "val", ",", "0", ",", "get_retriever", ")...
Calculate estimations of input file sizes for disk usage approximation. These are current dominated by fastq/BAM sizes, so estimate based on that.
[ "Calculate", "estimations", "of", "input", "file", "sizes", "for", "disk", "usage", "approximation", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L853-L863
224,008
bcbio/bcbio-nextgen
bcbio/cwl/create.py
_get_file_size
def _get_file_size(path, get_retriever): """Return file size in megabytes, including querying remote integrations """ integration, config = get_retriever.integration_and_config(path) if integration: return integration.file_size(path, config) elif os.path.exists(path): return os.path.getsize(path) / (1024.0 * 1024.0)
python
def _get_file_size(path, get_retriever): """Return file size in megabytes, including querying remote integrations """ integration, config = get_retriever.integration_and_config(path) if integration: return integration.file_size(path, config) elif os.path.exists(path): return os.path.getsize(path) / (1024.0 * 1024.0)
[ "def", "_get_file_size", "(", "path", ",", "get_retriever", ")", ":", "integration", ",", "config", "=", "get_retriever", ".", "integration_and_config", "(", "path", ")", "if", "integration", ":", "return", "integration", ".", "file_size", "(", "path", ",", "c...
Return file size in megabytes, including querying remote integrations
[ "Return", "file", "size", "in", "megabytes", "including", "querying", "remote", "integrations" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L896-L903
224,009
bcbio/bcbio-nextgen
bcbio/cwl/create.py
GetRetriever.integration_and_config
def integration_and_config(self, path): """Get a retriever and configuration for the given file path. """ if path.startswith(tuple(INTEGRATION_MAP.keys())): key = INTEGRATION_MAP[path.split(":")[0] + ":"] integration = self._integrations.get(key) config = {} for sample in self._samples: config = tz.get_in(["config", key], sample) if config: break return integration, config return None, None
python
def integration_and_config(self, path): """Get a retriever and configuration for the given file path. """ if path.startswith(tuple(INTEGRATION_MAP.keys())): key = INTEGRATION_MAP[path.split(":")[0] + ":"] integration = self._integrations.get(key) config = {} for sample in self._samples: config = tz.get_in(["config", key], sample) if config: break return integration, config return None, None
[ "def", "integration_and_config", "(", "self", ",", "path", ")", ":", "if", "path", ".", "startswith", "(", "tuple", "(", "INTEGRATION_MAP", ".", "keys", "(", ")", ")", ")", ":", "key", "=", "INTEGRATION_MAP", "[", "path", ".", "split", "(", "\":\"", ")...
Get a retriever and configuration for the given file path.
[ "Get", "a", "retriever", "and", "configuration", "for", "the", "given", "file", "path", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L881-L894
224,010
bcbio/bcbio-nextgen
bcbio/rnaseq/singlecellexperiment.py
make_scrnaseq_object
def make_scrnaseq_object(samples): """ load the initial se.rda object using sinclecell-experiment """ local_sitelib = R_sitelib() counts_dir = os.path.dirname(dd.get_in_samples(samples, dd.get_combined_counts)) gtf_file = dd.get_in_samples(samples, dd.get_transcriptome_gtf) if not gtf_file: gtf_file = dd.get_in_samples(samples, dd.get_gtf_file) rda_file = os.path.join(counts_dir, "se.rda") if not file_exists(rda_file): with file_transaction(rda_file) as tx_out_file: rcode = "%s-run.R" % os.path.splitext(rda_file)[0] rrna_file = "%s-rrna.txt" % os.path.splitext(rda_file)[0] rrna_file = _find_rRNA_genes(gtf_file, rrna_file) with open(rcode, "w") as out_handle: out_handle.write(_script.format(**locals())) rscript = Rscript_cmd() try: # do.run([rscript, "--no-environ", rcode], # "SingleCellExperiment", # log_error=False) rda_file = rcode except subprocess.CalledProcessError as msg: logger.exception()
python
def make_scrnaseq_object(samples): """ load the initial se.rda object using sinclecell-experiment """ local_sitelib = R_sitelib() counts_dir = os.path.dirname(dd.get_in_samples(samples, dd.get_combined_counts)) gtf_file = dd.get_in_samples(samples, dd.get_transcriptome_gtf) if not gtf_file: gtf_file = dd.get_in_samples(samples, dd.get_gtf_file) rda_file = os.path.join(counts_dir, "se.rda") if not file_exists(rda_file): with file_transaction(rda_file) as tx_out_file: rcode = "%s-run.R" % os.path.splitext(rda_file)[0] rrna_file = "%s-rrna.txt" % os.path.splitext(rda_file)[0] rrna_file = _find_rRNA_genes(gtf_file, rrna_file) with open(rcode, "w") as out_handle: out_handle.write(_script.format(**locals())) rscript = Rscript_cmd() try: # do.run([rscript, "--no-environ", rcode], # "SingleCellExperiment", # log_error=False) rda_file = rcode except subprocess.CalledProcessError as msg: logger.exception()
[ "def", "make_scrnaseq_object", "(", "samples", ")", ":", "local_sitelib", "=", "R_sitelib", "(", ")", "counts_dir", "=", "os", ".", "path", ".", "dirname", "(", "dd", ".", "get_in_samples", "(", "samples", ",", "dd", ".", "get_combined_counts", ")", ")", "...
load the initial se.rda object using sinclecell-experiment
[ "load", "the", "initial", "se", ".", "rda", "object", "using", "sinclecell", "-", "experiment" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/singlecellexperiment.py#L13-L37
224,011
bcbio/bcbio-nextgen
bcbio/distributed/multi.py
runner
def runner(parallel, config): """Run functions, provided by string name, on multiple cores on the current machine. """ def run_parallel(fn_name, items): items = [x for x in items if x is not None] if len(items) == 0: return [] items = diagnostics.track_parallel(items, fn_name) fn, fn_name = (fn_name, fn_name.__name__) if callable(fn_name) else (get_fn(fn_name, parallel), fn_name) logger.info("multiprocessing: %s" % fn_name) if "wrapper" in parallel: wrap_parallel = {k: v for k, v in parallel.items() if k in set(["fresources", "checkpointed"])} items = [[fn_name] + parallel.get("wrapper_args", []) + [wrap_parallel] + list(x) for x in items] return run_multicore(fn, items, config, parallel=parallel) return run_parallel
python
def runner(parallel, config): """Run functions, provided by string name, on multiple cores on the current machine. """ def run_parallel(fn_name, items): items = [x for x in items if x is not None] if len(items) == 0: return [] items = diagnostics.track_parallel(items, fn_name) fn, fn_name = (fn_name, fn_name.__name__) if callable(fn_name) else (get_fn(fn_name, parallel), fn_name) logger.info("multiprocessing: %s" % fn_name) if "wrapper" in parallel: wrap_parallel = {k: v for k, v in parallel.items() if k in set(["fresources", "checkpointed"])} items = [[fn_name] + parallel.get("wrapper_args", []) + [wrap_parallel] + list(x) for x in items] return run_multicore(fn, items, config, parallel=parallel) return run_parallel
[ "def", "runner", "(", "parallel", ",", "config", ")", ":", "def", "run_parallel", "(", "fn_name", ",", "items", ")", ":", "items", "=", "[", "x", "for", "x", "in", "items", "if", "x", "is", "not", "None", "]", "if", "len", "(", "items", ")", "=="...
Run functions, provided by string name, on multiple cores on the current machine.
[ "Run", "functions", "provided", "by", "string", "name", "on", "multiple", "cores", "on", "the", "current", "machine", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/multi.py#L15-L29
224,012
bcbio/bcbio-nextgen
bcbio/distributed/multi.py
zeromq_aware_logging
def zeromq_aware_logging(f): """Ensure multiprocessing logging uses ZeroMQ queues. ZeroMQ and local stdout/stderr do not behave nicely when intertwined. This ensures the local logging uses existing ZeroMQ logging queues. """ @functools.wraps(f) def wrapper(*args, **kwargs): config = None for arg in args: if config_utils.is_std_config_arg(arg): config = arg break elif config_utils.is_nested_config_arg(arg): config = arg["config"] elif isinstance(arg, (list, tuple)) and config_utils.is_nested_config_arg(arg[0]): config = arg[0]["config"] break assert config, "Could not find config dictionary in function arguments." if config.get("parallel", {}).get("log_queue") and not config.get("parallel", {}).get("wrapper"): handler = setup_local_logging(config, config["parallel"]) else: handler = None try: out = f(*args, **kwargs) finally: if handler and hasattr(handler, "close"): handler.close() return out return wrapper
python
def zeromq_aware_logging(f): """Ensure multiprocessing logging uses ZeroMQ queues. ZeroMQ and local stdout/stderr do not behave nicely when intertwined. This ensures the local logging uses existing ZeroMQ logging queues. """ @functools.wraps(f) def wrapper(*args, **kwargs): config = None for arg in args: if config_utils.is_std_config_arg(arg): config = arg break elif config_utils.is_nested_config_arg(arg): config = arg["config"] elif isinstance(arg, (list, tuple)) and config_utils.is_nested_config_arg(arg[0]): config = arg[0]["config"] break assert config, "Could not find config dictionary in function arguments." if config.get("parallel", {}).get("log_queue") and not config.get("parallel", {}).get("wrapper"): handler = setup_local_logging(config, config["parallel"]) else: handler = None try: out = f(*args, **kwargs) finally: if handler and hasattr(handler, "close"): handler.close() return out return wrapper
[ "def", "zeromq_aware_logging", "(", "f", ")", ":", "@", "functools", ".", "wraps", "(", "f", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "config", "=", "None", "for", "arg", "in", "args", ":", "if", "config_utils", "...
Ensure multiprocessing logging uses ZeroMQ queues. ZeroMQ and local stdout/stderr do not behave nicely when intertwined. This ensures the local logging uses existing ZeroMQ logging queues.
[ "Ensure", "multiprocessing", "logging", "uses", "ZeroMQ", "queues", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/multi.py#L39-L68
224,013
bcbio/bcbio-nextgen
bcbio/distributed/multi.py
run_multicore
def run_multicore(fn, items, config, parallel=None): """Run the function using multiple cores on the given items to process. """ if len(items) == 0: return [] if parallel is None or "num_jobs" not in parallel: if parallel is None: parallel = {"type": "local", "cores": config["algorithm"].get("num_cores", 1)} sysinfo = system.get_info({}, parallel) parallel = resources.calculate(parallel, items, sysinfo, config, parallel.get("multiplier", 1), max_multicore=int(parallel.get("max_multicore", sysinfo["cores"]))) items = [config_utils.add_cores_to_config(x, parallel["cores_per_job"]) for x in items] if joblib is None: raise ImportError("Need joblib for multiprocessing parallelization") out = [] for data in joblib.Parallel(parallel["num_jobs"], batch_size=1, backend="multiprocessing")(joblib.delayed(fn)(*x) for x in items): if data: out.extend(data) return out
python
def run_multicore(fn, items, config, parallel=None): """Run the function using multiple cores on the given items to process. """ if len(items) == 0: return [] if parallel is None or "num_jobs" not in parallel: if parallel is None: parallel = {"type": "local", "cores": config["algorithm"].get("num_cores", 1)} sysinfo = system.get_info({}, parallel) parallel = resources.calculate(parallel, items, sysinfo, config, parallel.get("multiplier", 1), max_multicore=int(parallel.get("max_multicore", sysinfo["cores"]))) items = [config_utils.add_cores_to_config(x, parallel["cores_per_job"]) for x in items] if joblib is None: raise ImportError("Need joblib for multiprocessing parallelization") out = [] for data in joblib.Parallel(parallel["num_jobs"], batch_size=1, backend="multiprocessing")(joblib.delayed(fn)(*x) for x in items): if data: out.extend(data) return out
[ "def", "run_multicore", "(", "fn", ",", "items", ",", "config", ",", "parallel", "=", "None", ")", ":", "if", "len", "(", "items", ")", "==", "0", ":", "return", "[", "]", "if", "parallel", "is", "None", "or", "\"num_jobs\"", "not", "in", "parallel",...
Run the function using multiple cores on the given items to process.
[ "Run", "the", "function", "using", "multiple", "cores", "on", "the", "given", "items", "to", "process", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/multi.py#L70-L89
224,014
bcbio/bcbio-nextgen
scripts/bcbio_fastq_umi_prep.py
_add_umis_with_fastp
def _add_umis_with_fastp(read_fq, umi_fq, out_fq, cores): """Add UMIs to reads from separate UMI file using fastp. """ with utils.open_gzipsafe(umi_fq) as in_handle: in_handle.readline() # name umi_size = len(in_handle.readline().strip()) cmd = ("fastp -Q -A -L -G -w 1 --in1 {read_fq} --in2 {umi_fq} " "--umi --umi_prefix UMI --umi_loc read2 --umi_len {umi_size} " "--out1 >(bgzip --threads {cores} -c > {out_fq}) --out2 /dev/null " "-j /dev/null -h /dev/null") do.run(cmd.format(**locals()), "Add UMIs to fastq file with fastp")
python
def _add_umis_with_fastp(read_fq, umi_fq, out_fq, cores): """Add UMIs to reads from separate UMI file using fastp. """ with utils.open_gzipsafe(umi_fq) as in_handle: in_handle.readline() # name umi_size = len(in_handle.readline().strip()) cmd = ("fastp -Q -A -L -G -w 1 --in1 {read_fq} --in2 {umi_fq} " "--umi --umi_prefix UMI --umi_loc read2 --umi_len {umi_size} " "--out1 >(bgzip --threads {cores} -c > {out_fq}) --out2 /dev/null " "-j /dev/null -h /dev/null") do.run(cmd.format(**locals()), "Add UMIs to fastq file with fastp")
[ "def", "_add_umis_with_fastp", "(", "read_fq", ",", "umi_fq", ",", "out_fq", ",", "cores", ")", ":", "with", "utils", ".", "open_gzipsafe", "(", "umi_fq", ")", "as", "in_handle", ":", "in_handle", ".", "readline", "(", ")", "# name", "umi_size", "=", "len"...
Add UMIs to reads from separate UMI file using fastp.
[ "Add", "UMIs", "to", "reads", "from", "separate", "UMI", "file", "using", "fastp", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/bcbio_fastq_umi_prep.py#L117-L127
224,015
bcbio/bcbio-nextgen
scripts/bcbio_fastq_umi_prep.py
_find_umi
def _find_umi(files): """Find UMI file using different naming schemes. R1/R2/R3 => R1/R3 with R2 UMI R1/R2/I1 => R1/R2 with I1 UMI """ base = os.path.basename(_commonprefix(files)) def _file_ext(f): exts = utils.splitext_plus(os.path.basename(f).replace(base, ""))[0].split("_") exts = [x for x in exts if x] return exts[0] exts = dict([(_file_ext(f), f) for f in files]) if "I1" in exts: return exts["R1"], exts["R2"], exts["I1"] else: assert "R3" in exts, exts return exts["R1"], exts["R3"], exts["R2"]
python
def _find_umi(files): """Find UMI file using different naming schemes. R1/R2/R3 => R1/R3 with R2 UMI R1/R2/I1 => R1/R2 with I1 UMI """ base = os.path.basename(_commonprefix(files)) def _file_ext(f): exts = utils.splitext_plus(os.path.basename(f).replace(base, ""))[0].split("_") exts = [x for x in exts if x] return exts[0] exts = dict([(_file_ext(f), f) for f in files]) if "I1" in exts: return exts["R1"], exts["R2"], exts["I1"] else: assert "R3" in exts, exts return exts["R1"], exts["R3"], exts["R2"]
[ "def", "_find_umi", "(", "files", ")", ":", "base", "=", "os", ".", "path", ".", "basename", "(", "_commonprefix", "(", "files", ")", ")", "def", "_file_ext", "(", "f", ")", ":", "exts", "=", "utils", ".", "splitext_plus", "(", "os", ".", "path", "...
Find UMI file using different naming schemes. R1/R2/R3 => R1/R3 with R2 UMI R1/R2/I1 => R1/R2 with I1 UMI
[ "Find", "UMI", "file", "using", "different", "naming", "schemes", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/bcbio_fastq_umi_prep.py#L176-L194
224,016
bcbio/bcbio-nextgen
scripts/bcbio_fastq_umi_prep.py
_commonprefix
def _commonprefix(files): """Retrieve a common prefix for files without extra _R1 _I1 extensions. Allows alternative naming schemes (R1/R2/R3) (R1/R2/I1). """ out = os.path.commonprefix(files) out = out.rstrip("_R") out = out.rstrip("_I") out = out.rstrip("_") return out
python
def _commonprefix(files): """Retrieve a common prefix for files without extra _R1 _I1 extensions. Allows alternative naming schemes (R1/R2/R3) (R1/R2/I1). """ out = os.path.commonprefix(files) out = out.rstrip("_R") out = out.rstrip("_I") out = out.rstrip("_") return out
[ "def", "_commonprefix", "(", "files", ")", ":", "out", "=", "os", ".", "path", ".", "commonprefix", "(", "files", ")", "out", "=", "out", ".", "rstrip", "(", "\"_R\"", ")", "out", "=", "out", ".", "rstrip", "(", "\"_I\"", ")", "out", "=", "out", ...
Retrieve a common prefix for files without extra _R1 _I1 extensions. Allows alternative naming schemes (R1/R2/R3) (R1/R2/I1).
[ "Retrieve", "a", "common", "prefix", "for", "files", "without", "extra", "_R1", "_I1", "extensions", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/bcbio_fastq_umi_prep.py#L196-L205
224,017
bcbio/bcbio-nextgen
bcbio/variation/vfilter.py
cutoff_w_expression
def cutoff_w_expression(vcf_file, expression, data, name="+", filterext="", extra_cmd="", limit_regions="variant_regions"): """Perform cutoff-based soft filtering using bcftools expressions like %QUAL < 20 || DP < 4. """ base, ext = utils.splitext_plus(vcf_file) out_file = "{base}-filter{filterext}{ext}".format(**locals()) if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: if vcfutils.vcf_has_variants(vcf_file): bcftools = config_utils.get_program("bcftools", data["config"]) bgzip_cmd = "| bgzip -c" if out_file.endswith(".gz") else "" intervals = "" if limit_regions == "variant_regions": variant_regions = dd.get_variant_regions(data) if variant_regions: intervals = "-T %s" % vcfutils.bgzip_and_index(variant_regions, data["config"]) cmd = ("{bcftools} filter -O v {intervals} --soft-filter '{name}' " "-e '{expression}' -m '+' {vcf_file} {extra_cmd} {bgzip_cmd} > {tx_out_file}") do.run(cmd.format(**locals()), "Cutoff-based soft filtering %s with %s" % (vcf_file, expression), data) else: shutil.copy(vcf_file, out_file) if out_file.endswith(".vcf.gz"): out_file = vcfutils.bgzip_and_index(out_file, data["config"]) return out_file
python
def cutoff_w_expression(vcf_file, expression, data, name="+", filterext="", extra_cmd="", limit_regions="variant_regions"): """Perform cutoff-based soft filtering using bcftools expressions like %QUAL < 20 || DP < 4. """ base, ext = utils.splitext_plus(vcf_file) out_file = "{base}-filter{filterext}{ext}".format(**locals()) if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: if vcfutils.vcf_has_variants(vcf_file): bcftools = config_utils.get_program("bcftools", data["config"]) bgzip_cmd = "| bgzip -c" if out_file.endswith(".gz") else "" intervals = "" if limit_regions == "variant_regions": variant_regions = dd.get_variant_regions(data) if variant_regions: intervals = "-T %s" % vcfutils.bgzip_and_index(variant_regions, data["config"]) cmd = ("{bcftools} filter -O v {intervals} --soft-filter '{name}' " "-e '{expression}' -m '+' {vcf_file} {extra_cmd} {bgzip_cmd} > {tx_out_file}") do.run(cmd.format(**locals()), "Cutoff-based soft filtering %s with %s" % (vcf_file, expression), data) else: shutil.copy(vcf_file, out_file) if out_file.endswith(".vcf.gz"): out_file = vcfutils.bgzip_and_index(out_file, data["config"]) return out_file
[ "def", "cutoff_w_expression", "(", "vcf_file", ",", "expression", ",", "data", ",", "name", "=", "\"+\"", ",", "filterext", "=", "\"\"", ",", "extra_cmd", "=", "\"\"", ",", "limit_regions", "=", "\"variant_regions\"", ")", ":", "base", ",", "ext", "=", "ut...
Perform cutoff-based soft filtering using bcftools expressions like %QUAL < 20 || DP < 4.
[ "Perform", "cutoff", "-", "based", "soft", "filtering", "using", "bcftools", "expressions", "like", "%QUAL", "<", "20", "||", "DP", "<", "4", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vfilter.py#L21-L45
224,018
bcbio/bcbio-nextgen
bcbio/variation/vfilter.py
_freebayes_custom
def _freebayes_custom(in_file, ref_file, data): """Custom FreeBayes filtering using bcbio.variation, tuned to human NA12878 results. Experimental: for testing new methods. """ if vcfutils.get_paired_phenotype(data): return None config = data["config"] bv_ver = programs.get_version("bcbio_variation", config=config) if LooseVersion(bv_ver) < LooseVersion("0.1.1"): return None out_file = "%s-filter%s" % os.path.splitext(in_file) if not utils.file_exists(out_file): tmp_dir = utils.safe_makedir(os.path.join(os.path.dirname(in_file), "tmp")) resources = config_utils.get_resources("bcbio_variation", config) jvm_opts = resources.get("jvm_opts", ["-Xms750m", "-Xmx2g"]) java_args = ["-Djava.io.tmpdir=%s" % tmp_dir] cmd = ["bcbio-variation"] + jvm_opts + java_args + \ ["variant-filter", "freebayes", in_file, ref_file] do.run(cmd, "Custom FreeBayes filtering using bcbio.variation") return out_file
python
def _freebayes_custom(in_file, ref_file, data): """Custom FreeBayes filtering using bcbio.variation, tuned to human NA12878 results. Experimental: for testing new methods. """ if vcfutils.get_paired_phenotype(data): return None config = data["config"] bv_ver = programs.get_version("bcbio_variation", config=config) if LooseVersion(bv_ver) < LooseVersion("0.1.1"): return None out_file = "%s-filter%s" % os.path.splitext(in_file) if not utils.file_exists(out_file): tmp_dir = utils.safe_makedir(os.path.join(os.path.dirname(in_file), "tmp")) resources = config_utils.get_resources("bcbio_variation", config) jvm_opts = resources.get("jvm_opts", ["-Xms750m", "-Xmx2g"]) java_args = ["-Djava.io.tmpdir=%s" % tmp_dir] cmd = ["bcbio-variation"] + jvm_opts + java_args + \ ["variant-filter", "freebayes", in_file, ref_file] do.run(cmd, "Custom FreeBayes filtering using bcbio.variation") return out_file
[ "def", "_freebayes_custom", "(", "in_file", ",", "ref_file", ",", "data", ")", ":", "if", "vcfutils", ".", "get_paired_phenotype", "(", "data", ")", ":", "return", "None", "config", "=", "data", "[", "\"config\"", "]", "bv_ver", "=", "programs", ".", "get_...
Custom FreeBayes filtering using bcbio.variation, tuned to human NA12878 results. Experimental: for testing new methods.
[ "Custom", "FreeBayes", "filtering", "using", "bcbio", ".", "variation", "tuned", "to", "human", "NA12878", "results", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vfilter.py#L56-L76
224,019
bcbio/bcbio-nextgen
bcbio/variation/vfilter.py
_freebayes_cutoff
def _freebayes_cutoff(in_file, data): """Perform filtering of FreeBayes results, flagging low confidence calls. Filters using cutoffs on low depth based on Meynert et al's work modeling sensitivity of homozygote and heterozygote calling on depth: http://www.ncbi.nlm.nih.gov/pubmed/23773188 and high depth heterozygote SNP filtering based on Heng Li's work evaluating variant calling artifacts: http://arxiv.org/abs/1404.0929 Tuned based on NA12878 call comparisons to Genome in a Bottle reference genome. """ if not vcfutils.vcf_has_variants(in_file): base, ext = utils.splitext_plus(in_file) out_file = "{base}-filter{ext}".format(**locals()) if not utils.file_exists(out_file): shutil.copy(in_file, out_file) if out_file.endswith(".vcf.gz"): out_file = vcfutils.bgzip_and_index(out_file, data["config"]) return out_file depth_thresh, qual_thresh = None, None if _do_high_depth_filter(data): stats = _calc_vcf_stats(in_file) if stats["avg_depth"] > 0: depth_thresh = int(math.ceil(stats["avg_depth"] + 3 * math.pow(stats["avg_depth"], 0.5))) qual_thresh = depth_thresh * 2.0 # Multiplier from default GATK QD cutoff filter filters = ('(AF[0] <= 0.5 && (max(FORMAT/DP) < 4 || (max(FORMAT/DP) < 13 && %QUAL < 10))) || ' '(AF[0] > 0.5 && (max(FORMAT/DP) < 4 && %QUAL < 50))') if depth_thresh: filters += ' || (%QUAL < {qual_thresh} && max(FORMAT/DP) > {depth_thresh} && AF[0] <= 0.5)'.format(**locals()) return cutoff_w_expression(in_file, filters, data, name="FBQualDepth")
python
def _freebayes_cutoff(in_file, data): """Perform filtering of FreeBayes results, flagging low confidence calls. Filters using cutoffs on low depth based on Meynert et al's work modeling sensitivity of homozygote and heterozygote calling on depth: http://www.ncbi.nlm.nih.gov/pubmed/23773188 and high depth heterozygote SNP filtering based on Heng Li's work evaluating variant calling artifacts: http://arxiv.org/abs/1404.0929 Tuned based on NA12878 call comparisons to Genome in a Bottle reference genome. """ if not vcfutils.vcf_has_variants(in_file): base, ext = utils.splitext_plus(in_file) out_file = "{base}-filter{ext}".format(**locals()) if not utils.file_exists(out_file): shutil.copy(in_file, out_file) if out_file.endswith(".vcf.gz"): out_file = vcfutils.bgzip_and_index(out_file, data["config"]) return out_file depth_thresh, qual_thresh = None, None if _do_high_depth_filter(data): stats = _calc_vcf_stats(in_file) if stats["avg_depth"] > 0: depth_thresh = int(math.ceil(stats["avg_depth"] + 3 * math.pow(stats["avg_depth"], 0.5))) qual_thresh = depth_thresh * 2.0 # Multiplier from default GATK QD cutoff filter filters = ('(AF[0] <= 0.5 && (max(FORMAT/DP) < 4 || (max(FORMAT/DP) < 13 && %QUAL < 10))) || ' '(AF[0] > 0.5 && (max(FORMAT/DP) < 4 && %QUAL < 50))') if depth_thresh: filters += ' || (%QUAL < {qual_thresh} && max(FORMAT/DP) > {depth_thresh} && AF[0] <= 0.5)'.format(**locals()) return cutoff_w_expression(in_file, filters, data, name="FBQualDepth")
[ "def", "_freebayes_cutoff", "(", "in_file", ",", "data", ")", ":", "if", "not", "vcfutils", ".", "vcf_has_variants", "(", "in_file", ")", ":", "base", ",", "ext", "=", "utils", ".", "splitext_plus", "(", "in_file", ")", "out_file", "=", "\"{base}-filter{ext}...
Perform filtering of FreeBayes results, flagging low confidence calls. Filters using cutoffs on low depth based on Meynert et al's work modeling sensitivity of homozygote and heterozygote calling on depth: http://www.ncbi.nlm.nih.gov/pubmed/23773188 and high depth heterozygote SNP filtering based on Heng Li's work evaluating variant calling artifacts: http://arxiv.org/abs/1404.0929 Tuned based on NA12878 call comparisons to Genome in a Bottle reference genome.
[ "Perform", "filtering", "of", "FreeBayes", "results", "flagging", "low", "confidence", "calls", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vfilter.py#L78-L112
224,020
bcbio/bcbio-nextgen
bcbio/variation/vfilter.py
_calc_vcf_stats
def _calc_vcf_stats(in_file): """Calculate statistics on VCF for filtering, saving to a file for quick re-runs. """ out_file = "%s-stats.yaml" % utils.splitext_plus(in_file)[0] if not utils.file_exists(out_file): stats = {"avg_depth": _average_called_depth(in_file)} with open(out_file, "w") as out_handle: yaml.safe_dump(stats, out_handle, default_flow_style=False, allow_unicode=False) return stats else: with open(out_file) as in_handle: stats = yaml.safe_load(in_handle) return stats
python
def _calc_vcf_stats(in_file): """Calculate statistics on VCF for filtering, saving to a file for quick re-runs. """ out_file = "%s-stats.yaml" % utils.splitext_plus(in_file)[0] if not utils.file_exists(out_file): stats = {"avg_depth": _average_called_depth(in_file)} with open(out_file, "w") as out_handle: yaml.safe_dump(stats, out_handle, default_flow_style=False, allow_unicode=False) return stats else: with open(out_file) as in_handle: stats = yaml.safe_load(in_handle) return stats
[ "def", "_calc_vcf_stats", "(", "in_file", ")", ":", "out_file", "=", "\"%s-stats.yaml\"", "%", "utils", ".", "splitext_plus", "(", "in_file", ")", "[", "0", "]", "if", "not", "utils", ".", "file_exists", "(", "out_file", ")", ":", "stats", "=", "{", "\"a...
Calculate statistics on VCF for filtering, saving to a file for quick re-runs.
[ "Calculate", "statistics", "on", "VCF", "for", "filtering", "saving", "to", "a", "file", "for", "quick", "re", "-", "runs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vfilter.py#L121-L133
224,021
bcbio/bcbio-nextgen
bcbio/variation/vfilter.py
_average_called_depth
def _average_called_depth(in_file): """Retrieve the average depth of called reads in the provided VCF. """ import cyvcf2 depths = [] for rec in cyvcf2.VCF(str(in_file)): d = rec.INFO.get("DP") if d is not None: depths.append(int(d)) if len(depths) > 0: return int(math.ceil(numpy.mean(depths))) else: return 0
python
def _average_called_depth(in_file): """Retrieve the average depth of called reads in the provided VCF. """ import cyvcf2 depths = [] for rec in cyvcf2.VCF(str(in_file)): d = rec.INFO.get("DP") if d is not None: depths.append(int(d)) if len(depths) > 0: return int(math.ceil(numpy.mean(depths))) else: return 0
[ "def", "_average_called_depth", "(", "in_file", ")", ":", "import", "cyvcf2", "depths", "=", "[", "]", "for", "rec", "in", "cyvcf2", ".", "VCF", "(", "str", "(", "in_file", ")", ")", ":", "d", "=", "rec", ".", "INFO", ".", "get", "(", "\"DP\"", ")"...
Retrieve the average depth of called reads in the provided VCF.
[ "Retrieve", "the", "average", "depth", "of", "called", "reads", "in", "the", "provided", "VCF", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vfilter.py#L135-L147
224,022
bcbio/bcbio-nextgen
bcbio/variation/vfilter.py
platypus
def platypus(in_file, data): """Filter Platypus calls, removing Q20 filter and replacing with depth and quality based filter. Platypus uses its own VCF nomenclature: TC == DP, FR == AF Platypus gVCF output appears to have an 0/1 index problem so the reference block regions are 1 base outside regions of interest. We avoid limiting regions during filtering when using it. """ filters = ('(FR[0] <= 0.5 && TC < 4 && %QUAL < 20) || ' '(TC < 13 && %QUAL < 10) || ' '(FR[0] > 0.5 && TC < 4 && %QUAL < 50)') limit_regions = "variant_regions" if not vcfutils.is_gvcf_file(in_file) else None return cutoff_w_expression(in_file, filters, data, name="PlatQualDepth", extra_cmd="| sed 's/\\tQ20\\t/\\tPASS\\t/'", limit_regions=limit_regions)
python
def platypus(in_file, data): """Filter Platypus calls, removing Q20 filter and replacing with depth and quality based filter. Platypus uses its own VCF nomenclature: TC == DP, FR == AF Platypus gVCF output appears to have an 0/1 index problem so the reference block regions are 1 base outside regions of interest. We avoid limiting regions during filtering when using it. """ filters = ('(FR[0] <= 0.5 && TC < 4 && %QUAL < 20) || ' '(TC < 13 && %QUAL < 10) || ' '(FR[0] > 0.5 && TC < 4 && %QUAL < 50)') limit_regions = "variant_regions" if not vcfutils.is_gvcf_file(in_file) else None return cutoff_w_expression(in_file, filters, data, name="PlatQualDepth", extra_cmd="| sed 's/\\tQ20\\t/\\tPASS\\t/'", limit_regions=limit_regions)
[ "def", "platypus", "(", "in_file", ",", "data", ")", ":", "filters", "=", "(", "'(FR[0] <= 0.5 && TC < 4 && %QUAL < 20) || '", "'(TC < 13 && %QUAL < 10) || '", "'(FR[0] > 0.5 && TC < 4 && %QUAL < 50)'", ")", "limit_regions", "=", "\"variant_regions\"", "if", "not", "vcfutils"...
Filter Platypus calls, removing Q20 filter and replacing with depth and quality based filter. Platypus uses its own VCF nomenclature: TC == DP, FR == AF Platypus gVCF output appears to have an 0/1 index problem so the reference block regions are 1 base outside regions of interest. We avoid limiting regions during filtering when using it.
[ "Filter", "Platypus", "calls", "removing", "Q20", "filter", "and", "replacing", "with", "depth", "and", "quality", "based", "filter", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vfilter.py#L149-L163
224,023
bcbio/bcbio-nextgen
bcbio/variation/vfilter.py
gatk_snp_cutoff
def gatk_snp_cutoff(in_file, data): """Perform cutoff-based soft filtering on GATK SNPs using best-practice recommendations. We have a more lenient mapping quality (MQ) filter compared to GATK defaults. The recommended filter (MQ < 40) is too stringent, so we adjust to 30: http://imgur.com/a/oHRVB QD and FS are not calculated when generating gVCF output: https://github.com/broadgsa/gatk-protected/blob/e91472ddc7d58ace52db0cab4d70a072a918d64c/protected/gatk-tools-protected/src/main/java/org/broadinstitute/gatk/tools/walkers/haplotypecaller/HaplotypeCaller.java#L300 The extra command removes escaped quotes in the VCF output which pyVCF fails on. Does not use the GATK best practice recommend SOR filter (SOR > 3.0) as it has a negative impact on sensitivity relative to precision: https://github.com/bcbio/bcbio_validations/tree/master/gatk4#na12878-hg38 """ filters = ["MQRankSum < -12.5", "ReadPosRankSum < -8.0"] # GATK Haplotype caller (v2.2) appears to have much larger HaplotypeScores # resulting in excessive filtering, so avoid this metric variantcaller = utils.get_in(data, ("config", "algorithm", "variantcaller")) if variantcaller not in ["gatk-haplotype", "haplotyper"]: filters.append("HaplotypeScore > 13.0") # Additional filter metrics, unless using raw GATK HaplotypeCaller or Sentieon gVCFs if not (vcfutils.is_gvcf_file(in_file) and variantcaller in ["gatk-haplotype", "haplotyper"]): filters += ["QD < 2.0"] filters += ["FS > 60.0"] filters += _gatk_general() filters += ["MQ < 30.0"] return cutoff_w_expression(in_file, 'TYPE="snp" && (%s)' % " || ".join(filters), data, "GATKCutoffSNP", "SNP", extra_cmd=r"""| sed 's/\\"//g'""")
python
def gatk_snp_cutoff(in_file, data): """Perform cutoff-based soft filtering on GATK SNPs using best-practice recommendations. We have a more lenient mapping quality (MQ) filter compared to GATK defaults. The recommended filter (MQ < 40) is too stringent, so we adjust to 30: http://imgur.com/a/oHRVB QD and FS are not calculated when generating gVCF output: https://github.com/broadgsa/gatk-protected/blob/e91472ddc7d58ace52db0cab4d70a072a918d64c/protected/gatk-tools-protected/src/main/java/org/broadinstitute/gatk/tools/walkers/haplotypecaller/HaplotypeCaller.java#L300 The extra command removes escaped quotes in the VCF output which pyVCF fails on. Does not use the GATK best practice recommend SOR filter (SOR > 3.0) as it has a negative impact on sensitivity relative to precision: https://github.com/bcbio/bcbio_validations/tree/master/gatk4#na12878-hg38 """ filters = ["MQRankSum < -12.5", "ReadPosRankSum < -8.0"] # GATK Haplotype caller (v2.2) appears to have much larger HaplotypeScores # resulting in excessive filtering, so avoid this metric variantcaller = utils.get_in(data, ("config", "algorithm", "variantcaller")) if variantcaller not in ["gatk-haplotype", "haplotyper"]: filters.append("HaplotypeScore > 13.0") # Additional filter metrics, unless using raw GATK HaplotypeCaller or Sentieon gVCFs if not (vcfutils.is_gvcf_file(in_file) and variantcaller in ["gatk-haplotype", "haplotyper"]): filters += ["QD < 2.0"] filters += ["FS > 60.0"] filters += _gatk_general() filters += ["MQ < 30.0"] return cutoff_w_expression(in_file, 'TYPE="snp" && (%s)' % " || ".join(filters), data, "GATKCutoffSNP", "SNP", extra_cmd=r"""| sed 's/\\"//g'""")
[ "def", "gatk_snp_cutoff", "(", "in_file", ",", "data", ")", ":", "filters", "=", "[", "\"MQRankSum < -12.5\"", ",", "\"ReadPosRankSum < -8.0\"", "]", "# GATK Haplotype caller (v2.2) appears to have much larger HaplotypeScores", "# resulting in excessive filtering, so avoid this metri...
Perform cutoff-based soft filtering on GATK SNPs using best-practice recommendations. We have a more lenient mapping quality (MQ) filter compared to GATK defaults. The recommended filter (MQ < 40) is too stringent, so we adjust to 30: http://imgur.com/a/oHRVB QD and FS are not calculated when generating gVCF output: https://github.com/broadgsa/gatk-protected/blob/e91472ddc7d58ace52db0cab4d70a072a918d64c/protected/gatk-tools-protected/src/main/java/org/broadinstitute/gatk/tools/walkers/haplotypecaller/HaplotypeCaller.java#L300 The extra command removes escaped quotes in the VCF output which pyVCF fails on. Does not use the GATK best practice recommend SOR filter (SOR > 3.0) as it has a negative impact on sensitivity relative to precision: https://github.com/bcbio/bcbio_validations/tree/master/gatk4#na12878-hg38
[ "Perform", "cutoff", "-", "based", "soft", "filtering", "on", "GATK", "SNPs", "using", "best", "-", "practice", "recommendations", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vfilter.py#L183-L214
224,024
bcbio/bcbio-nextgen
bcbio/bam/counts.py
random_regions
def random_regions(base, n, size): """Generate n random regions of 'size' in the provided base spread. """ spread = size // 2 base_info = collections.defaultdict(list) for space, start, end in base: base_info[space].append(start + spread) base_info[space].append(end - spread) regions = [] for _ in range(n): space = random.choice(base_info.keys()) pos = random.randint(min(base_info[space]), max(base_info[space])) regions.append([space, pos-spread, pos+spread]) return regions
python
def random_regions(base, n, size): """Generate n random regions of 'size' in the provided base spread. """ spread = size // 2 base_info = collections.defaultdict(list) for space, start, end in base: base_info[space].append(start + spread) base_info[space].append(end - spread) regions = [] for _ in range(n): space = random.choice(base_info.keys()) pos = random.randint(min(base_info[space]), max(base_info[space])) regions.append([space, pos-spread, pos+spread]) return regions
[ "def", "random_regions", "(", "base", ",", "n", ",", "size", ")", ":", "spread", "=", "size", "//", "2", "base_info", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "space", ",", "start", ",", "end", "in", "base", ":", "base_info", ...
Generate n random regions of 'size' in the provided base spread.
[ "Generate", "n", "random", "regions", "of", "size", "in", "the", "provided", "base", "spread", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/counts.py#L52-L65
224,025
bcbio/bcbio-nextgen
bcbio/bam/counts.py
NormalizedBam.all_regions
def all_regions(self): """Get a tuple of all chromosome, start and end regions. """ regions = [] for sq in self._bam.header["SQ"]: regions.append((sq["SN"], 1, int(sq["LN"]))) return regions
python
def all_regions(self): """Get a tuple of all chromosome, start and end regions. """ regions = [] for sq in self._bam.header["SQ"]: regions.append((sq["SN"], 1, int(sq["LN"]))) return regions
[ "def", "all_regions", "(", "self", ")", ":", "regions", "=", "[", "]", "for", "sq", "in", "self", ".", "_bam", ".", "header", "[", "\"SQ\"", "]", ":", "regions", ".", "append", "(", "(", "sq", "[", "\"SN\"", "]", ",", "1", ",", "int", "(", "sq"...
Get a tuple of all chromosome, start and end regions.
[ "Get", "a", "tuple", "of", "all", "chromosome", "start", "and", "end", "regions", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/counts.py#L25-L31
224,026
bcbio/bcbio-nextgen
bcbio/bam/counts.py
NormalizedBam.read_count
def read_count(self, space, start, end): """Retrieve the normalized read count in the provided region. """ read_counts = 0 for read in self._bam.fetch(space, start, end): read_counts += 1 return self._normalize(read_counts, self._total)
python
def read_count(self, space, start, end): """Retrieve the normalized read count in the provided region. """ read_counts = 0 for read in self._bam.fetch(space, start, end): read_counts += 1 return self._normalize(read_counts, self._total)
[ "def", "read_count", "(", "self", ",", "space", ",", "start", ",", "end", ")", ":", "read_counts", "=", "0", "for", "read", "in", "self", ".", "_bam", ".", "fetch", "(", "space", ",", "start", ",", "end", ")", ":", "read_counts", "+=", "1", "return...
Retrieve the normalized read count in the provided region.
[ "Retrieve", "the", "normalized", "read", "count", "in", "the", "provided", "region", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/counts.py#L33-L39
224,027
bcbio/bcbio-nextgen
bcbio/bam/counts.py
NormalizedBam.coverage_pileup
def coverage_pileup(self, space, start, end): """Retrieve pileup coverage across a specified region. """ return ((col.pos, self._normalize(col.n, self._total)) for col in self._bam.pileup(space, start, end))
python
def coverage_pileup(self, space, start, end): """Retrieve pileup coverage across a specified region. """ return ((col.pos, self._normalize(col.n, self._total)) for col in self._bam.pileup(space, start, end))
[ "def", "coverage_pileup", "(", "self", ",", "space", ",", "start", ",", "end", ")", ":", "return", "(", "(", "col", ".", "pos", ",", "self", ".", "_normalize", "(", "col", ".", "n", ",", "self", ".", "_total", ")", ")", "for", "col", "in", "self"...
Retrieve pileup coverage across a specified region.
[ "Retrieve", "pileup", "coverage", "across", "a", "specified", "region", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/counts.py#L41-L45
224,028
bcbio/bcbio-nextgen
bcbio/heterogeneity/phylowgs.py
_prepare_summary
def _prepare_summary(evolve_file, ssm_file, cnv_file, work_dir, somatic_info): """Prepare a summary with gene-labelled heterogeneity from PhyloWGS predictions. """ out_file = os.path.join(work_dir, "%s-phylowgs.txt" % somatic_info.tumor_name) if not utils.file_uptodate(out_file, evolve_file): with file_transaction(somatic_info.tumor_data, out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: ssm_locs = _read_ssm_locs(ssm_file) cnv_ssms = _read_cnv_ssms(cnv_file) for i, (ids, tree) in enumerate(_evolve_reader(evolve_file)): out_handle.write("* Tree %s\n" % (i + 1)) out_handle.write("\n" + "\n".join(tree) + "\n\n") for nid, freq, gids in ids: genes = _gids_to_genes(gids, ssm_locs, cnv_ssms, somatic_info.tumor_data) out_handle.write("%s\t%s\t%s\n" % (nid, freq, ",".join(genes))) out_handle.write("\n") return out_file
python
def _prepare_summary(evolve_file, ssm_file, cnv_file, work_dir, somatic_info): """Prepare a summary with gene-labelled heterogeneity from PhyloWGS predictions. """ out_file = os.path.join(work_dir, "%s-phylowgs.txt" % somatic_info.tumor_name) if not utils.file_uptodate(out_file, evolve_file): with file_transaction(somatic_info.tumor_data, out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: ssm_locs = _read_ssm_locs(ssm_file) cnv_ssms = _read_cnv_ssms(cnv_file) for i, (ids, tree) in enumerate(_evolve_reader(evolve_file)): out_handle.write("* Tree %s\n" % (i + 1)) out_handle.write("\n" + "\n".join(tree) + "\n\n") for nid, freq, gids in ids: genes = _gids_to_genes(gids, ssm_locs, cnv_ssms, somatic_info.tumor_data) out_handle.write("%s\t%s\t%s\n" % (nid, freq, ",".join(genes))) out_handle.write("\n") return out_file
[ "def", "_prepare_summary", "(", "evolve_file", ",", "ssm_file", ",", "cnv_file", ",", "work_dir", ",", "somatic_info", ")", ":", "out_file", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"%s-phylowgs.txt\"", "%", "somatic_info", ".", "tumor_name...
Prepare a summary with gene-labelled heterogeneity from PhyloWGS predictions.
[ "Prepare", "a", "summary", "with", "gene", "-", "labelled", "heterogeneity", "from", "PhyloWGS", "predictions", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/phylowgs.py#L39-L55
224,029
bcbio/bcbio-nextgen
bcbio/heterogeneity/phylowgs.py
_gids_to_genes
def _gids_to_genes(gids, ssm_locs, cnv_ssms, data): """Convert support ids for SNPs and SSMs into associated genes. """ locs = collections.defaultdict(set) for gid in gids: cur_locs = [] try: cur_locs.append(ssm_locs[gid]) except KeyError: for ssm_loc in cnv_ssms.get(gid, []): cur_locs.append(ssm_locs[ssm_loc]) for chrom, pos in cur_locs: locs[chrom].add(pos) genes = set([]) with tx_tmpdir(data) as tmpdir: chrom_prefix = "chr" if next(ref.file_contigs(dd.get_ref_file(data))).name.startswith("chr") else "" loc_file = os.path.join(tmpdir, "battenberg_find_genes.bed") with open(loc_file, "w") as out_handle: for chrom in sorted(locs.keys()): for loc in sorted(list(locs[chrom])): out_handle.write("%s%s\t%s\t%s\n" % (chrom_prefix, chrom, loc - 1, loc)) ann_file = annotate.add_genes(loc_file, data, max_distance=10000) for r in pybedtools.BedTool(ann_file): for gene in r.name.split(","): if gene != ".": genes.add(gene) return sorted(list(genes))
python
def _gids_to_genes(gids, ssm_locs, cnv_ssms, data): """Convert support ids for SNPs and SSMs into associated genes. """ locs = collections.defaultdict(set) for gid in gids: cur_locs = [] try: cur_locs.append(ssm_locs[gid]) except KeyError: for ssm_loc in cnv_ssms.get(gid, []): cur_locs.append(ssm_locs[ssm_loc]) for chrom, pos in cur_locs: locs[chrom].add(pos) genes = set([]) with tx_tmpdir(data) as tmpdir: chrom_prefix = "chr" if next(ref.file_contigs(dd.get_ref_file(data))).name.startswith("chr") else "" loc_file = os.path.join(tmpdir, "battenberg_find_genes.bed") with open(loc_file, "w") as out_handle: for chrom in sorted(locs.keys()): for loc in sorted(list(locs[chrom])): out_handle.write("%s%s\t%s\t%s\n" % (chrom_prefix, chrom, loc - 1, loc)) ann_file = annotate.add_genes(loc_file, data, max_distance=10000) for r in pybedtools.BedTool(ann_file): for gene in r.name.split(","): if gene != ".": genes.add(gene) return sorted(list(genes))
[ "def", "_gids_to_genes", "(", "gids", ",", "ssm_locs", ",", "cnv_ssms", ",", "data", ")", ":", "locs", "=", "collections", ".", "defaultdict", "(", "set", ")", "for", "gid", "in", "gids", ":", "cur_locs", "=", "[", "]", "try", ":", "cur_locs", ".", "...
Convert support ids for SNPs and SSMs into associated genes.
[ "Convert", "support", "ids", "for", "SNPs", "and", "SSMs", "into", "associated", "genes", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/phylowgs.py#L57-L83
224,030
bcbio/bcbio-nextgen
bcbio/heterogeneity/phylowgs.py
_evolve_reader
def _evolve_reader(in_file): """Generate a list of region IDs and trees from a top_k_trees evolve.py file. """ cur_id_list = None cur_tree = None with open(in_file) as in_handle: for line in in_handle: if line.startswith("id,"): if cur_id_list: yield cur_id_list, cur_tree cur_id_list = [] cur_tree = None elif cur_tree is not None: if line.strip() and not line.startswith("Number of non-empty"): cur_tree.append(line.rstrip()) elif not line.strip() and cur_id_list and len(cur_id_list) > 0: cur_tree = [] elif line.strip(): parts = [] for part in line.strip().split("\t"): if part.endswith(","): part = part[:-1] parts.append(part) if len(parts) > 4: nid, freq, _, _, support = parts cur_id_list.append((nid, freq, support.split("; "))) if cur_id_list: yield cur_id_list, cur_tree
python
def _evolve_reader(in_file): """Generate a list of region IDs and trees from a top_k_trees evolve.py file. """ cur_id_list = None cur_tree = None with open(in_file) as in_handle: for line in in_handle: if line.startswith("id,"): if cur_id_list: yield cur_id_list, cur_tree cur_id_list = [] cur_tree = None elif cur_tree is not None: if line.strip() and not line.startswith("Number of non-empty"): cur_tree.append(line.rstrip()) elif not line.strip() and cur_id_list and len(cur_id_list) > 0: cur_tree = [] elif line.strip(): parts = [] for part in line.strip().split("\t"): if part.endswith(","): part = part[:-1] parts.append(part) if len(parts) > 4: nid, freq, _, _, support = parts cur_id_list.append((nid, freq, support.split("; "))) if cur_id_list: yield cur_id_list, cur_tree
[ "def", "_evolve_reader", "(", "in_file", ")", ":", "cur_id_list", "=", "None", "cur_tree", "=", "None", "with", "open", "(", "in_file", ")", "as", "in_handle", ":", "for", "line", "in", "in_handle", ":", "if", "line", ".", "startswith", "(", "\"id,\"", "...
Generate a list of region IDs and trees from a top_k_trees evolve.py file.
[ "Generate", "a", "list", "of", "region", "IDs", "and", "trees", "from", "a", "top_k_trees", "evolve", ".", "py", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/phylowgs.py#L85-L112
224,031
bcbio/bcbio-nextgen
bcbio/heterogeneity/phylowgs.py
_read_cnv_ssms
def _read_cnv_ssms(in_file): """Map CNVs to associated SSMs """ out = {} with open(in_file) as in_handle: in_handle.readline() # header for line in in_handle: parts = line.strip().split() if len(parts) > 3: cnvid, _, _, ssms = parts out[cnvid] = [x.split(",")[0] for x in ssms.split(";")] return out
python
def _read_cnv_ssms(in_file): """Map CNVs to associated SSMs """ out = {} with open(in_file) as in_handle: in_handle.readline() # header for line in in_handle: parts = line.strip().split() if len(parts) > 3: cnvid, _, _, ssms = parts out[cnvid] = [x.split(",")[0] for x in ssms.split(";")] return out
[ "def", "_read_cnv_ssms", "(", "in_file", ")", ":", "out", "=", "{", "}", "with", "open", "(", "in_file", ")", "as", "in_handle", ":", "in_handle", ".", "readline", "(", ")", "# header", "for", "line", "in", "in_handle", ":", "parts", "=", "line", ".", ...
Map CNVs to associated SSMs
[ "Map", "CNVs", "to", "associated", "SSMs" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/phylowgs.py#L114-L125
224,032
bcbio/bcbio-nextgen
bcbio/heterogeneity/phylowgs.py
_read_ssm_locs
def _read_ssm_locs(in_file): """Map SSMs to chromosomal locations. """ out = {} with open(in_file) as in_handle: in_handle.readline() # header for line in in_handle: sid, loc = line.split()[:2] chrom, pos = loc.split("_") out[sid] = (chrom, int(pos)) return out
python
def _read_ssm_locs(in_file): """Map SSMs to chromosomal locations. """ out = {} with open(in_file) as in_handle: in_handle.readline() # header for line in in_handle: sid, loc = line.split()[:2] chrom, pos = loc.split("_") out[sid] = (chrom, int(pos)) return out
[ "def", "_read_ssm_locs", "(", "in_file", ")", ":", "out", "=", "{", "}", "with", "open", "(", "in_file", ")", "as", "in_handle", ":", "in_handle", ".", "readline", "(", ")", "# header", "for", "line", "in", "in_handle", ":", "sid", ",", "loc", "=", "...
Map SSMs to chromosomal locations.
[ "Map", "SSMs", "to", "chromosomal", "locations", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/phylowgs.py#L127-L137
224,033
bcbio/bcbio-nextgen
bcbio/heterogeneity/phylowgs.py
_run_evolve
def _run_evolve(ssm_file, cnv_file, work_dir, data): """Run evolve.py to infer subclonal composition. """ exe = os.path.join(os.path.dirname(sys.executable), "evolve.py") assert os.path.exists(exe), "Could not find evolve script for PhyloWGS runs." out_dir = os.path.join(work_dir, "evolve") out_file = os.path.join(out_dir, "top_k_trees") if not utils.file_uptodate(out_file, cnv_file): with file_transaction(data, out_dir) as tx_out_dir: with utils.chdir(tx_out_dir): cmd = [sys.executable, exe, "-r", "42", ssm_file, cnv_file] do.run(cmd, "Run PhyloWGS evolution") return out_file
python
def _run_evolve(ssm_file, cnv_file, work_dir, data): """Run evolve.py to infer subclonal composition. """ exe = os.path.join(os.path.dirname(sys.executable), "evolve.py") assert os.path.exists(exe), "Could not find evolve script for PhyloWGS runs." out_dir = os.path.join(work_dir, "evolve") out_file = os.path.join(out_dir, "top_k_trees") if not utils.file_uptodate(out_file, cnv_file): with file_transaction(data, out_dir) as tx_out_dir: with utils.chdir(tx_out_dir): cmd = [sys.executable, exe, "-r", "42", ssm_file, cnv_file] do.run(cmd, "Run PhyloWGS evolution") return out_file
[ "def", "_run_evolve", "(", "ssm_file", ",", "cnv_file", ",", "work_dir", ",", "data", ")", ":", "exe", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "sys", ".", "executable", ")", ",", "\"evolve.py\"", ")", "assert...
Run evolve.py to infer subclonal composition.
[ "Run", "evolve", ".", "py", "to", "infer", "subclonal", "composition", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/phylowgs.py#L139-L151
224,034
bcbio/bcbio-nextgen
bcbio/heterogeneity/phylowgs.py
_prep_inputs
def _prep_inputs(vrn_info, cnv_info, somatic_info, work_dir, config): """Prepare inputs for running PhyloWGS from variant and CNV calls. """ exe = os.path.join(os.path.dirname(sys.executable), "create_phylowgs_inputs.py") assert os.path.exists(exe), "Could not find input prep script for PhyloWGS runs." ssm_file = os.path.join(work_dir, "ssm_data.txt") cnv_file = os.path.join(work_dir, "cnv_data.txt") if not utils.file_exists(ssm_file) or not utils.file_exists(cnv_file): with file_transaction(somatic_info.tumor_data, ssm_file, cnv_file) as (tx_ssm_file, tx_cnv_file): variant_type, input_vcf_file = _prep_vrn_file(vrn_info["vrn_file"], vrn_info["variantcaller"], work_dir, somatic_info, cnv_info["ignore"], config) input_cnv_file = _prep_cnv_file(cnv_info["subclones"], work_dir, somatic_info) cmd = [sys.executable, exe, "--sample-size", str(config["sample_size"]), "--tumor-sample", somatic_info.tumor_name, "--battenberg", input_cnv_file, "--cellularity", _read_contam(cnv_info["contamination"]), "--output-cnvs", tx_cnv_file, "--output-variants", tx_ssm_file, "--variant-type", variant_type, input_vcf_file] do.run(cmd, "Prepare PhyloWGS inputs.") return ssm_file, cnv_file
python
def _prep_inputs(vrn_info, cnv_info, somatic_info, work_dir, config): """Prepare inputs for running PhyloWGS from variant and CNV calls. """ exe = os.path.join(os.path.dirname(sys.executable), "create_phylowgs_inputs.py") assert os.path.exists(exe), "Could not find input prep script for PhyloWGS runs." ssm_file = os.path.join(work_dir, "ssm_data.txt") cnv_file = os.path.join(work_dir, "cnv_data.txt") if not utils.file_exists(ssm_file) or not utils.file_exists(cnv_file): with file_transaction(somatic_info.tumor_data, ssm_file, cnv_file) as (tx_ssm_file, tx_cnv_file): variant_type, input_vcf_file = _prep_vrn_file(vrn_info["vrn_file"], vrn_info["variantcaller"], work_dir, somatic_info, cnv_info["ignore"], config) input_cnv_file = _prep_cnv_file(cnv_info["subclones"], work_dir, somatic_info) cmd = [sys.executable, exe, "--sample-size", str(config["sample_size"]), "--tumor-sample", somatic_info.tumor_name, "--battenberg", input_cnv_file, "--cellularity", _read_contam(cnv_info["contamination"]), "--output-cnvs", tx_cnv_file, "--output-variants", tx_ssm_file, "--variant-type", variant_type, input_vcf_file] do.run(cmd, "Prepare PhyloWGS inputs.") return ssm_file, cnv_file
[ "def", "_prep_inputs", "(", "vrn_info", ",", "cnv_info", ",", "somatic_info", ",", "work_dir", ",", "config", ")", ":", "exe", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "sys", ".", "executable", ")", ",", "\"cr...
Prepare inputs for running PhyloWGS from variant and CNV calls.
[ "Prepare", "inputs", "for", "running", "PhyloWGS", "from", "variant", "and", "CNV", "calls", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/phylowgs.py#L153-L171
224,035
bcbio/bcbio-nextgen
bcbio/heterogeneity/phylowgs.py
_prep_cnv_file
def _prep_cnv_file(in_file, work_dir, somatic_info): """Prepare Battenberg CNV file for ingest by PhyloWGS. The PhyloWGS preparation script does not handle 'chr' prefixed chromosomes (hg19 style) correctly. This converts them over to GRCh37 (no 'chr') style to match preparation work in _prep_vrn_file. """ out_file = os.path.join(work_dir, "%s-prep%s" % utils.splitext_plus(os.path.basename(in_file))) if not utils.file_uptodate(out_file, in_file): with file_transaction(somatic_info.tumor_data, out_file) as tx_out_file: with open(in_file) as in_handle: with open(tx_out_file, "w") as out_handle: out_handle.write(in_handle.readline()) # header for line in in_handle: parts = line.split("\t") parts[1] = _phylowgs_compatible_chroms(parts[1]) out_handle.write("\t".join(parts)) return out_file
python
def _prep_cnv_file(in_file, work_dir, somatic_info): """Prepare Battenberg CNV file for ingest by PhyloWGS. The PhyloWGS preparation script does not handle 'chr' prefixed chromosomes (hg19 style) correctly. This converts them over to GRCh37 (no 'chr') style to match preparation work in _prep_vrn_file. """ out_file = os.path.join(work_dir, "%s-prep%s" % utils.splitext_plus(os.path.basename(in_file))) if not utils.file_uptodate(out_file, in_file): with file_transaction(somatic_info.tumor_data, out_file) as tx_out_file: with open(in_file) as in_handle: with open(tx_out_file, "w") as out_handle: out_handle.write(in_handle.readline()) # header for line in in_handle: parts = line.split("\t") parts[1] = _phylowgs_compatible_chroms(parts[1]) out_handle.write("\t".join(parts)) return out_file
[ "def", "_prep_cnv_file", "(", "in_file", ",", "work_dir", ",", "somatic_info", ")", ":", "out_file", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"%s-prep%s\"", "%", "utils", ".", "splitext_plus", "(", "os", ".", "path", ".", "basename", ...
Prepare Battenberg CNV file for ingest by PhyloWGS. The PhyloWGS preparation script does not handle 'chr' prefixed chromosomes (hg19 style) correctly. This converts them over to GRCh37 (no 'chr') style to match preparation work in _prep_vrn_file.
[ "Prepare", "Battenberg", "CNV", "file", "for", "ingest", "by", "PhyloWGS", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/phylowgs.py#L173-L190
224,036
bcbio/bcbio-nextgen
bcbio/heterogeneity/phylowgs.py
_prep_vrn_file
def _prep_vrn_file(in_file, vcaller, work_dir, somatic_info, ignore_file, config): """Create a variant file to feed into the PhyloWGS prep script, limiting records. Sorts by depth, adding top covered samples up to the sample_size supported by PhyloWGS. The logic is that the higher depth samples will have better resolution for frequency differences. More complex implementations could try to subset based on a distribution of frequencies to best sample the potential heterogeneity. Handles MuTect and VarDict as inputs to PhyloWGS. Fixes chromosome naming to use non chr-prefixed contigs, to match _prep_cnv_file. """ if vcaller.startswith("vardict"): variant_type = "vardict" elif vcaller == "mutect": variant_type = "mutect-smchet" else: raise ValueError("Unexpected variant caller for PhyloWGS prep: %s" % vcaller) out_file = os.path.join(work_dir, "%s-%s-prep.vcf" % (utils.splitext_plus(os.path.basename(in_file))[0], vcaller)) if not utils.file_uptodate(out_file, in_file): check_fn = _min_sample_pass(ignore_file) with file_transaction(somatic_info.tumor_data, out_file) as tx_out_file: tx_out_file_raw = "%s-raw%s" % utils.splitext_plus(tx_out_file) # Filter inputs with VariantFile(in_file) as bcf_in: depths = [_sample_depth(rec, somatic_info.tumor_name) for rec in filter(check_fn, bcf_in)] depths.sort(reverse=True) depth_thresh = depths[:config["sample_size"]][-1] if depths else 0 with VariantFile(in_file) as bcf_in: with VariantFile(tx_out_file_raw, "w", header=bcf_in.header) as bcf_out: for rec in bcf_in: if (check_fn(rec) and (depth_thresh < 5 or _sample_depth(rec, somatic_info.tumor_name) >= depth_thresh)): bcf_out.write(rec) # Fix potential chromosome issues with open(tx_out_file_raw) as in_handle: with open(tx_out_file, "w") as out_handle: for line in in_handle: if not line.startswith("#"): parts = line.split("\t") parts[0] = _phylowgs_compatible_chroms(parts[0]) line = "\t".join(parts) out_handle.write(line) return variant_type, out_file
python
def _prep_vrn_file(in_file, vcaller, work_dir, somatic_info, ignore_file, config): """Create a variant file to feed into the PhyloWGS prep script, limiting records. Sorts by depth, adding top covered samples up to the sample_size supported by PhyloWGS. The logic is that the higher depth samples will have better resolution for frequency differences. More complex implementations could try to subset based on a distribution of frequencies to best sample the potential heterogeneity. Handles MuTect and VarDict as inputs to PhyloWGS. Fixes chromosome naming to use non chr-prefixed contigs, to match _prep_cnv_file. """ if vcaller.startswith("vardict"): variant_type = "vardict" elif vcaller == "mutect": variant_type = "mutect-smchet" else: raise ValueError("Unexpected variant caller for PhyloWGS prep: %s" % vcaller) out_file = os.path.join(work_dir, "%s-%s-prep.vcf" % (utils.splitext_plus(os.path.basename(in_file))[0], vcaller)) if not utils.file_uptodate(out_file, in_file): check_fn = _min_sample_pass(ignore_file) with file_transaction(somatic_info.tumor_data, out_file) as tx_out_file: tx_out_file_raw = "%s-raw%s" % utils.splitext_plus(tx_out_file) # Filter inputs with VariantFile(in_file) as bcf_in: depths = [_sample_depth(rec, somatic_info.tumor_name) for rec in filter(check_fn, bcf_in)] depths.sort(reverse=True) depth_thresh = depths[:config["sample_size"]][-1] if depths else 0 with VariantFile(in_file) as bcf_in: with VariantFile(tx_out_file_raw, "w", header=bcf_in.header) as bcf_out: for rec in bcf_in: if (check_fn(rec) and (depth_thresh < 5 or _sample_depth(rec, somatic_info.tumor_name) >= depth_thresh)): bcf_out.write(rec) # Fix potential chromosome issues with open(tx_out_file_raw) as in_handle: with open(tx_out_file, "w") as out_handle: for line in in_handle: if not line.startswith("#"): parts = line.split("\t") parts[0] = _phylowgs_compatible_chroms(parts[0]) line = "\t".join(parts) out_handle.write(line) return variant_type, out_file
[ "def", "_prep_vrn_file", "(", "in_file", ",", "vcaller", ",", "work_dir", ",", "somatic_info", ",", "ignore_file", ",", "config", ")", ":", "if", "vcaller", ".", "startswith", "(", "\"vardict\"", ")", ":", "variant_type", "=", "\"vardict\"", "elif", "vcaller",...
Create a variant file to feed into the PhyloWGS prep script, limiting records. Sorts by depth, adding top covered samples up to the sample_size supported by PhyloWGS. The logic is that the higher depth samples will have better resolution for frequency differences. More complex implementations could try to subset based on a distribution of frequencies to best sample the potential heterogeneity. Handles MuTect and VarDict as inputs to PhyloWGS. Fixes chromosome naming to use non chr-prefixed contigs, to match _prep_cnv_file.
[ "Create", "a", "variant", "file", "to", "feed", "into", "the", "PhyloWGS", "prep", "script", "limiting", "records", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/phylowgs.py#L197-L243
224,037
bcbio/bcbio-nextgen
bcbio/srna/group.py
run_prepare
def run_prepare(*data): """ Run seqcluster prepare to merge all samples in one file """ out_dir = os.path.join(dd.get_work_dir(data[0][0]), "seqcluster", "prepare") out_dir = os.path.abspath(safe_makedir(out_dir)) prepare_dir = os.path.join(out_dir, "prepare") tools = dd.get_expression_caller(data[0][0]) if len(tools) == 0: logger.info("You didn't specify any other expression caller tool." "You can add to the YAML file:" "expression_caller:[trna, seqcluster, mirdeep2]") fn = [] for sample in data: name = sample[0]["rgnames"]['sample'] fn.append("%s\t%s" % (sample[0]['collapse'], name)) args = namedtuple('args', 'debug print_debug minc minl maxl out') args = args(False, False, 2, 17, 40, out_dir) ma_out = op.join(out_dir, "seqs.ma") seq_out = op.join(out_dir, "seqs.fastq") min_shared = max(int(len(fn) / 10.0), 1) if not file_exists(ma_out): seq_l, sample_l = prepare._read_fastq_files(fn, args) with file_transaction(ma_out) as ma_tx: with open(ma_tx, 'w') as ma_handle: with open(seq_out, 'w') as seq_handle: logger.info("Prepare seqs.fastq with -minl 17 -maxl 40 -minc 2 --min_shared 0.1") prepare._create_matrix_uniq_seq(sample_l, seq_l, ma_handle, seq_handle, min_shared) for sample in data: sample[0]["seqcluster_prepare_ma"] = ma_out sample[0]["seqcluster_prepare_fastq"] = seq_out return data
python
def run_prepare(*data): """ Run seqcluster prepare to merge all samples in one file """ out_dir = os.path.join(dd.get_work_dir(data[0][0]), "seqcluster", "prepare") out_dir = os.path.abspath(safe_makedir(out_dir)) prepare_dir = os.path.join(out_dir, "prepare") tools = dd.get_expression_caller(data[0][0]) if len(tools) == 0: logger.info("You didn't specify any other expression caller tool." "You can add to the YAML file:" "expression_caller:[trna, seqcluster, mirdeep2]") fn = [] for sample in data: name = sample[0]["rgnames"]['sample'] fn.append("%s\t%s" % (sample[0]['collapse'], name)) args = namedtuple('args', 'debug print_debug minc minl maxl out') args = args(False, False, 2, 17, 40, out_dir) ma_out = op.join(out_dir, "seqs.ma") seq_out = op.join(out_dir, "seqs.fastq") min_shared = max(int(len(fn) / 10.0), 1) if not file_exists(ma_out): seq_l, sample_l = prepare._read_fastq_files(fn, args) with file_transaction(ma_out) as ma_tx: with open(ma_tx, 'w') as ma_handle: with open(seq_out, 'w') as seq_handle: logger.info("Prepare seqs.fastq with -minl 17 -maxl 40 -minc 2 --min_shared 0.1") prepare._create_matrix_uniq_seq(sample_l, seq_l, ma_handle, seq_handle, min_shared) for sample in data: sample[0]["seqcluster_prepare_ma"] = ma_out sample[0]["seqcluster_prepare_fastq"] = seq_out return data
[ "def", "run_prepare", "(", "*", "data", ")", ":", "out_dir", "=", "os", ".", "path", ".", "join", "(", "dd", ".", "get_work_dir", "(", "data", "[", "0", "]", "[", "0", "]", ")", ",", "\"seqcluster\"", ",", "\"prepare\"", ")", "out_dir", "=", "os", ...
Run seqcluster prepare to merge all samples in one file
[ "Run", "seqcluster", "prepare", "to", "merge", "all", "samples", "in", "one", "file" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/srna/group.py#L24-L56
224,038
bcbio/bcbio-nextgen
bcbio/srna/group.py
run_align
def run_align(*data): """ Prepare data to run alignment step, only once for each project """ work_dir = dd.get_work_dir(data[0][0]) out_dir = op.join(work_dir, "seqcluster", "prepare") seq_out = op.join(out_dir, "seqs.fastq") bam_dir = op.join(work_dir, "align") new_bam_file = op.join(bam_dir, "seqs.bam") tools = dd.get_expression_caller(data[0][0]) if not file_exists(new_bam_file): sample = process_alignment(data[0][0], [seq_out, None]) bam_file = dd.get_work_bam(sample[0][0]) shutil.move(bam_file, new_bam_file) shutil.move(bam_file + ".bai", new_bam_file + ".bai") shutil.rmtree(op.join(bam_dir, sample[0][0]["rgnames"]['sample'])) for sample in data: # sample[0]["align_bam"] = sample[0]["clean_fastq"] sample[0]["cluster_bam"] = new_bam_file if "mirdeep2" in tools: novel_db = mirdeep.run(data) return data
python
def run_align(*data): """ Prepare data to run alignment step, only once for each project """ work_dir = dd.get_work_dir(data[0][0]) out_dir = op.join(work_dir, "seqcluster", "prepare") seq_out = op.join(out_dir, "seqs.fastq") bam_dir = op.join(work_dir, "align") new_bam_file = op.join(bam_dir, "seqs.bam") tools = dd.get_expression_caller(data[0][0]) if not file_exists(new_bam_file): sample = process_alignment(data[0][0], [seq_out, None]) bam_file = dd.get_work_bam(sample[0][0]) shutil.move(bam_file, new_bam_file) shutil.move(bam_file + ".bai", new_bam_file + ".bai") shutil.rmtree(op.join(bam_dir, sample[0][0]["rgnames"]['sample'])) for sample in data: # sample[0]["align_bam"] = sample[0]["clean_fastq"] sample[0]["cluster_bam"] = new_bam_file if "mirdeep2" in tools: novel_db = mirdeep.run(data) return data
[ "def", "run_align", "(", "*", "data", ")", ":", "work_dir", "=", "dd", ".", "get_work_dir", "(", "data", "[", "0", "]", "[", "0", "]", ")", "out_dir", "=", "op", ".", "join", "(", "work_dir", ",", "\"seqcluster\"", ",", "\"prepare\"", ")", "seq_out",...
Prepare data to run alignment step, only once for each project
[ "Prepare", "data", "to", "run", "alignment", "step", "only", "once", "for", "each", "project" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/srna/group.py#L58-L80
224,039
bcbio/bcbio-nextgen
bcbio/srna/group.py
run_cluster
def run_cluster(*data): """ Run seqcluster cluster to detect smallRNA clusters """ sample = data[0][0] tools = dd.get_expression_caller(data[0][0]) work_dir = dd.get_work_dir(sample) out_dir = op.join(work_dir, "seqcluster", "cluster") out_dir = op.abspath(safe_makedir(out_dir)) prepare_dir = op.join(work_dir, "seqcluster", "prepare") bam_file = data[0][0]["cluster_bam"] if "seqcluster" in tools: gtf_file = dd.get_transcriptome_gtf(sample) if dd.get_transcriptome_gtf(sample) else dd.get_srna_gtf_file(sample) sample["seqcluster"] = _cluster(bam_file, data[0][0]["seqcluster_prepare_ma"], out_dir, dd.get_ref_file(sample), gtf_file) sample["report"] = _report(sample, dd.get_ref_file(sample)) if "mirge" in tools: sample["mirge"] = mirge.run(data) out_mirna = _make_isomir_counts(data, out_dir=op.join(work_dir, "mirbase")) if out_mirna: sample = dd.set_mirna_counts(sample, out_mirna[0]) sample = dd.set_isomir_counts(sample, out_mirna[1]) out_novel = _make_isomir_counts(data, "seqbuster_novel", op.join(work_dir, "mirdeep2"), "_novel") if out_novel: sample = dd.set_novel_mirna_counts(sample, out_novel[0]) sample = dd.set_novel_isomir_counts(sample, out_novel[1]) data[0][0] = sample data = spikein.combine_spikein(data) return data
python
def run_cluster(*data): """ Run seqcluster cluster to detect smallRNA clusters """ sample = data[0][0] tools = dd.get_expression_caller(data[0][0]) work_dir = dd.get_work_dir(sample) out_dir = op.join(work_dir, "seqcluster", "cluster") out_dir = op.abspath(safe_makedir(out_dir)) prepare_dir = op.join(work_dir, "seqcluster", "prepare") bam_file = data[0][0]["cluster_bam"] if "seqcluster" in tools: gtf_file = dd.get_transcriptome_gtf(sample) if dd.get_transcriptome_gtf(sample) else dd.get_srna_gtf_file(sample) sample["seqcluster"] = _cluster(bam_file, data[0][0]["seqcluster_prepare_ma"], out_dir, dd.get_ref_file(sample), gtf_file) sample["report"] = _report(sample, dd.get_ref_file(sample)) if "mirge" in tools: sample["mirge"] = mirge.run(data) out_mirna = _make_isomir_counts(data, out_dir=op.join(work_dir, "mirbase")) if out_mirna: sample = dd.set_mirna_counts(sample, out_mirna[0]) sample = dd.set_isomir_counts(sample, out_mirna[1]) out_novel = _make_isomir_counts(data, "seqbuster_novel", op.join(work_dir, "mirdeep2"), "_novel") if out_novel: sample = dd.set_novel_mirna_counts(sample, out_novel[0]) sample = dd.set_novel_isomir_counts(sample, out_novel[1]) data[0][0] = sample data = spikein.combine_spikein(data) return data
[ "def", "run_cluster", "(", "*", "data", ")", ":", "sample", "=", "data", "[", "0", "]", "[", "0", "]", "tools", "=", "dd", ".", "get_expression_caller", "(", "data", "[", "0", "]", "[", "0", "]", ")", "work_dir", "=", "dd", ".", "get_work_dir", "...
Run seqcluster cluster to detect smallRNA clusters
[ "Run", "seqcluster", "cluster", "to", "detect", "smallRNA", "clusters" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/srna/group.py#L82-L114
224,040
bcbio/bcbio-nextgen
bcbio/srna/group.py
_cluster
def _cluster(bam_file, ma_file, out_dir, reference, annotation_file=None): """ Connect to seqcluster to run cluster with python directly """ seqcluster = op.join(get_bcbio_bin(), "seqcluster") # cl = ["cluster", "-o", out_dir, "-m", ma_file, "-a", bam_file, "-r", reference] if annotation_file: annotation_file = "-g " + annotation_file else: annotation_file = "" if not file_exists(op.join(out_dir, "counts.tsv")): cmd = ("{seqcluster} cluster -o {out_dir} -m {ma_file} -a {bam_file} -r {reference} {annotation_file}") do.run(cmd.format(**locals()), "Running seqcluster.") counts = op.join(out_dir, "counts.tsv") stats = op.join(out_dir, "read_stats.tsv") json = op.join(out_dir, "seqcluster.json") return {'out_dir': out_dir, 'count_file': counts, 'stat_file': stats, 'json': json}
python
def _cluster(bam_file, ma_file, out_dir, reference, annotation_file=None): """ Connect to seqcluster to run cluster with python directly """ seqcluster = op.join(get_bcbio_bin(), "seqcluster") # cl = ["cluster", "-o", out_dir, "-m", ma_file, "-a", bam_file, "-r", reference] if annotation_file: annotation_file = "-g " + annotation_file else: annotation_file = "" if not file_exists(op.join(out_dir, "counts.tsv")): cmd = ("{seqcluster} cluster -o {out_dir} -m {ma_file} -a {bam_file} -r {reference} {annotation_file}") do.run(cmd.format(**locals()), "Running seqcluster.") counts = op.join(out_dir, "counts.tsv") stats = op.join(out_dir, "read_stats.tsv") json = op.join(out_dir, "seqcluster.json") return {'out_dir': out_dir, 'count_file': counts, 'stat_file': stats, 'json': json}
[ "def", "_cluster", "(", "bam_file", ",", "ma_file", ",", "out_dir", ",", "reference", ",", "annotation_file", "=", "None", ")", ":", "seqcluster", "=", "op", ".", "join", "(", "get_bcbio_bin", "(", ")", ",", "\"seqcluster\"", ")", "# cl = [\"cluster\", \"-o\",...
Connect to seqcluster to run cluster with python directly
[ "Connect", "to", "seqcluster", "to", "run", "cluster", "with", "python", "directly" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/srna/group.py#L116-L133
224,041
bcbio/bcbio-nextgen
bcbio/srna/group.py
_report
def _report(data, reference): """ Run report of seqcluster to get browser options for results """ seqcluster = op.join(get_bcbio_bin(), "seqcluster") work_dir = dd.get_work_dir(data) out_dir = safe_makedir(os.path.join(work_dir, "seqcluster", "report")) out_file = op.join(out_dir, "seqcluster.db") json = op.join(work_dir, "seqcluster", "cluster", "seqcluster.json") cmd = ("{seqcluster} report -o {out_dir} -r {reference} -j {json}") if not file_exists(out_file): do.run(cmd.format(**locals()), "Run report on clusters") return out_file
python
def _report(data, reference): """ Run report of seqcluster to get browser options for results """ seqcluster = op.join(get_bcbio_bin(), "seqcluster") work_dir = dd.get_work_dir(data) out_dir = safe_makedir(os.path.join(work_dir, "seqcluster", "report")) out_file = op.join(out_dir, "seqcluster.db") json = op.join(work_dir, "seqcluster", "cluster", "seqcluster.json") cmd = ("{seqcluster} report -o {out_dir} -r {reference} -j {json}") if not file_exists(out_file): do.run(cmd.format(**locals()), "Run report on clusters") return out_file
[ "def", "_report", "(", "data", ",", "reference", ")", ":", "seqcluster", "=", "op", ".", "join", "(", "get_bcbio_bin", "(", ")", ",", "\"seqcluster\"", ")", "work_dir", "=", "dd", ".", "get_work_dir", "(", "data", ")", "out_dir", "=", "safe_makedir", "("...
Run report of seqcluster to get browser options for results
[ "Run", "report", "of", "seqcluster", "to", "get", "browser", "options", "for", "results" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/srna/group.py#L135-L147
224,042
bcbio/bcbio-nextgen
bcbio/srna/group.py
report
def report(data): """Create a Rmd report for small RNAseq analysis""" work_dir = dd.get_work_dir(data[0][0]) out_dir = op.join(work_dir, "report") safe_makedir(out_dir) summary_file = op.join(out_dir, "summary.csv") with file_transaction(summary_file) as out_tx: with open(out_tx, 'w') as out_handle: out_handle.write("sample_id,%s\n" % _guess_header(data[0][0])) for sample in data: info = sample[0] group = _guess_group(info) files = info["seqbuster"] if "seqbuster" in info else "None" out_handle.write(",".join([dd.get_sample_name(info), group]) + "\n") _modify_report(work_dir, out_dir) return summary_file
python
def report(data): """Create a Rmd report for small RNAseq analysis""" work_dir = dd.get_work_dir(data[0][0]) out_dir = op.join(work_dir, "report") safe_makedir(out_dir) summary_file = op.join(out_dir, "summary.csv") with file_transaction(summary_file) as out_tx: with open(out_tx, 'w') as out_handle: out_handle.write("sample_id,%s\n" % _guess_header(data[0][0])) for sample in data: info = sample[0] group = _guess_group(info) files = info["seqbuster"] if "seqbuster" in info else "None" out_handle.write(",".join([dd.get_sample_name(info), group]) + "\n") _modify_report(work_dir, out_dir) return summary_file
[ "def", "report", "(", "data", ")", ":", "work_dir", "=", "dd", ".", "get_work_dir", "(", "data", "[", "0", "]", "[", "0", "]", ")", "out_dir", "=", "op", ".", "join", "(", "work_dir", ",", "\"report\"", ")", "safe_makedir", "(", "out_dir", ")", "su...
Create a Rmd report for small RNAseq analysis
[ "Create", "a", "Rmd", "report", "for", "small", "RNAseq", "analysis" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/srna/group.py#L149-L165
224,043
bcbio/bcbio-nextgen
bcbio/srna/group.py
_modify_report
def _modify_report(summary_path, out_dir): """Read Rmd template and dump with project path.""" summary_path = op.abspath(summary_path) template = op.normpath(op.join(op.dirname(op.realpath(template_seqcluster.__file__)), "report.rmd")) content = open(template).read() out_content = string.Template(content).safe_substitute({'path_abs': summary_path}) out_file = op.join(out_dir, "srna_report.rmd") with open(out_file, 'w') as out_handle: out_handle.write(out_content) return out_file
python
def _modify_report(summary_path, out_dir): """Read Rmd template and dump with project path.""" summary_path = op.abspath(summary_path) template = op.normpath(op.join(op.dirname(op.realpath(template_seqcluster.__file__)), "report.rmd")) content = open(template).read() out_content = string.Template(content).safe_substitute({'path_abs': summary_path}) out_file = op.join(out_dir, "srna_report.rmd") with open(out_file, 'w') as out_handle: out_handle.write(out_content) return out_file
[ "def", "_modify_report", "(", "summary_path", ",", "out_dir", ")", ":", "summary_path", "=", "op", ".", "abspath", "(", "summary_path", ")", "template", "=", "op", ".", "normpath", "(", "op", ".", "join", "(", "op", ".", "dirname", "(", "op", ".", "rea...
Read Rmd template and dump with project path.
[ "Read", "Rmd", "template", "and", "dump", "with", "project", "path", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/srna/group.py#L183-L192
224,044
bcbio/bcbio-nextgen
bcbio/srna/group.py
_make_isomir_counts
def _make_isomir_counts(data, srna_type="seqbuster", out_dir=None, stem=""): """ Parse miraligner files to create count matrix. """ work_dir = dd.get_work_dir(data[0][0]) if not out_dir: out_dir = op.join(work_dir, "mirbase") out_novel_isomir = append_stem(op.join(out_dir, "counts.tsv"), stem) out_novel_mirna = append_stem(op.join(out_dir, "counts_mirna.tsv"), stem) logger.debug("Create %s count data at %s." % (srna_type, out_dir)) if file_exists(out_novel_mirna): return [out_novel_mirna, out_novel_isomir] out_dts = [] for sample in data: if sample[0].get(srna_type): miraligner_fn = sample[0][srna_type] reads = _read_miraligner(miraligner_fn) if reads: out_file, dt, dt_pre = _tab_output(reads, miraligner_fn + ".back", dd.get_sample_name(sample[0])) out_dts.append(dt) else: logger.debug("WARNING::%s has NOT miRNA annotated for %s. Check if fasta files is small or species value." % (dd.get_sample_name(sample[0]), srna_type)) if out_dts: out_files = _create_counts(out_dts, out_dir) out_files = [move_safe(out_files[0], out_novel_isomir), move_safe(out_files[1], out_novel_mirna)] return out_files else: logger.debug("WARNING::any samples have miRNA annotated for %s. Check if fasta files is small or species value." % srna_type)
python
def _make_isomir_counts(data, srna_type="seqbuster", out_dir=None, stem=""): """ Parse miraligner files to create count matrix. """ work_dir = dd.get_work_dir(data[0][0]) if not out_dir: out_dir = op.join(work_dir, "mirbase") out_novel_isomir = append_stem(op.join(out_dir, "counts.tsv"), stem) out_novel_mirna = append_stem(op.join(out_dir, "counts_mirna.tsv"), stem) logger.debug("Create %s count data at %s." % (srna_type, out_dir)) if file_exists(out_novel_mirna): return [out_novel_mirna, out_novel_isomir] out_dts = [] for sample in data: if sample[0].get(srna_type): miraligner_fn = sample[0][srna_type] reads = _read_miraligner(miraligner_fn) if reads: out_file, dt, dt_pre = _tab_output(reads, miraligner_fn + ".back", dd.get_sample_name(sample[0])) out_dts.append(dt) else: logger.debug("WARNING::%s has NOT miRNA annotated for %s. Check if fasta files is small or species value." % (dd.get_sample_name(sample[0]), srna_type)) if out_dts: out_files = _create_counts(out_dts, out_dir) out_files = [move_safe(out_files[0], out_novel_isomir), move_safe(out_files[1], out_novel_mirna)] return out_files else: logger.debug("WARNING::any samples have miRNA annotated for %s. Check if fasta files is small or species value." % srna_type)
[ "def", "_make_isomir_counts", "(", "data", ",", "srna_type", "=", "\"seqbuster\"", ",", "out_dir", "=", "None", ",", "stem", "=", "\"\"", ")", ":", "work_dir", "=", "dd", ".", "get_work_dir", "(", "data", "[", "0", "]", "[", "0", "]", ")", "if", "not...
Parse miraligner files to create count matrix.
[ "Parse", "miraligner", "files", "to", "create", "count", "matrix", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/srna/group.py#L194-L221
224,045
bcbio/bcbio-nextgen
bcbio/bam/coverage.py
_split_regions
def _split_regions(chrom, start, end): """Split regions longer than 100kb into smaller sections. """ window_size = 1e5 if end - start < window_size * 5: return [(chrom, start, end)] else: out = [] for r in pybedtools.BedTool().window_maker(w=window_size, b=pybedtools.BedTool("%s\t%s\t%s" % (chrom, start, end), from_string=True)): out.append((r.chrom, r.start, r.end)) return out
python
def _split_regions(chrom, start, end): """Split regions longer than 100kb into smaller sections. """ window_size = 1e5 if end - start < window_size * 5: return [(chrom, start, end)] else: out = [] for r in pybedtools.BedTool().window_maker(w=window_size, b=pybedtools.BedTool("%s\t%s\t%s" % (chrom, start, end), from_string=True)): out.append((r.chrom, r.start, r.end)) return out
[ "def", "_split_regions", "(", "chrom", ",", "start", ",", "end", ")", ":", "window_size", "=", "1e5", "if", "end", "-", "start", "<", "window_size", "*", "5", ":", "return", "[", "(", "chrom", ",", "start", ",", "end", ")", "]", "else", ":", "out",...
Split regions longer than 100kb into smaller sections.
[ "Split", "regions", "longer", "than", "100kb", "into", "smaller", "sections", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/coverage.py#L96-L108
224,046
bcbio/bcbio-nextgen
bcbio/bam/coverage.py
plot_multiple_regions_coverage
def plot_multiple_regions_coverage(samples, out_file, data, region_bed=None, stem_bed=None): """ given a list of bcbio samples and a bed file or BedTool of regions, makes a plot of the coverage in the regions for the set of samples if given a bed file or BedTool of locations in stem_bed with a label, plots lollipops at those locations """ mpl.use('Agg', force=True) PAD = 100 if file_exists(out_file): return out_file in_bams = [dd.get_align_bam(x) for x in samples] samplenames = [dd.get_sample_name(x) for x in samples] if isinstance(region_bed, six.string_types): region_bed = pybedtools.BedTool(region_bed) if isinstance(stem_bed, six.string_types): stem_bed = pybedtools.BedTool(stem_bed) if stem_bed is not None: # tabix indexed bedtools eval to false stem_bed = stem_bed.tabix() plt.clf() plt.cla() with file_transaction(out_file) as tx_out_file: with backend_pdf.PdfPages(tx_out_file) as pdf_out: sns.despine() for line in region_bed: for chrom, start, end in _split_regions(line.chrom, max(line.start - PAD, 0), line.end + PAD): df = _combine_regional_coverage(in_bams, samplenames, chrom, start, end, os.path.dirname(tx_out_file), data) plot = sns.tsplot(df, time="position", unit="chrom", value="coverage", condition="sample") if stem_bed is not None: # tabix indexed bedtools eval to false interval = pybedtools.Interval(chrom, start, end) _add_stems_to_plot(interval, stem_bed, samples, plot) plt.title("{chrom}:{start}-{end}".format(**locals())) pdf_out.savefig(plot.get_figure()) plt.close() return out_file
python
def plot_multiple_regions_coverage(samples, out_file, data, region_bed=None, stem_bed=None): """ given a list of bcbio samples and a bed file or BedTool of regions, makes a plot of the coverage in the regions for the set of samples if given a bed file or BedTool of locations in stem_bed with a label, plots lollipops at those locations """ mpl.use('Agg', force=True) PAD = 100 if file_exists(out_file): return out_file in_bams = [dd.get_align_bam(x) for x in samples] samplenames = [dd.get_sample_name(x) for x in samples] if isinstance(region_bed, six.string_types): region_bed = pybedtools.BedTool(region_bed) if isinstance(stem_bed, six.string_types): stem_bed = pybedtools.BedTool(stem_bed) if stem_bed is not None: # tabix indexed bedtools eval to false stem_bed = stem_bed.tabix() plt.clf() plt.cla() with file_transaction(out_file) as tx_out_file: with backend_pdf.PdfPages(tx_out_file) as pdf_out: sns.despine() for line in region_bed: for chrom, start, end in _split_regions(line.chrom, max(line.start - PAD, 0), line.end + PAD): df = _combine_regional_coverage(in_bams, samplenames, chrom, start, end, os.path.dirname(tx_out_file), data) plot = sns.tsplot(df, time="position", unit="chrom", value="coverage", condition="sample") if stem_bed is not None: # tabix indexed bedtools eval to false interval = pybedtools.Interval(chrom, start, end) _add_stems_to_plot(interval, stem_bed, samples, plot) plt.title("{chrom}:{start}-{end}".format(**locals())) pdf_out.savefig(plot.get_figure()) plt.close() return out_file
[ "def", "plot_multiple_regions_coverage", "(", "samples", ",", "out_file", ",", "data", ",", "region_bed", "=", "None", ",", "stem_bed", "=", "None", ")", ":", "mpl", ".", "use", "(", "'Agg'", ",", "force", "=", "True", ")", "PAD", "=", "100", "if", "fi...
given a list of bcbio samples and a bed file or BedTool of regions, makes a plot of the coverage in the regions for the set of samples if given a bed file or BedTool of locations in stem_bed with a label, plots lollipops at those locations
[ "given", "a", "list", "of", "bcbio", "samples", "and", "a", "bed", "file", "or", "BedTool", "of", "regions", "makes", "a", "plot", "of", "the", "coverage", "in", "the", "regions", "for", "the", "set", "of", "samples" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/coverage.py#L110-L148
224,047
bcbio/bcbio-nextgen
bcbio/variation/mutect.py
_config_params
def _config_params(base_config, assoc_files, region, out_file, items): """Add parameters based on configuration variables, associated files and genomic regions. """ params = [] dbsnp = assoc_files.get("dbsnp") if dbsnp: params += ["--dbsnp", dbsnp] cosmic = assoc_files.get("cosmic") if cosmic: params += ["--cosmic", cosmic] variant_regions = bedutils.population_variant_regions(items) region = subset_variant_regions(variant_regions, region, out_file, items) if region: params += ["-L", bamprep.region_to_gatk(region), "--interval_set_rule", "INTERSECTION"] # set low frequency calling parameter if adjusted # to set other MuTect parameters on contamination, pass options to resources for mutect # --fraction_contamination --minimum_normal_allele_fraction min_af = tz.get_in(["algorithm", "min_allele_fraction"], base_config) if min_af: params += ["--minimum_mutation_cell_fraction", "%.2f" % (min_af / 100.0)] resources = config_utils.get_resources("mutect", base_config) if resources.get("options") is not None: params += [str(x) for x in resources.get("options", [])] # Output quality scores if "--enable_qscore_output" not in params: params.append("--enable_qscore_output") # drf not currently supported in MuTect to turn off duplicateread filter # params += gatk.standard_cl_params(items) return params
python
def _config_params(base_config, assoc_files, region, out_file, items): """Add parameters based on configuration variables, associated files and genomic regions. """ params = [] dbsnp = assoc_files.get("dbsnp") if dbsnp: params += ["--dbsnp", dbsnp] cosmic = assoc_files.get("cosmic") if cosmic: params += ["--cosmic", cosmic] variant_regions = bedutils.population_variant_regions(items) region = subset_variant_regions(variant_regions, region, out_file, items) if region: params += ["-L", bamprep.region_to_gatk(region), "--interval_set_rule", "INTERSECTION"] # set low frequency calling parameter if adjusted # to set other MuTect parameters on contamination, pass options to resources for mutect # --fraction_contamination --minimum_normal_allele_fraction min_af = tz.get_in(["algorithm", "min_allele_fraction"], base_config) if min_af: params += ["--minimum_mutation_cell_fraction", "%.2f" % (min_af / 100.0)] resources = config_utils.get_resources("mutect", base_config) if resources.get("options") is not None: params += [str(x) for x in resources.get("options", [])] # Output quality scores if "--enable_qscore_output" not in params: params.append("--enable_qscore_output") # drf not currently supported in MuTect to turn off duplicateread filter # params += gatk.standard_cl_params(items) return params
[ "def", "_config_params", "(", "base_config", ",", "assoc_files", ",", "region", ",", "out_file", ",", "items", ")", ":", "params", "=", "[", "]", "dbsnp", "=", "assoc_files", ".", "get", "(", "\"dbsnp\"", ")", "if", "dbsnp", ":", "params", "+=", "[", "...
Add parameters based on configuration variables, associated files and genomic regions.
[ "Add", "parameters", "based", "on", "configuration", "variables", "associated", "files", "and", "genomic", "regions", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/mutect.py#L46-L75
224,048
bcbio/bcbio-nextgen
bcbio/variation/mutect.py
_mutect_call_prep
def _mutect_call_prep(align_bams, items, ref_file, assoc_files, region=None, out_file=None): """Preparation work for MuTect. """ base_config = items[0]["config"] broad_runner = broad.runner_from_path("picard", base_config) broad_runner.run_fn("picard_index_ref", ref_file) broad_runner = broad.runner_from_config(base_config, "mutect") _check_mutect_version(broad_runner) for x in align_bams: bam.index(x, base_config) paired = vcfutils.get_paired_bams(align_bams, items) if not paired: raise ValueError("Specified MuTect calling but 'tumor' phenotype not present in batch\n" "https://bcbio-nextgen.readthedocs.org/en/latest/contents/" "pipelines.html#cancer-variant-calling\n" "for samples: %s" % ", " .join([dd.get_sample_name(x) for x in items])) params = ["-R", ref_file, "-T", "MuTect", "-U", "ALLOW_N_CIGAR_READS"] params += ["--read_filter", "NotPrimaryAlignment"] params += ["-I:tumor", paired.tumor_bam] params += ["--tumor_sample_name", paired.tumor_name] if paired.normal_bam is not None: params += ["-I:normal", paired.normal_bam] params += ["--normal_sample_name", paired.normal_name] if paired.normal_panel is not None: params += ["--normal_panel", paired.normal_panel] params += _config_params(base_config, assoc_files, region, out_file, items) return broad_runner, params
python
def _mutect_call_prep(align_bams, items, ref_file, assoc_files, region=None, out_file=None): """Preparation work for MuTect. """ base_config = items[0]["config"] broad_runner = broad.runner_from_path("picard", base_config) broad_runner.run_fn("picard_index_ref", ref_file) broad_runner = broad.runner_from_config(base_config, "mutect") _check_mutect_version(broad_runner) for x in align_bams: bam.index(x, base_config) paired = vcfutils.get_paired_bams(align_bams, items) if not paired: raise ValueError("Specified MuTect calling but 'tumor' phenotype not present in batch\n" "https://bcbio-nextgen.readthedocs.org/en/latest/contents/" "pipelines.html#cancer-variant-calling\n" "for samples: %s" % ", " .join([dd.get_sample_name(x) for x in items])) params = ["-R", ref_file, "-T", "MuTect", "-U", "ALLOW_N_CIGAR_READS"] params += ["--read_filter", "NotPrimaryAlignment"] params += ["-I:tumor", paired.tumor_bam] params += ["--tumor_sample_name", paired.tumor_name] if paired.normal_bam is not None: params += ["-I:normal", paired.normal_bam] params += ["--normal_sample_name", paired.normal_name] if paired.normal_panel is not None: params += ["--normal_panel", paired.normal_panel] params += _config_params(base_config, assoc_files, region, out_file, items) return broad_runner, params
[ "def", "_mutect_call_prep", "(", "align_bams", ",", "items", ",", "ref_file", ",", "assoc_files", ",", "region", "=", "None", ",", "out_file", "=", "None", ")", ":", "base_config", "=", "items", "[", "0", "]", "[", "\"config\"", "]", "broad_runner", "=", ...
Preparation work for MuTect.
[ "Preparation", "work", "for", "MuTect", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/mutect.py#L77-L106
224,049
bcbio/bcbio-nextgen
bcbio/variation/mutect.py
_SID_call_prep
def _SID_call_prep(align_bams, items, ref_file, assoc_files, region=None, out_file=None): """Preparation work for SomaticIndelDetector. """ base_config = items[0]["config"] for x in align_bams: bam.index(x, base_config) params = ["-R", ref_file, "-T", "SomaticIndelDetector", "-U", "ALLOW_N_CIGAR_READS"] # Limit per base read start count to between 200-10000, i.e. from any base # can no more 10000 new reads begin. # Further, limit maxNumberOfReads accordingly, otherwise SID discards # windows for high coverage panels. paired = vcfutils.get_paired_bams(align_bams, items) params += ["--read_filter", "NotPrimaryAlignment"] params += ["-I:tumor", paired.tumor_bam] min_af = float(get_in(paired.tumor_config, ("algorithm", "min_allele_fraction"), 10)) / 100.0 if paired.normal_bam is not None: params += ["-I:normal", paired.normal_bam] # notice there must be at least 4 reads of coverage in normal params += ["--filter_expressions", "T_COV<6||N_COV<4||T_INDEL_F<%s||T_INDEL_CF<0.7" % min_af] else: params += ["--unpaired"] params += ["--filter_expressions", "COV<6||INDEL_F<%s||INDEL_CF<0.7" % min_af] if region: params += ["-L", bamprep.region_to_gatk(region), "--interval_set_rule", "INTERSECTION"] return params
python
def _SID_call_prep(align_bams, items, ref_file, assoc_files, region=None, out_file=None): """Preparation work for SomaticIndelDetector. """ base_config = items[0]["config"] for x in align_bams: bam.index(x, base_config) params = ["-R", ref_file, "-T", "SomaticIndelDetector", "-U", "ALLOW_N_CIGAR_READS"] # Limit per base read start count to between 200-10000, i.e. from any base # can no more 10000 new reads begin. # Further, limit maxNumberOfReads accordingly, otherwise SID discards # windows for high coverage panels. paired = vcfutils.get_paired_bams(align_bams, items) params += ["--read_filter", "NotPrimaryAlignment"] params += ["-I:tumor", paired.tumor_bam] min_af = float(get_in(paired.tumor_config, ("algorithm", "min_allele_fraction"), 10)) / 100.0 if paired.normal_bam is not None: params += ["-I:normal", paired.normal_bam] # notice there must be at least 4 reads of coverage in normal params += ["--filter_expressions", "T_COV<6||N_COV<4||T_INDEL_F<%s||T_INDEL_CF<0.7" % min_af] else: params += ["--unpaired"] params += ["--filter_expressions", "COV<6||INDEL_F<%s||INDEL_CF<0.7" % min_af] if region: params += ["-L", bamprep.region_to_gatk(region), "--interval_set_rule", "INTERSECTION"] return params
[ "def", "_SID_call_prep", "(", "align_bams", ",", "items", ",", "ref_file", ",", "assoc_files", ",", "region", "=", "None", ",", "out_file", "=", "None", ")", ":", "base_config", "=", "items", "[", "0", "]", "[", "\"config\"", "]", "for", "x", "in", "al...
Preparation work for SomaticIndelDetector.
[ "Preparation", "work", "for", "SomaticIndelDetector", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/mutect.py#L191-L217
224,050
bcbio/bcbio-nextgen
bcbio/variation/mutect.py
_fix_mutect_output
def _fix_mutect_output(orig_file, config, out_file, is_paired): """Adjust MuTect output to match other callers. - Rename allelic fraction field in mutect output from FA to FREQ to standarize with other tools - Remove extra 'none' samples introduced when calling tumor-only samples """ out_file_noc = out_file.replace(".vcf.gz", ".vcf") none_index = -1 with file_transaction(config, out_file_noc) as tx_out_file: with open_gzipsafe(orig_file) as in_handle: with open(tx_out_file, 'w') as out_handle: for line in in_handle: if not is_paired and line.startswith("#CHROM"): parts = line.rstrip().split("\t") none_index = parts.index("none") del parts[none_index] line = "\t".join(parts) + "\n" elif line.startswith("##FORMAT=<ID=FA"): line = line.replace("=FA", "=FREQ") elif not line.startswith("#"): if none_index > 0: parts = line.rstrip().split("\t") del parts[none_index] line = "\t".join(parts) + "\n" line = line.replace("FA", "FREQ") out_handle.write(line) return bgzip_and_index(out_file_noc, config)
python
def _fix_mutect_output(orig_file, config, out_file, is_paired): """Adjust MuTect output to match other callers. - Rename allelic fraction field in mutect output from FA to FREQ to standarize with other tools - Remove extra 'none' samples introduced when calling tumor-only samples """ out_file_noc = out_file.replace(".vcf.gz", ".vcf") none_index = -1 with file_transaction(config, out_file_noc) as tx_out_file: with open_gzipsafe(orig_file) as in_handle: with open(tx_out_file, 'w') as out_handle: for line in in_handle: if not is_paired and line.startswith("#CHROM"): parts = line.rstrip().split("\t") none_index = parts.index("none") del parts[none_index] line = "\t".join(parts) + "\n" elif line.startswith("##FORMAT=<ID=FA"): line = line.replace("=FA", "=FREQ") elif not line.startswith("#"): if none_index > 0: parts = line.rstrip().split("\t") del parts[none_index] line = "\t".join(parts) + "\n" line = line.replace("FA", "FREQ") out_handle.write(line) return bgzip_and_index(out_file_noc, config)
[ "def", "_fix_mutect_output", "(", "orig_file", ",", "config", ",", "out_file", ",", "is_paired", ")", ":", "out_file_noc", "=", "out_file", ".", "replace", "(", "\".vcf.gz\"", ",", "\".vcf\"", ")", "none_index", "=", "-", "1", "with", "file_transaction", "(", ...
Adjust MuTect output to match other callers. - Rename allelic fraction field in mutect output from FA to FREQ to standarize with other tools - Remove extra 'none' samples introduced when calling tumor-only samples
[ "Adjust", "MuTect", "output", "to", "match", "other", "callers", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/mutect.py#L219-L245
224,051
bcbio/bcbio-nextgen
bcbio/variation/population.py
prep_gemini_db
def prep_gemini_db(fnames, call_info, samples, extras): """Prepare a gemini database from VCF inputs prepared with snpEff. """ data = samples[0] name, caller, is_batch = call_info build_type = _get_build_type(fnames, samples, caller) out_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "gemini")) gemini_vcf = get_multisample_vcf(fnames, name, caller, data) # If we're building a gemini database, normalize the inputs if build_type: passonly = all("gemini_allvariants" not in dd.get_tools_on(d) for d in samples) gemini_vcf = normalize.normalize(gemini_vcf, data, passonly=passonly) decomposed = True else: decomposed = False ann_vcf = run_vcfanno(gemini_vcf, data, decomposed) gemini_db = os.path.join(out_dir, "%s-%s.db" % (name, caller)) if ann_vcf and build_type and not utils.file_exists(gemini_db): ped_file = create_ped_file(samples + extras, gemini_vcf) # Original approach for hg19/GRCh37 if vcfanno.is_human(data, builds=["37"]) and "gemini_orig" in build_type: gemini_db = create_gemini_db_orig(gemini_vcf, data, gemini_db, ped_file) else: gemini_db = create_gemini_db(ann_vcf, data, gemini_db, ped_file) # only pass along gemini_vcf_downstream if uniquely created here if os.path.islink(gemini_vcf): gemini_vcf = None return [[(name, caller), {"db": gemini_db if utils.file_exists(gemini_db) else None, "vcf": ann_vcf or gemini_vcf, "decomposed": decomposed}]]
python
def prep_gemini_db(fnames, call_info, samples, extras): """Prepare a gemini database from VCF inputs prepared with snpEff. """ data = samples[0] name, caller, is_batch = call_info build_type = _get_build_type(fnames, samples, caller) out_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "gemini")) gemini_vcf = get_multisample_vcf(fnames, name, caller, data) # If we're building a gemini database, normalize the inputs if build_type: passonly = all("gemini_allvariants" not in dd.get_tools_on(d) for d in samples) gemini_vcf = normalize.normalize(gemini_vcf, data, passonly=passonly) decomposed = True else: decomposed = False ann_vcf = run_vcfanno(gemini_vcf, data, decomposed) gemini_db = os.path.join(out_dir, "%s-%s.db" % (name, caller)) if ann_vcf and build_type and not utils.file_exists(gemini_db): ped_file = create_ped_file(samples + extras, gemini_vcf) # Original approach for hg19/GRCh37 if vcfanno.is_human(data, builds=["37"]) and "gemini_orig" in build_type: gemini_db = create_gemini_db_orig(gemini_vcf, data, gemini_db, ped_file) else: gemini_db = create_gemini_db(ann_vcf, data, gemini_db, ped_file) # only pass along gemini_vcf_downstream if uniquely created here if os.path.islink(gemini_vcf): gemini_vcf = None return [[(name, caller), {"db": gemini_db if utils.file_exists(gemini_db) else None, "vcf": ann_vcf or gemini_vcf, "decomposed": decomposed}]]
[ "def", "prep_gemini_db", "(", "fnames", ",", "call_info", ",", "samples", ",", "extras", ")", ":", "data", "=", "samples", "[", "0", "]", "name", ",", "caller", ",", "is_batch", "=", "call_info", "build_type", "=", "_get_build_type", "(", "fnames", ",", ...
Prepare a gemini database from VCF inputs prepared with snpEff.
[ "Prepare", "a", "gemini", "database", "from", "VCF", "inputs", "prepared", "with", "snpEff", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/population.py#L25-L54
224,052
bcbio/bcbio-nextgen
bcbio/variation/population.py
_back_compatible_gemini
def _back_compatible_gemini(conf_files, data): """Provide old install directory for configuration with GEMINI supplied tidy VCFs. Handles new style (bcbio installed) and old style (GEMINI installed) configuration and data locations. """ if vcfanno.is_human(data, builds=["37"]): for f in conf_files: if f and os.path.basename(f) == "gemini.conf" and os.path.exists(f): with open(f) as in_handle: for line in in_handle: if line.startswith("file"): fname = line.strip().split("=")[-1].replace('"', '').strip() if fname.find(".tidy.") > 0: return install.get_gemini_dir(data) return None
python
def _back_compatible_gemini(conf_files, data): """Provide old install directory for configuration with GEMINI supplied tidy VCFs. Handles new style (bcbio installed) and old style (GEMINI installed) configuration and data locations. """ if vcfanno.is_human(data, builds=["37"]): for f in conf_files: if f and os.path.basename(f) == "gemini.conf" and os.path.exists(f): with open(f) as in_handle: for line in in_handle: if line.startswith("file"): fname = line.strip().split("=")[-1].replace('"', '').strip() if fname.find(".tidy.") > 0: return install.get_gemini_dir(data) return None
[ "def", "_back_compatible_gemini", "(", "conf_files", ",", "data", ")", ":", "if", "vcfanno", ".", "is_human", "(", "data", ",", "builds", "=", "[", "\"37\"", "]", ")", ":", "for", "f", "in", "conf_files", ":", "if", "f", "and", "os", ".", "path", "."...
Provide old install directory for configuration with GEMINI supplied tidy VCFs. Handles new style (bcbio installed) and old style (GEMINI installed) configuration and data locations.
[ "Provide", "old", "install", "directory", "for", "configuration", "with", "GEMINI", "supplied", "tidy", "VCFs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/population.py#L56-L71
224,053
bcbio/bcbio-nextgen
bcbio/variation/population.py
run_vcfanno
def run_vcfanno(vcf_file, data, decomposed=False): """Run vcfanno, providing annotations from external databases if needed. Puts together lua and conf files from multiple inputs by file names. """ conf_files = dd.get_vcfanno(data) if conf_files: with_basepaths = collections.defaultdict(list) gemini_basepath = _back_compatible_gemini(conf_files, data) for f in conf_files: name = os.path.splitext(os.path.basename(f))[0] if f.endswith(".lua"): conf_file = None lua_file = f else: conf_file = f lua_file = "%s.lua" % utils.splitext_plus(conf_file)[0] if lua_file and not os.path.exists(lua_file): lua_file = None data_basepath = gemini_basepath if name == "gemini" else None if conf_file and os.path.exists(conf_file): with_basepaths[(data_basepath, name)].append(conf_file) if lua_file and os.path.exists(lua_file): with_basepaths[(data_basepath, name)].append(lua_file) conf_files = with_basepaths.items() out_file = None if conf_files: VcfannoIn = collections.namedtuple("VcfannoIn", ["conf", "lua"]) bp_files = collections.defaultdict(list) for (data_basepath, name), anno_files in conf_files: anno_files = list(set(anno_files)) if len(anno_files) == 1: cur = VcfannoIn(anno_files[0], None) elif len(anno_files) == 2: lua_files = [x for x in anno_files if x.endswith(".lua")] assert len(lua_files) == 1, anno_files lua_file = lua_files[0] anno_files.remove(lua_file) cur = VcfannoIn(anno_files[0], lua_file) else: raise ValueError("Unexpected annotation group %s" % anno_files) bp_files[data_basepath].append(cur) for data_basepath, anno_files in bp_files.items(): ann_file = vcfanno.run(vcf_file, [x.conf for x in anno_files], [x.lua for x in anno_files], data, basepath=data_basepath, decomposed=decomposed) if ann_file: out_file = ann_file vcf_file = ann_file return out_file
python
def run_vcfanno(vcf_file, data, decomposed=False): """Run vcfanno, providing annotations from external databases if needed. Puts together lua and conf files from multiple inputs by file names. """ conf_files = dd.get_vcfanno(data) if conf_files: with_basepaths = collections.defaultdict(list) gemini_basepath = _back_compatible_gemini(conf_files, data) for f in conf_files: name = os.path.splitext(os.path.basename(f))[0] if f.endswith(".lua"): conf_file = None lua_file = f else: conf_file = f lua_file = "%s.lua" % utils.splitext_plus(conf_file)[0] if lua_file and not os.path.exists(lua_file): lua_file = None data_basepath = gemini_basepath if name == "gemini" else None if conf_file and os.path.exists(conf_file): with_basepaths[(data_basepath, name)].append(conf_file) if lua_file and os.path.exists(lua_file): with_basepaths[(data_basepath, name)].append(lua_file) conf_files = with_basepaths.items() out_file = None if conf_files: VcfannoIn = collections.namedtuple("VcfannoIn", ["conf", "lua"]) bp_files = collections.defaultdict(list) for (data_basepath, name), anno_files in conf_files: anno_files = list(set(anno_files)) if len(anno_files) == 1: cur = VcfannoIn(anno_files[0], None) elif len(anno_files) == 2: lua_files = [x for x in anno_files if x.endswith(".lua")] assert len(lua_files) == 1, anno_files lua_file = lua_files[0] anno_files.remove(lua_file) cur = VcfannoIn(anno_files[0], lua_file) else: raise ValueError("Unexpected annotation group %s" % anno_files) bp_files[data_basepath].append(cur) for data_basepath, anno_files in bp_files.items(): ann_file = vcfanno.run(vcf_file, [x.conf for x in anno_files], [x.lua for x in anno_files], data, basepath=data_basepath, decomposed=decomposed) if ann_file: out_file = ann_file vcf_file = ann_file return out_file
[ "def", "run_vcfanno", "(", "vcf_file", ",", "data", ",", "decomposed", "=", "False", ")", ":", "conf_files", "=", "dd", ".", "get_vcfanno", "(", "data", ")", "if", "conf_files", ":", "with_basepaths", "=", "collections", ".", "defaultdict", "(", "list", ")...
Run vcfanno, providing annotations from external databases if needed. Puts together lua and conf files from multiple inputs by file names.
[ "Run", "vcfanno", "providing", "annotations", "from", "external", "databases", "if", "needed", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/population.py#L73-L123
224,054
bcbio/bcbio-nextgen
bcbio/variation/population.py
get_ped_info
def get_ped_info(data, samples): """Retrieve all PED info from metadata """ family_id = tz.get_in(["metadata", "family_id"], data, None) if not family_id: family_id = _find_shared_batch(samples) return { "gender": {"male": 1, "female": 2, "unknown": 0}.get(get_gender(data)), "individual_id": dd.get_sample_name(data), "family_id": family_id, "maternal_id": tz.get_in(["metadata", "maternal_id"], data, -9), "paternal_id": tz.get_in(["metadata", "paternal_id"], data, -9), "affected": get_affected_status(data), "ethnicity": tz.get_in(["metadata", "ethnicity"], data, -9) }
python
def get_ped_info(data, samples): """Retrieve all PED info from metadata """ family_id = tz.get_in(["metadata", "family_id"], data, None) if not family_id: family_id = _find_shared_batch(samples) return { "gender": {"male": 1, "female": 2, "unknown": 0}.get(get_gender(data)), "individual_id": dd.get_sample_name(data), "family_id": family_id, "maternal_id": tz.get_in(["metadata", "maternal_id"], data, -9), "paternal_id": tz.get_in(["metadata", "paternal_id"], data, -9), "affected": get_affected_status(data), "ethnicity": tz.get_in(["metadata", "ethnicity"], data, -9) }
[ "def", "get_ped_info", "(", "data", ",", "samples", ")", ":", "family_id", "=", "tz", ".", "get_in", "(", "[", "\"metadata\"", ",", "\"family_id\"", "]", ",", "data", ",", "None", ")", "if", "not", "family_id", ":", "family_id", "=", "_find_shared_batch", ...
Retrieve all PED info from metadata
[ "Retrieve", "all", "PED", "info", "from", "metadata" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/population.py#L224-L238
224,055
bcbio/bcbio-nextgen
bcbio/variation/population.py
create_ped_file
def create_ped_file(samples, base_vcf, out_dir=None): """Create a GEMINI-compatible PED file, including gender, family and phenotype information. Checks for a specified `ped` file in metadata, and will use sample information from this file before reconstituting from metadata information. """ out_file = "%s.ped" % utils.splitext_plus(base_vcf)[0] if out_dir: out_file = os.path.join(out_dir, os.path.basename(out_file)) sample_ped_lines = {} header = ["#Family_ID", "Individual_ID", "Paternal_ID", "Maternal_ID", "Sex", "Phenotype", "Ethnicity"] for md_ped in list(set([x for x in [tz.get_in(["metadata", "ped"], data) for data in samples] if x is not None])): with open(md_ped) as in_handle: reader = csv.reader(in_handle, dialect="excel-tab") for parts in reader: if parts[0].startswith("#") and len(parts) > len(header): header = header + parts[len(header):] else: sample_ped_lines[parts[1]] = parts if not utils.file_exists(out_file): with file_transaction(samples[0], out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: want_samples = set(vcfutils.get_samples(base_vcf)) writer = csv.writer(out_handle, dialect="excel-tab") writer.writerow(header) for data in samples: ped_info = get_ped_info(data, samples) sname = ped_info["individual_id"] if sname in want_samples: want_samples.remove(sname) if sname in sample_ped_lines: writer.writerow(sample_ped_lines[sname]) else: writer.writerow([ped_info["family_id"], ped_info["individual_id"], ped_info["paternal_id"], ped_info["maternal_id"], ped_info["gender"], ped_info["affected"], ped_info["ethnicity"]]) return out_file
python
def create_ped_file(samples, base_vcf, out_dir=None): """Create a GEMINI-compatible PED file, including gender, family and phenotype information. Checks for a specified `ped` file in metadata, and will use sample information from this file before reconstituting from metadata information. """ out_file = "%s.ped" % utils.splitext_plus(base_vcf)[0] if out_dir: out_file = os.path.join(out_dir, os.path.basename(out_file)) sample_ped_lines = {} header = ["#Family_ID", "Individual_ID", "Paternal_ID", "Maternal_ID", "Sex", "Phenotype", "Ethnicity"] for md_ped in list(set([x for x in [tz.get_in(["metadata", "ped"], data) for data in samples] if x is not None])): with open(md_ped) as in_handle: reader = csv.reader(in_handle, dialect="excel-tab") for parts in reader: if parts[0].startswith("#") and len(parts) > len(header): header = header + parts[len(header):] else: sample_ped_lines[parts[1]] = parts if not utils.file_exists(out_file): with file_transaction(samples[0], out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: want_samples = set(vcfutils.get_samples(base_vcf)) writer = csv.writer(out_handle, dialect="excel-tab") writer.writerow(header) for data in samples: ped_info = get_ped_info(data, samples) sname = ped_info["individual_id"] if sname in want_samples: want_samples.remove(sname) if sname in sample_ped_lines: writer.writerow(sample_ped_lines[sname]) else: writer.writerow([ped_info["family_id"], ped_info["individual_id"], ped_info["paternal_id"], ped_info["maternal_id"], ped_info["gender"], ped_info["affected"], ped_info["ethnicity"]]) return out_file
[ "def", "create_ped_file", "(", "samples", ",", "base_vcf", ",", "out_dir", "=", "None", ")", ":", "out_file", "=", "\"%s.ped\"", "%", "utils", ".", "splitext_plus", "(", "base_vcf", ")", "[", "0", "]", "if", "out_dir", ":", "out_file", "=", "os", ".", ...
Create a GEMINI-compatible PED file, including gender, family and phenotype information. Checks for a specified `ped` file in metadata, and will use sample information from this file before reconstituting from metadata information.
[ "Create", "a", "GEMINI", "-", "compatible", "PED", "file", "including", "gender", "family", "and", "phenotype", "information", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/population.py#L240-L278
224,056
bcbio/bcbio-nextgen
bcbio/variation/population.py
_is_small_vcf
def _is_small_vcf(vcf_file): """Check for small VCFs which we want to analyze quicker. """ count = 0 small_thresh = 250 with utils.open_gzipsafe(vcf_file) as in_handle: for line in in_handle: if not line.startswith("#"): count += 1 if count > small_thresh: return False return True
python
def _is_small_vcf(vcf_file): """Check for small VCFs which we want to analyze quicker. """ count = 0 small_thresh = 250 with utils.open_gzipsafe(vcf_file) as in_handle: for line in in_handle: if not line.startswith("#"): count += 1 if count > small_thresh: return False return True
[ "def", "_is_small_vcf", "(", "vcf_file", ")", ":", "count", "=", "0", "small_thresh", "=", "250", "with", "utils", ".", "open_gzipsafe", "(", "vcf_file", ")", "as", "in_handle", ":", "for", "line", "in", "in_handle", ":", "if", "not", "line", ".", "start...
Check for small VCFs which we want to analyze quicker.
[ "Check", "for", "small", "VCFs", "which", "we", "want", "to", "analyze", "quicker", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/population.py#L286-L297
224,057
bcbio/bcbio-nextgen
bcbio/variation/population.py
get_multisample_vcf
def get_multisample_vcf(fnames, name, caller, data): """Retrieve a multiple sample VCF file in a standard location. Handles inputs with multiple repeated input files from batches. """ unique_fnames = [] for f in fnames: if f not in unique_fnames: unique_fnames.append(f) out_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "gemini")) if len(unique_fnames) > 1: gemini_vcf = os.path.join(out_dir, "%s-%s.vcf.gz" % (name, caller)) vrn_file_batch = None for variant in data.get("variants", []): if variant["variantcaller"] == caller and variant.get("vrn_file_batch"): vrn_file_batch = variant["vrn_file_batch"] if vrn_file_batch: utils.symlink_plus(vrn_file_batch, gemini_vcf) return gemini_vcf else: return vcfutils.merge_variant_files(unique_fnames, gemini_vcf, dd.get_ref_file(data), data["config"]) else: gemini_vcf = os.path.join(out_dir, "%s-%s%s" % (name, caller, utils.splitext_plus(unique_fnames[0])[1])) utils.symlink_plus(unique_fnames[0], gemini_vcf) return gemini_vcf
python
def get_multisample_vcf(fnames, name, caller, data): """Retrieve a multiple sample VCF file in a standard location. Handles inputs with multiple repeated input files from batches. """ unique_fnames = [] for f in fnames: if f not in unique_fnames: unique_fnames.append(f) out_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "gemini")) if len(unique_fnames) > 1: gemini_vcf = os.path.join(out_dir, "%s-%s.vcf.gz" % (name, caller)) vrn_file_batch = None for variant in data.get("variants", []): if variant["variantcaller"] == caller and variant.get("vrn_file_batch"): vrn_file_batch = variant["vrn_file_batch"] if vrn_file_batch: utils.symlink_plus(vrn_file_batch, gemini_vcf) return gemini_vcf else: return vcfutils.merge_variant_files(unique_fnames, gemini_vcf, dd.get_ref_file(data), data["config"]) else: gemini_vcf = os.path.join(out_dir, "%s-%s%s" % (name, caller, utils.splitext_plus(unique_fnames[0])[1])) utils.symlink_plus(unique_fnames[0], gemini_vcf) return gemini_vcf
[ "def", "get_multisample_vcf", "(", "fnames", ",", "name", ",", "caller", ",", "data", ")", ":", "unique_fnames", "=", "[", "]", "for", "f", "in", "fnames", ":", "if", "f", "not", "in", "unique_fnames", ":", "unique_fnames", ".", "append", "(", "f", ")"...
Retrieve a multiple sample VCF file in a standard location. Handles inputs with multiple repeated input files from batches.
[ "Retrieve", "a", "multiple", "sample", "VCF", "file", "in", "a", "standard", "location", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/population.py#L299-L324
224,058
bcbio/bcbio-nextgen
bcbio/variation/population.py
get_gemini_files
def get_gemini_files(data): """Enumerate available gemini data files in a standard installation. """ try: from gemini import annotations, config except ImportError: return {} return {"base": config.read_gemini_config()["annotation_dir"], "files": annotations.get_anno_files().values()}
python
def get_gemini_files(data): """Enumerate available gemini data files in a standard installation. """ try: from gemini import annotations, config except ImportError: return {} return {"base": config.read_gemini_config()["annotation_dir"], "files": annotations.get_anno_files().values()}
[ "def", "get_gemini_files", "(", "data", ")", ":", "try", ":", "from", "gemini", "import", "annotations", ",", "config", "except", "ImportError", ":", "return", "{", "}", "return", "{", "\"base\"", ":", "config", ".", "read_gemini_config", "(", ")", "[", "\...
Enumerate available gemini data files in a standard installation.
[ "Enumerate", "available", "gemini", "data", "files", "in", "a", "standard", "installation", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/population.py#L348-L356
224,059
bcbio/bcbio-nextgen
bcbio/variation/population.py
_group_by_batches
def _group_by_batches(samples, check_fn): """Group data items into batches, providing details to retrieve results. """ batch_groups = collections.defaultdict(list) singles = [] out_retrieve = [] extras = [] for data in [x[0] for x in samples]: if check_fn(data): batch = tz.get_in(["metadata", "batch"], data) name = str(dd.get_sample_name(data)) if batch: out_retrieve.append((str(batch), data)) else: out_retrieve.append((name, data)) for vrn in data["variants"]: if vrn.get("population", True): if batch: batch_groups[(str(batch), vrn["variantcaller"])].append((vrn["vrn_file"], data)) else: singles.append((name, vrn["variantcaller"], data, vrn["vrn_file"])) else: extras.append(data) return batch_groups, singles, out_retrieve, extras
python
def _group_by_batches(samples, check_fn): """Group data items into batches, providing details to retrieve results. """ batch_groups = collections.defaultdict(list) singles = [] out_retrieve = [] extras = [] for data in [x[0] for x in samples]: if check_fn(data): batch = tz.get_in(["metadata", "batch"], data) name = str(dd.get_sample_name(data)) if batch: out_retrieve.append((str(batch), data)) else: out_retrieve.append((name, data)) for vrn in data["variants"]: if vrn.get("population", True): if batch: batch_groups[(str(batch), vrn["variantcaller"])].append((vrn["vrn_file"], data)) else: singles.append((name, vrn["variantcaller"], data, vrn["vrn_file"])) else: extras.append(data) return batch_groups, singles, out_retrieve, extras
[ "def", "_group_by_batches", "(", "samples", ",", "check_fn", ")", ":", "batch_groups", "=", "collections", ".", "defaultdict", "(", "list", ")", "singles", "=", "[", "]", "out_retrieve", "=", "[", "]", "extras", "=", "[", "]", "for", "data", "in", "[", ...
Group data items into batches, providing details to retrieve results.
[ "Group", "data", "items", "into", "batches", "providing", "details", "to", "retrieve", "results", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/population.py#L358-L381
224,060
bcbio/bcbio-nextgen
bcbio/variation/population.py
prep_db_parallel
def prep_db_parallel(samples, parallel_fn): """Prepares gemini databases in parallel, handling jointly called populations. """ batch_groups, singles, out_retrieve, extras = _group_by_batches(samples, _has_variant_calls) to_process = [] has_batches = False for (name, caller), info in batch_groups.items(): fnames = [x[0] for x in info] to_process.append([fnames, (str(name), caller, True), [x[1] for x in info], extras]) has_batches = True for name, caller, data, fname in singles: to_process.append([[fname], (str(name), caller, False), [data], extras]) output = parallel_fn("prep_gemini_db", to_process) out_fetch = {} for batch_id, out_file in output: out_fetch[tuple(batch_id)] = out_file out = [] for batch_name, data in out_retrieve: out_variants = [] for vrn in data["variants"]: use_population = vrn.pop("population", True) if use_population: vrn["population"] = out_fetch[(batch_name, vrn["variantcaller"])] out_variants.append(vrn) data["variants"] = out_variants out.append([data]) for x in extras: out.append([x]) return out
python
def prep_db_parallel(samples, parallel_fn): """Prepares gemini databases in parallel, handling jointly called populations. """ batch_groups, singles, out_retrieve, extras = _group_by_batches(samples, _has_variant_calls) to_process = [] has_batches = False for (name, caller), info in batch_groups.items(): fnames = [x[0] for x in info] to_process.append([fnames, (str(name), caller, True), [x[1] for x in info], extras]) has_batches = True for name, caller, data, fname in singles: to_process.append([[fname], (str(name), caller, False), [data], extras]) output = parallel_fn("prep_gemini_db", to_process) out_fetch = {} for batch_id, out_file in output: out_fetch[tuple(batch_id)] = out_file out = [] for batch_name, data in out_retrieve: out_variants = [] for vrn in data["variants"]: use_population = vrn.pop("population", True) if use_population: vrn["population"] = out_fetch[(batch_name, vrn["variantcaller"])] out_variants.append(vrn) data["variants"] = out_variants out.append([data]) for x in extras: out.append([x]) return out
[ "def", "prep_db_parallel", "(", "samples", ",", "parallel_fn", ")", ":", "batch_groups", ",", "singles", ",", "out_retrieve", ",", "extras", "=", "_group_by_batches", "(", "samples", ",", "_has_variant_calls", ")", "to_process", "=", "[", "]", "has_batches", "="...
Prepares gemini databases in parallel, handling jointly called populations.
[ "Prepares", "gemini", "databases", "in", "parallel", "handling", "jointly", "called", "populations", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/population.py#L389-L417
224,061
bcbio/bcbio-nextgen
bcbio/workflow/stormseq.py
_get_s3_files
def _get_s3_files(local_dir, file_info, params): """Retrieve s3 files to local directory, handling STORMSeq inputs. """ assert len(file_info) == 1 files = file_info.values()[0] fnames = [] for k in ["1", "2"]: if files[k] not in fnames: fnames.append(files[k]) out = [] for fname in fnames: bucket, key = fname.replace("s3://", "").split("/", 1) if params["access_key_id"] == "TEST": out.append(os.path.join(local_dir, os.path.basename(key))) else: out.append(s3.get_file(local_dir, bucket, key, params)) return out
python
def _get_s3_files(local_dir, file_info, params): """Retrieve s3 files to local directory, handling STORMSeq inputs. """ assert len(file_info) == 1 files = file_info.values()[0] fnames = [] for k in ["1", "2"]: if files[k] not in fnames: fnames.append(files[k]) out = [] for fname in fnames: bucket, key = fname.replace("s3://", "").split("/", 1) if params["access_key_id"] == "TEST": out.append(os.path.join(local_dir, os.path.basename(key))) else: out.append(s3.get_file(local_dir, bucket, key, params)) return out
[ "def", "_get_s3_files", "(", "local_dir", ",", "file_info", ",", "params", ")", ":", "assert", "len", "(", "file_info", ")", "==", "1", "files", "=", "file_info", ".", "values", "(", ")", "[", "0", "]", "fnames", "=", "[", "]", "for", "k", "in", "[...
Retrieve s3 files to local directory, handling STORMSeq inputs.
[ "Retrieve", "s3", "files", "to", "local", "directory", "handling", "STORMSeq", "inputs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/workflow/stormseq.py#L22-L38
224,062
bcbio/bcbio-nextgen
bcbio/pipeline/fastq.py
_gzip_fastq
def _gzip_fastq(in_file, out_dir=None): """ gzip a fastq file if it is not already gzipped, handling conversion from bzip to gzipped files """ if fastq.is_fastq(in_file) and not objectstore.is_remote(in_file): if utils.is_bzipped(in_file): return _bzip_gzip(in_file, out_dir) elif not utils.is_gzipped(in_file): if out_dir: gzipped_file = os.path.join(out_dir, os.path.basename(in_file) + ".gz") else: gzipped_file = in_file + ".gz" if file_exists(gzipped_file): return gzipped_file message = "gzipping {in_file} to {gzipped_file}.".format( in_file=in_file, gzipped_file=gzipped_file) with file_transaction(gzipped_file) as tx_gzipped_file: do.run("gzip -c {in_file} > {tx_gzipped_file}".format(**locals()), message) return gzipped_file return in_file
python
def _gzip_fastq(in_file, out_dir=None): """ gzip a fastq file if it is not already gzipped, handling conversion from bzip to gzipped files """ if fastq.is_fastq(in_file) and not objectstore.is_remote(in_file): if utils.is_bzipped(in_file): return _bzip_gzip(in_file, out_dir) elif not utils.is_gzipped(in_file): if out_dir: gzipped_file = os.path.join(out_dir, os.path.basename(in_file) + ".gz") else: gzipped_file = in_file + ".gz" if file_exists(gzipped_file): return gzipped_file message = "gzipping {in_file} to {gzipped_file}.".format( in_file=in_file, gzipped_file=gzipped_file) with file_transaction(gzipped_file) as tx_gzipped_file: do.run("gzip -c {in_file} > {tx_gzipped_file}".format(**locals()), message) return gzipped_file return in_file
[ "def", "_gzip_fastq", "(", "in_file", ",", "out_dir", "=", "None", ")", ":", "if", "fastq", ".", "is_fastq", "(", "in_file", ")", "and", "not", "objectstore", ".", "is_remote", "(", "in_file", ")", ":", "if", "utils", ".", "is_bzipped", "(", "in_file", ...
gzip a fastq file if it is not already gzipped, handling conversion from bzip to gzipped files
[ "gzip", "a", "fastq", "file", "if", "it", "is", "not", "already", "gzipped", "handling", "conversion", "from", "bzip", "to", "gzipped", "files" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/fastq.py#L51-L72
224,063
bcbio/bcbio-nextgen
bcbio/pipeline/fastq.py
_pipeline_needs_fastq
def _pipeline_needs_fastq(config, data): """Determine if the pipeline can proceed with a BAM file, or needs fastq conversion. """ aligner = config["algorithm"].get("aligner") support_bam = aligner in alignment.metadata.get("support_bam", []) return aligner and not support_bam
python
def _pipeline_needs_fastq(config, data): """Determine if the pipeline can proceed with a BAM file, or needs fastq conversion. """ aligner = config["algorithm"].get("aligner") support_bam = aligner in alignment.metadata.get("support_bam", []) return aligner and not support_bam
[ "def", "_pipeline_needs_fastq", "(", "config", ",", "data", ")", ":", "aligner", "=", "config", "[", "\"algorithm\"", "]", ".", "get", "(", "\"aligner\"", ")", "support_bam", "=", "aligner", "in", "alignment", ".", "metadata", ".", "get", "(", "\"support_bam...
Determine if the pipeline can proceed with a BAM file, or needs fastq conversion.
[ "Determine", "if", "the", "pipeline", "can", "proceed", "with", "a", "BAM", "file", "or", "needs", "fastq", "conversion", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/fastq.py#L95-L100
224,064
bcbio/bcbio-nextgen
bcbio/pipeline/fastq.py
convert_bam_to_fastq
def convert_bam_to_fastq(in_file, work_dir, data, dirs, config): """Convert BAM input file into FASTQ files. """ return alignprep.prep_fastq_inputs([in_file], data)
python
def convert_bam_to_fastq(in_file, work_dir, data, dirs, config): """Convert BAM input file into FASTQ files. """ return alignprep.prep_fastq_inputs([in_file], data)
[ "def", "convert_bam_to_fastq", "(", "in_file", ",", "work_dir", ",", "data", ",", "dirs", ",", "config", ")", ":", "return", "alignprep", ".", "prep_fastq_inputs", "(", "[", "in_file", "]", ",", "data", ")" ]
Convert BAM input file into FASTQ files.
[ "Convert", "BAM", "input", "file", "into", "FASTQ", "files", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/fastq.py#L103-L106
224,065
bcbio/bcbio-nextgen
bcbio/pipeline/fastq.py
merge
def merge(files, out_file, config): """merge smartly fastq files. It recognizes paired fastq files.""" pair1 = [fastq_file[0] for fastq_file in files] if len(files[0]) > 1: path = splitext_plus(out_file) pair1_out_file = path[0] + "_R1" + path[1] pair2 = [fastq_file[1] for fastq_file in files] pair2_out_file = path[0] + "_R2" + path[1] _merge_list_fastqs(pair1, pair1_out_file, config) _merge_list_fastqs(pair2, pair2_out_file, config) return [pair1_out_file, pair2_out_file] else: return _merge_list_fastqs(pair1, out_file, config)
python
def merge(files, out_file, config): """merge smartly fastq files. It recognizes paired fastq files.""" pair1 = [fastq_file[0] for fastq_file in files] if len(files[0]) > 1: path = splitext_plus(out_file) pair1_out_file = path[0] + "_R1" + path[1] pair2 = [fastq_file[1] for fastq_file in files] pair2_out_file = path[0] + "_R2" + path[1] _merge_list_fastqs(pair1, pair1_out_file, config) _merge_list_fastqs(pair2, pair2_out_file, config) return [pair1_out_file, pair2_out_file] else: return _merge_list_fastqs(pair1, out_file, config)
[ "def", "merge", "(", "files", ",", "out_file", ",", "config", ")", ":", "pair1", "=", "[", "fastq_file", "[", "0", "]", "for", "fastq_file", "in", "files", "]", "if", "len", "(", "files", "[", "0", "]", ")", ">", "1", ":", "path", "=", "splitext_...
merge smartly fastq files. It recognizes paired fastq files.
[ "merge", "smartly", "fastq", "files", ".", "It", "recognizes", "paired", "fastq", "files", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/fastq.py#L108-L120
224,066
bcbio/bcbio-nextgen
bcbio/pipeline/fastq.py
_merge_list_fastqs
def _merge_list_fastqs(files, out_file, config): """merge list of fastq files into one""" if not all(map(fastq.is_fastq, files)): raise ValueError("Not all of the files to merge are fastq files: %s " % (files)) assert all(map(utils.file_exists, files)), ("Not all of the files to merge " "exist: %s" % (files)) if not file_exists(out_file): files = [_gzip_fastq(fn) for fn in files] if len(files) == 1: if "remove_source" in config and config["remove_source"]: shutil.move(files[0], out_file) else: os.symlink(files[0], out_file) return out_file with file_transaction(out_file) as file_txt_out: files_str = " ".join(list(files)) cmd = "cat {files_str} > {file_txt_out}".format(**locals()) do.run(cmd, "merge fastq files %s" % files) return out_file
python
def _merge_list_fastqs(files, out_file, config): """merge list of fastq files into one""" if not all(map(fastq.is_fastq, files)): raise ValueError("Not all of the files to merge are fastq files: %s " % (files)) assert all(map(utils.file_exists, files)), ("Not all of the files to merge " "exist: %s" % (files)) if not file_exists(out_file): files = [_gzip_fastq(fn) for fn in files] if len(files) == 1: if "remove_source" in config and config["remove_source"]: shutil.move(files[0], out_file) else: os.symlink(files[0], out_file) return out_file with file_transaction(out_file) as file_txt_out: files_str = " ".join(list(files)) cmd = "cat {files_str} > {file_txt_out}".format(**locals()) do.run(cmd, "merge fastq files %s" % files) return out_file
[ "def", "_merge_list_fastqs", "(", "files", ",", "out_file", ",", "config", ")", ":", "if", "not", "all", "(", "map", "(", "fastq", ".", "is_fastq", ",", "files", ")", ")", ":", "raise", "ValueError", "(", "\"Not all of the files to merge are fastq files: %s \"",...
merge list of fastq files into one
[ "merge", "list", "of", "fastq", "files", "into", "one" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/fastq.py#L122-L140
224,067
bcbio/bcbio-nextgen
bcbio/bed/__init__.py
decomment
def decomment(bed_file, out_file): """ clean a BED file """ if file_exists(out_file): return out_file with utils.open_gzipsafe(bed_file) as in_handle, open(out_file, "w") as out_handle: for line in in_handle: if line.startswith("#") or line.startswith("browser") or line.startswith("track"): continue else: out_handle.write(line) return out_file
python
def decomment(bed_file, out_file): """ clean a BED file """ if file_exists(out_file): return out_file with utils.open_gzipsafe(bed_file) as in_handle, open(out_file, "w") as out_handle: for line in in_handle: if line.startswith("#") or line.startswith("browser") or line.startswith("track"): continue else: out_handle.write(line) return out_file
[ "def", "decomment", "(", "bed_file", ",", "out_file", ")", ":", "if", "file_exists", "(", "out_file", ")", ":", "return", "out_file", "with", "utils", ".", "open_gzipsafe", "(", "bed_file", ")", "as", "in_handle", ",", "open", "(", "out_file", ",", "\"w\""...
clean a BED file
[ "clean", "a", "BED", "file" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bed/__init__.py#L5-L18
224,068
bcbio/bcbio-nextgen
bcbio/bed/__init__.py
concat
def concat(bed_files, catted=None): """ recursively concat a set of BED files, returning a sorted bedtools object of the result """ bed_files = [x for x in bed_files if x] if len(bed_files) == 0: if catted: # move to a .bed extension for downstream tools if not already sorted_bed = catted.sort() if not sorted_bed.fn.endswith(".bed"): return sorted_bed.moveto(sorted_bed.fn + ".bed") else: return sorted_bed else: return catted if not catted: bed_files = list(bed_files) catted = bt.BedTool(bed_files.pop()) else: catted = catted.cat(bed_files.pop(), postmerge=False, force_truncate=False) return concat(bed_files, catted)
python
def concat(bed_files, catted=None): """ recursively concat a set of BED files, returning a sorted bedtools object of the result """ bed_files = [x for x in bed_files if x] if len(bed_files) == 0: if catted: # move to a .bed extension for downstream tools if not already sorted_bed = catted.sort() if not sorted_bed.fn.endswith(".bed"): return sorted_bed.moveto(sorted_bed.fn + ".bed") else: return sorted_bed else: return catted if not catted: bed_files = list(bed_files) catted = bt.BedTool(bed_files.pop()) else: catted = catted.cat(bed_files.pop(), postmerge=False, force_truncate=False) return concat(bed_files, catted)
[ "def", "concat", "(", "bed_files", ",", "catted", "=", "None", ")", ":", "bed_files", "=", "[", "x", "for", "x", "in", "bed_files", "if", "x", "]", "if", "len", "(", "bed_files", ")", "==", "0", ":", "if", "catted", ":", "# move to a .bed extension for...
recursively concat a set of BED files, returning a sorted bedtools object of the result
[ "recursively", "concat", "a", "set", "of", "BED", "files", "returning", "a", "sorted", "bedtools", "object", "of", "the", "result" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bed/__init__.py#L20-L44
224,069
bcbio/bcbio-nextgen
bcbio/bed/__init__.py
merge
def merge(bedfiles): """ given a BED file or list of BED files merge them an return a bedtools object """ if isinstance(bedfiles, list): catted = concat(bedfiles) else: catted = concat([bedfiles]) if catted: return concat(bedfiles).sort().merge() else: return catted
python
def merge(bedfiles): """ given a BED file or list of BED files merge them an return a bedtools object """ if isinstance(bedfiles, list): catted = concat(bedfiles) else: catted = concat([bedfiles]) if catted: return concat(bedfiles).sort().merge() else: return catted
[ "def", "merge", "(", "bedfiles", ")", ":", "if", "isinstance", "(", "bedfiles", ",", "list", ")", ":", "catted", "=", "concat", "(", "bedfiles", ")", "else", ":", "catted", "=", "concat", "(", "[", "bedfiles", "]", ")", "if", "catted", ":", "return",...
given a BED file or list of BED files merge them an return a bedtools object
[ "given", "a", "BED", "file", "or", "list", "of", "BED", "files", "merge", "them", "an", "return", "a", "bedtools", "object" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bed/__init__.py#L46-L57
224,070
bcbio/bcbio-nextgen
bcbio/structural/hydra.py
select_unaligned_read_pairs
def select_unaligned_read_pairs(in_bam, extra, out_dir, config): """Retrieve unaligned read pairs from input alignment BAM, as two fastq files. """ runner = broad.runner_from_config(config) base, ext = os.path.splitext(os.path.basename(in_bam)) nomap_bam = os.path.join(out_dir, "{}-{}{}".format(base, extra, ext)) if not utils.file_exists(nomap_bam): with file_transaction(nomap_bam) as tx_out: runner.run("FilterSamReads", [("INPUT", in_bam), ("OUTPUT", tx_out), ("EXCLUDE_ALIGNED", "true"), ("WRITE_READS_FILES", "false"), ("SORT_ORDER", "queryname")]) has_reads = False with pysam.Samfile(nomap_bam, "rb") as in_pysam: for read in in_pysam: if read.is_paired: has_reads = True break if has_reads: out_fq1, out_fq2 = ["{}-{}.fq".format(os.path.splitext(nomap_bam)[0], i) for i in [1, 2]] runner.run_fn("picard_bam_to_fastq", nomap_bam, out_fq1, out_fq2) return out_fq1, out_fq2 else: return None, None
python
def select_unaligned_read_pairs(in_bam, extra, out_dir, config): """Retrieve unaligned read pairs from input alignment BAM, as two fastq files. """ runner = broad.runner_from_config(config) base, ext = os.path.splitext(os.path.basename(in_bam)) nomap_bam = os.path.join(out_dir, "{}-{}{}".format(base, extra, ext)) if not utils.file_exists(nomap_bam): with file_transaction(nomap_bam) as tx_out: runner.run("FilterSamReads", [("INPUT", in_bam), ("OUTPUT", tx_out), ("EXCLUDE_ALIGNED", "true"), ("WRITE_READS_FILES", "false"), ("SORT_ORDER", "queryname")]) has_reads = False with pysam.Samfile(nomap_bam, "rb") as in_pysam: for read in in_pysam: if read.is_paired: has_reads = True break if has_reads: out_fq1, out_fq2 = ["{}-{}.fq".format(os.path.splitext(nomap_bam)[0], i) for i in [1, 2]] runner.run_fn("picard_bam_to_fastq", nomap_bam, out_fq1, out_fq2) return out_fq1, out_fq2 else: return None, None
[ "def", "select_unaligned_read_pairs", "(", "in_bam", ",", "extra", ",", "out_dir", ",", "config", ")", ":", "runner", "=", "broad", ".", "runner_from_config", "(", "config", ")", "base", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "os", "."...
Retrieve unaligned read pairs from input alignment BAM, as two fastq files.
[ "Retrieve", "unaligned", "read", "pairs", "from", "input", "alignment", "BAM", "as", "two", "fastq", "files", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/hydra.py#L22-L46
224,071
bcbio/bcbio-nextgen
bcbio/structural/hydra.py
remove_nopairs
def remove_nopairs(in_bam, out_dir, config): """Remove any reads without both pairs present in the file. """ runner = broad.runner_from_config(config) out_bam = os.path.join(out_dir, "{}-safepair{}".format(*os.path.splitext(os.path.basename(in_bam)))) if not utils.file_exists(out_bam): read_counts = collections.defaultdict(int) with pysam.Samfile(in_bam, "rb") as in_pysam: for read in in_pysam: if read.is_paired: read_counts[read.qname] += 1 with pysam.Samfile(in_bam, "rb") as in_pysam: with file_transaction(out_bam) as tx_out_bam: with pysam.Samfile(tx_out_bam, "wb", template=in_pysam) as out_pysam: for read in in_pysam: if read_counts[read.qname] == 2: out_pysam.write(read) return runner.run_fn("picard_sort", out_bam, "queryname")
python
def remove_nopairs(in_bam, out_dir, config): """Remove any reads without both pairs present in the file. """ runner = broad.runner_from_config(config) out_bam = os.path.join(out_dir, "{}-safepair{}".format(*os.path.splitext(os.path.basename(in_bam)))) if not utils.file_exists(out_bam): read_counts = collections.defaultdict(int) with pysam.Samfile(in_bam, "rb") as in_pysam: for read in in_pysam: if read.is_paired: read_counts[read.qname] += 1 with pysam.Samfile(in_bam, "rb") as in_pysam: with file_transaction(out_bam) as tx_out_bam: with pysam.Samfile(tx_out_bam, "wb", template=in_pysam) as out_pysam: for read in in_pysam: if read_counts[read.qname] == 2: out_pysam.write(read) return runner.run_fn("picard_sort", out_bam, "queryname")
[ "def", "remove_nopairs", "(", "in_bam", ",", "out_dir", ",", "config", ")", ":", "runner", "=", "broad", ".", "runner_from_config", "(", "config", ")", "out_bam", "=", "os", ".", "path", ".", "join", "(", "out_dir", ",", "\"{}-safepair{}\"", ".", "format",...
Remove any reads without both pairs present in the file.
[ "Remove", "any", "reads", "without", "both", "pairs", "present", "in", "the", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/hydra.py#L48-L65
224,072
bcbio/bcbio-nextgen
bcbio/structural/hydra.py
tiered_alignment
def tiered_alignment(in_bam, tier_num, multi_mappers, extra_args, genome_build, pair_stats, work_dir, dirs, config): """Perform the alignment of non-mapped reads from previous tier. """ nomap_fq1, nomap_fq2 = select_unaligned_read_pairs(in_bam, "tier{}".format(tier_num), work_dir, config) if nomap_fq1 is not None: base_name = "{}-tier{}out".format(os.path.splitext(os.path.basename(in_bam))[0], tier_num) config = copy.deepcopy(config) dirs = copy.deepcopy(dirs) config["algorithm"]["bam_sort"] = "queryname" config["algorithm"]["multiple_mappers"] = multi_mappers config["algorithm"]["extra_align_args"] = ["-i", int(pair_stats["mean"]), int(pair_stats["std"])] + extra_args out_bam, ref_file = align_to_sort_bam(nomap_fq1, nomap_fq2, lane.rg_names(base_name, base_name, config), genome_build, "novoalign", dirs, config, dir_ext=os.path.join("hydra", os.path.split(nomap_fq1)[0])) return out_bam else: return None
python
def tiered_alignment(in_bam, tier_num, multi_mappers, extra_args, genome_build, pair_stats, work_dir, dirs, config): """Perform the alignment of non-mapped reads from previous tier. """ nomap_fq1, nomap_fq2 = select_unaligned_read_pairs(in_bam, "tier{}".format(tier_num), work_dir, config) if nomap_fq1 is not None: base_name = "{}-tier{}out".format(os.path.splitext(os.path.basename(in_bam))[0], tier_num) config = copy.deepcopy(config) dirs = copy.deepcopy(dirs) config["algorithm"]["bam_sort"] = "queryname" config["algorithm"]["multiple_mappers"] = multi_mappers config["algorithm"]["extra_align_args"] = ["-i", int(pair_stats["mean"]), int(pair_stats["std"])] + extra_args out_bam, ref_file = align_to_sort_bam(nomap_fq1, nomap_fq2, lane.rg_names(base_name, base_name, config), genome_build, "novoalign", dirs, config, dir_ext=os.path.join("hydra", os.path.split(nomap_fq1)[0])) return out_bam else: return None
[ "def", "tiered_alignment", "(", "in_bam", ",", "tier_num", ",", "multi_mappers", ",", "extra_args", ",", "genome_build", ",", "pair_stats", ",", "work_dir", ",", "dirs", ",", "config", ")", ":", "nomap_fq1", ",", "nomap_fq2", "=", "select_unaligned_read_pairs", ...
Perform the alignment of non-mapped reads from previous tier.
[ "Perform", "the", "alignment", "of", "non", "-", "mapped", "reads", "from", "previous", "tier", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/hydra.py#L68-L91
224,073
bcbio/bcbio-nextgen
bcbio/structural/hydra.py
convert_bam_to_bed
def convert_bam_to_bed(in_bam, out_file): """Convert BAM to bed file using BEDTools. """ with file_transaction(out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: subprocess.check_call(["bamToBed", "-i", in_bam, "-tag", "NM"], stdout=out_handle) return out_file
python
def convert_bam_to_bed(in_bam, out_file): """Convert BAM to bed file using BEDTools. """ with file_transaction(out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: subprocess.check_call(["bamToBed", "-i", in_bam, "-tag", "NM"], stdout=out_handle) return out_file
[ "def", "convert_bam_to_bed", "(", "in_bam", ",", "out_file", ")", ":", "with", "file_transaction", "(", "out_file", ")", "as", "tx_out_file", ":", "with", "open", "(", "tx_out_file", ",", "\"w\"", ")", "as", "out_handle", ":", "subprocess", ".", "check_call", ...
Convert BAM to bed file using BEDTools.
[ "Convert", "BAM", "to", "bed", "file", "using", "BEDTools", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/hydra.py#L96-L103
224,074
bcbio/bcbio-nextgen
bcbio/structural/hydra.py
hydra_breakpoints
def hydra_breakpoints(in_bam, pair_stats): """Detect structural variation breakpoints with hydra. """ in_bed = convert_bam_to_bed(in_bam) if os.path.getsize(in_bed) > 0: pair_bed = pair_discordants(in_bed, pair_stats) dedup_bed = dedup_discordants(pair_bed) return run_hydra(dedup_bed, pair_stats) else: return None
python
def hydra_breakpoints(in_bam, pair_stats): """Detect structural variation breakpoints with hydra. """ in_bed = convert_bam_to_bed(in_bam) if os.path.getsize(in_bed) > 0: pair_bed = pair_discordants(in_bed, pair_stats) dedup_bed = dedup_discordants(pair_bed) return run_hydra(dedup_bed, pair_stats) else: return None
[ "def", "hydra_breakpoints", "(", "in_bam", ",", "pair_stats", ")", ":", "in_bed", "=", "convert_bam_to_bed", "(", "in_bam", ")", "if", "os", ".", "path", ".", "getsize", "(", "in_bed", ")", ">", "0", ":", "pair_bed", "=", "pair_discordants", "(", "in_bed",...
Detect structural variation breakpoints with hydra.
[ "Detect", "structural", "variation", "breakpoints", "with", "hydra", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/hydra.py#L135-L144
224,075
bcbio/bcbio-nextgen
bcbio/structural/hydra.py
detect_sv
def detect_sv(align_bam, genome_build, dirs, config): """Detect structural variation from discordant aligned pairs. """ work_dir = utils.safe_makedir(os.path.join(dirs["work"], "structural")) pair_stats = shared.calc_paired_insert_stats(align_bam) fix_bam = remove_nopairs(align_bam, work_dir, config) tier2_align = tiered_alignment(fix_bam, "2", True, [], genome_build, pair_stats, work_dir, dirs, config) if tier2_align: tier3_align = tiered_alignment(tier2_align, "3", "Ex 1100", ["-t", "300"], genome_build, pair_stats, work_dir, dirs, config) if tier3_align: hydra_bps = hydra_breakpoints(tier3_align, pair_stats)
python
def detect_sv(align_bam, genome_build, dirs, config): """Detect structural variation from discordant aligned pairs. """ work_dir = utils.safe_makedir(os.path.join(dirs["work"], "structural")) pair_stats = shared.calc_paired_insert_stats(align_bam) fix_bam = remove_nopairs(align_bam, work_dir, config) tier2_align = tiered_alignment(fix_bam, "2", True, [], genome_build, pair_stats, work_dir, dirs, config) if tier2_align: tier3_align = tiered_alignment(tier2_align, "3", "Ex 1100", ["-t", "300"], genome_build, pair_stats, work_dir, dirs, config) if tier3_align: hydra_bps = hydra_breakpoints(tier3_align, pair_stats)
[ "def", "detect_sv", "(", "align_bam", ",", "genome_build", ",", "dirs", ",", "config", ")", ":", "work_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "dirs", "[", "\"work\"", "]", ",", "\"structural\"", ")", ")", "pa...
Detect structural variation from discordant aligned pairs.
[ "Detect", "structural", "variation", "from", "discordant", "aligned", "pairs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/hydra.py#L148-L162
224,076
bcbio/bcbio-nextgen
bcbio/log/logbook_zmqpush.py
inject
def inject(**params): """ A Logbook processor to inject arbitrary information into log records. Simply pass in keyword arguments and use as a context manager: >>> with inject(identifier=str(uuid.uuid4())).applicationbound(): ... logger.debug('Something happened') """ def callback(log_record): log_record.extra.update(params) return logbook.Processor(callback)
python
def inject(**params): """ A Logbook processor to inject arbitrary information into log records. Simply pass in keyword arguments and use as a context manager: >>> with inject(identifier=str(uuid.uuid4())).applicationbound(): ... logger.debug('Something happened') """ def callback(log_record): log_record.extra.update(params) return logbook.Processor(callback)
[ "def", "inject", "(", "*", "*", "params", ")", ":", "def", "callback", "(", "log_record", ")", ":", "log_record", ".", "extra", ".", "update", "(", "params", ")", "return", "logbook", ".", "Processor", "(", "callback", ")" ]
A Logbook processor to inject arbitrary information into log records. Simply pass in keyword arguments and use as a context manager: >>> with inject(identifier=str(uuid.uuid4())).applicationbound(): ... logger.debug('Something happened')
[ "A", "Logbook", "processor", "to", "inject", "arbitrary", "information", "into", "log", "records", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/log/logbook_zmqpush.py#L121-L134
224,077
bcbio/bcbio-nextgen
bcbio/log/logbook_zmqpush.py
ZeroMQPullSubscriber.recv
def recv(self, timeout=None): """Overwrite standard recv for timeout calls to catch interrupt errors. """ if timeout: try: testsock = self._zmq.select([self.socket], [], [], timeout)[0] except zmq.ZMQError as e: if e.errno == errno.EINTR: testsock = None else: raise if not testsock: return rv = self.socket.recv(self._zmq.NOBLOCK) return LogRecord.from_dict(json.loads(rv)) else: return super(ZeroMQPullSubscriber, self).recv(timeout)
python
def recv(self, timeout=None): """Overwrite standard recv for timeout calls to catch interrupt errors. """ if timeout: try: testsock = self._zmq.select([self.socket], [], [], timeout)[0] except zmq.ZMQError as e: if e.errno == errno.EINTR: testsock = None else: raise if not testsock: return rv = self.socket.recv(self._zmq.NOBLOCK) return LogRecord.from_dict(json.loads(rv)) else: return super(ZeroMQPullSubscriber, self).recv(timeout)
[ "def", "recv", "(", "self", ",", "timeout", "=", "None", ")", ":", "if", "timeout", ":", "try", ":", "testsock", "=", "self", ".", "_zmq", ".", "select", "(", "[", "self", ".", "socket", "]", ",", "[", "]", ",", "[", "]", ",", "timeout", ")", ...
Overwrite standard recv for timeout calls to catch interrupt errors.
[ "Overwrite", "standard", "recv", "for", "timeout", "calls", "to", "catch", "interrupt", "errors", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/log/logbook_zmqpush.py#L98-L114
224,078
bcbio/bcbio-nextgen
bcbio/variation/pisces.py
run
def run(align_bams, items, ref_file, assoc_files, region=None, out_file=None): """Run tumor only pisces calling Handles bgzipping output file and fixing VCF sample naming to match BAM sample. """ paired = vcfutils.get_paired_bams(align_bams, items) assert paired and not paired.normal_bam, ("Pisces supports tumor-only variant calling: %s" % (",".join([dd.get_sample_name(d) for d in items]))) vrs = bedutils.population_variant_regions(items) target = shared.subset_variant_regions(vrs, region, out_file, items=items, do_merge=True) min_af = float(dd.get_min_allele_fraction(paired.tumor_data)) / 100.0 if not utils.file_exists(out_file): base_out_name = utils.splitext_plus(os.path.basename(paired.tumor_bam))[0] raw_file = "%s.vcf" % utils.splitext_plus(out_file)[0] with file_transaction(paired.tumor_data, raw_file) as tx_out_file: ref_dir = _prep_genome(os.path.dirname(tx_out_file), paired.tumor_data) out_dir = os.path.dirname(tx_out_file) cores = dd.get_num_cores(paired.tumor_data) emit_min_af = min_af / 10.0 cmd = ("pisces --bampaths {paired.tumor_bam} --genomepaths {ref_dir} --intervalpaths {target} " "--maxthreads {cores} --minvf {emit_min_af} --vffilter {min_af} " "--ploidy somatic --gvcf false -o {out_dir}") # Recommended filtering for low frequency indels # https://github.com/bcbio/bcbio-nextgen/commit/49d0cbb1f6dcbea629c63749e2f9813bd06dcee3#commitcomment-29765373 cmd += " -RMxNFilter 5,9,0.35" # For low frequency UMI tagged variants, set higher variant thresholds # https://github.com/Illumina/Pisces/issues/14#issuecomment-399756862 if min_af < (1.0 / 100.0): cmd += " --minbasecallquality 30" do.run(cmd.format(**locals()), "Pisces tumor-only somatic calling") shutil.move(os.path.join(out_dir, "%s.vcf" % base_out_name), tx_out_file) vcfutils.bgzip_and_index(raw_file, paired.tumor_data["config"], prep_cmd="sed 's#%s.bam#%s#' | %s" % (base_out_name, dd.get_sample_name(paired.tumor_data), vcfutils.add_contig_to_header_cl(dd.get_ref_file(paired.tumor_data), out_file))) return vcfutils.bgzip_and_index(out_file, paired.tumor_data["config"])
python
def run(align_bams, items, ref_file, assoc_files, region=None, out_file=None): """Run tumor only pisces calling Handles bgzipping output file and fixing VCF sample naming to match BAM sample. """ paired = vcfutils.get_paired_bams(align_bams, items) assert paired and not paired.normal_bam, ("Pisces supports tumor-only variant calling: %s" % (",".join([dd.get_sample_name(d) for d in items]))) vrs = bedutils.population_variant_regions(items) target = shared.subset_variant_regions(vrs, region, out_file, items=items, do_merge=True) min_af = float(dd.get_min_allele_fraction(paired.tumor_data)) / 100.0 if not utils.file_exists(out_file): base_out_name = utils.splitext_plus(os.path.basename(paired.tumor_bam))[0] raw_file = "%s.vcf" % utils.splitext_plus(out_file)[0] with file_transaction(paired.tumor_data, raw_file) as tx_out_file: ref_dir = _prep_genome(os.path.dirname(tx_out_file), paired.tumor_data) out_dir = os.path.dirname(tx_out_file) cores = dd.get_num_cores(paired.tumor_data) emit_min_af = min_af / 10.0 cmd = ("pisces --bampaths {paired.tumor_bam} --genomepaths {ref_dir} --intervalpaths {target} " "--maxthreads {cores} --minvf {emit_min_af} --vffilter {min_af} " "--ploidy somatic --gvcf false -o {out_dir}") # Recommended filtering for low frequency indels # https://github.com/bcbio/bcbio-nextgen/commit/49d0cbb1f6dcbea629c63749e2f9813bd06dcee3#commitcomment-29765373 cmd += " -RMxNFilter 5,9,0.35" # For low frequency UMI tagged variants, set higher variant thresholds # https://github.com/Illumina/Pisces/issues/14#issuecomment-399756862 if min_af < (1.0 / 100.0): cmd += " --minbasecallquality 30" do.run(cmd.format(**locals()), "Pisces tumor-only somatic calling") shutil.move(os.path.join(out_dir, "%s.vcf" % base_out_name), tx_out_file) vcfutils.bgzip_and_index(raw_file, paired.tumor_data["config"], prep_cmd="sed 's#%s.bam#%s#' | %s" % (base_out_name, dd.get_sample_name(paired.tumor_data), vcfutils.add_contig_to_header_cl(dd.get_ref_file(paired.tumor_data), out_file))) return vcfutils.bgzip_and_index(out_file, paired.tumor_data["config"])
[ "def", "run", "(", "align_bams", ",", "items", ",", "ref_file", ",", "assoc_files", ",", "region", "=", "None", ",", "out_file", "=", "None", ")", ":", "paired", "=", "vcfutils", ".", "get_paired_bams", "(", "align_bams", ",", "items", ")", "assert", "pa...
Run tumor only pisces calling Handles bgzipping output file and fixing VCF sample naming to match BAM sample.
[ "Run", "tumor", "only", "pisces", "calling" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/pisces.py#L17-L54
224,079
bcbio/bcbio-nextgen
bcbio/variation/pisces.py
_prep_genome
def _prep_genome(out_dir, data): """Create prepped reference directory for pisces. Requires a custom GenomeSize.xml file present. """ genome_name = utils.splitext_plus(os.path.basename(dd.get_ref_file(data)))[0] out_dir = utils.safe_makedir(os.path.join(out_dir, genome_name)) ref_file = dd.get_ref_file(data) utils.symlink_plus(ref_file, os.path.join(out_dir, os.path.basename(ref_file))) with open(os.path.join(out_dir, "GenomeSize.xml"), "w") as out_handle: out_handle.write('<sequenceSizes genomeName="%s">' % genome_name) for c in pysam.AlignmentFile("%s.dict" % utils.splitext_plus(ref_file)[0]).header["SQ"]: cur_ploidy = ploidy.get_ploidy([data], region=[c["SN"]]) out_handle.write('<chromosome fileName="%s" contigName="%s" totalBases="%s" knownBases="%s" ' 'isCircular="false" ploidy="%s" md5="%s"/>' % (os.path.basename(ref_file), c["SN"], c["LN"], c["LN"], cur_ploidy, c["M5"])) out_handle.write('</sequenceSizes>') return out_dir
python
def _prep_genome(out_dir, data): """Create prepped reference directory for pisces. Requires a custom GenomeSize.xml file present. """ genome_name = utils.splitext_plus(os.path.basename(dd.get_ref_file(data)))[0] out_dir = utils.safe_makedir(os.path.join(out_dir, genome_name)) ref_file = dd.get_ref_file(data) utils.symlink_plus(ref_file, os.path.join(out_dir, os.path.basename(ref_file))) with open(os.path.join(out_dir, "GenomeSize.xml"), "w") as out_handle: out_handle.write('<sequenceSizes genomeName="%s">' % genome_name) for c in pysam.AlignmentFile("%s.dict" % utils.splitext_plus(ref_file)[0]).header["SQ"]: cur_ploidy = ploidy.get_ploidy([data], region=[c["SN"]]) out_handle.write('<chromosome fileName="%s" contigName="%s" totalBases="%s" knownBases="%s" ' 'isCircular="false" ploidy="%s" md5="%s"/>' % (os.path.basename(ref_file), c["SN"], c["LN"], c["LN"], cur_ploidy, c["M5"])) out_handle.write('</sequenceSizes>') return out_dir
[ "def", "_prep_genome", "(", "out_dir", ",", "data", ")", ":", "genome_name", "=", "utils", ".", "splitext_plus", "(", "os", ".", "path", ".", "basename", "(", "dd", ".", "get_ref_file", "(", "data", ")", ")", ")", "[", "0", "]", "out_dir", "=", "util...
Create prepped reference directory for pisces. Requires a custom GenomeSize.xml file present.
[ "Create", "prepped", "reference", "directory", "for", "pisces", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/pisces.py#L56-L73
224,080
bcbio/bcbio-nextgen
bcbio/variation/deepvariant.py
run
def run(align_bams, items, ref_file, assoc_files, region, out_file): """Return DeepVariant calling on germline samples. region can be a single region or list of multiple regions for multicore calling. """ assert not vcfutils.is_paired_analysis(align_bams, items), \ ("DeepVariant currently only supports germline calling: %s" % (", ".join([dd.get_sample_name(d) for d in items]))) assert len(items) == 1, \ ("DeepVariant currently only supports single sample calling: %s" % (", ".join([dd.get_sample_name(d) for d in items]))) out_file = _run_germline(align_bams[0], items[0], ref_file, region, out_file) return vcfutils.bgzip_and_index(out_file, items[0]["config"])
python
def run(align_bams, items, ref_file, assoc_files, region, out_file): """Return DeepVariant calling on germline samples. region can be a single region or list of multiple regions for multicore calling. """ assert not vcfutils.is_paired_analysis(align_bams, items), \ ("DeepVariant currently only supports germline calling: %s" % (", ".join([dd.get_sample_name(d) for d in items]))) assert len(items) == 1, \ ("DeepVariant currently only supports single sample calling: %s" % (", ".join([dd.get_sample_name(d) for d in items]))) out_file = _run_germline(align_bams[0], items[0], ref_file, region, out_file) return vcfutils.bgzip_and_index(out_file, items[0]["config"])
[ "def", "run", "(", "align_bams", ",", "items", ",", "ref_file", ",", "assoc_files", ",", "region", ",", "out_file", ")", ":", "assert", "not", "vcfutils", ".", "is_paired_analysis", "(", "align_bams", ",", "items", ")", ",", "(", "\"DeepVariant currently only ...
Return DeepVariant calling on germline samples. region can be a single region or list of multiple regions for multicore calling.
[ "Return", "DeepVariant", "calling", "on", "germline", "samples", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/deepvariant.py#L12-L25
224,081
bcbio/bcbio-nextgen
bcbio/variation/deepvariant.py
_run_germline
def _run_germline(bam_file, data, ref_file, region, out_file): """Single sample germline variant calling. """ work_dir = utils.safe_makedir("%s-work" % utils.splitext_plus(out_file)[0]) region_bed = strelka2.get_region_bed(region, [data], out_file, want_gzip=False) example_dir = _make_examples(bam_file, data, ref_file, region_bed, out_file, work_dir) if _has_candidate_variants(example_dir): tfrecord_file = _call_variants(example_dir, region_bed, data, out_file) return _postprocess_variants(tfrecord_file, data, ref_file, out_file) else: return vcfutils.write_empty_vcf(out_file, data["config"], [dd.get_sample_name(data)])
python
def _run_germline(bam_file, data, ref_file, region, out_file): """Single sample germline variant calling. """ work_dir = utils.safe_makedir("%s-work" % utils.splitext_plus(out_file)[0]) region_bed = strelka2.get_region_bed(region, [data], out_file, want_gzip=False) example_dir = _make_examples(bam_file, data, ref_file, region_bed, out_file, work_dir) if _has_candidate_variants(example_dir): tfrecord_file = _call_variants(example_dir, region_bed, data, out_file) return _postprocess_variants(tfrecord_file, data, ref_file, out_file) else: return vcfutils.write_empty_vcf(out_file, data["config"], [dd.get_sample_name(data)])
[ "def", "_run_germline", "(", "bam_file", ",", "data", ",", "ref_file", ",", "region", ",", "out_file", ")", ":", "work_dir", "=", "utils", ".", "safe_makedir", "(", "\"%s-work\"", "%", "utils", ".", "splitext_plus", "(", "out_file", ")", "[", "0", "]", "...
Single sample germline variant calling.
[ "Single", "sample", "germline", "variant", "calling", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/deepvariant.py#L27-L37
224,082
bcbio/bcbio-nextgen
bcbio/variation/deepvariant.py
_make_examples
def _make_examples(bam_file, data, ref_file, region_bed, out_file, work_dir): """Create example pileup images to feed into variant calling. """ log_dir = utils.safe_makedir(os.path.join(work_dir, "log")) example_dir = utils.safe_makedir(os.path.join(work_dir, "examples")) if len(glob.glob(os.path.join(example_dir, "%s.tfrecord*.gz" % dd.get_sample_name(data)))) == 0: with tx_tmpdir(data) as tx_example_dir: cmd = ["dv_make_examples.py", "--cores", dd.get_num_cores(data), "--ref", ref_file, "--reads", bam_file, "--regions", region_bed, "--logdir", log_dir, "--examples", tx_example_dir, "--sample", dd.get_sample_name(data)] do.run(cmd, "DeepVariant make_examples %s" % dd.get_sample_name(data)) for fname in glob.glob(os.path.join(tx_example_dir, "%s.tfrecord*.gz" % dd.get_sample_name(data))): utils.copy_plus(fname, os.path.join(example_dir, os.path.basename(fname))) return example_dir
python
def _make_examples(bam_file, data, ref_file, region_bed, out_file, work_dir): """Create example pileup images to feed into variant calling. """ log_dir = utils.safe_makedir(os.path.join(work_dir, "log")) example_dir = utils.safe_makedir(os.path.join(work_dir, "examples")) if len(glob.glob(os.path.join(example_dir, "%s.tfrecord*.gz" % dd.get_sample_name(data)))) == 0: with tx_tmpdir(data) as tx_example_dir: cmd = ["dv_make_examples.py", "--cores", dd.get_num_cores(data), "--ref", ref_file, "--reads", bam_file, "--regions", region_bed, "--logdir", log_dir, "--examples", tx_example_dir, "--sample", dd.get_sample_name(data)] do.run(cmd, "DeepVariant make_examples %s" % dd.get_sample_name(data)) for fname in glob.glob(os.path.join(tx_example_dir, "%s.tfrecord*.gz" % dd.get_sample_name(data))): utils.copy_plus(fname, os.path.join(example_dir, os.path.basename(fname))) return example_dir
[ "def", "_make_examples", "(", "bam_file", ",", "data", ",", "ref_file", ",", "region_bed", ",", "out_file", ",", "work_dir", ")", ":", "log_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"log\"", ")"...
Create example pileup images to feed into variant calling.
[ "Create", "example", "pileup", "images", "to", "feed", "into", "variant", "calling", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/deepvariant.py#L42-L55
224,083
bcbio/bcbio-nextgen
bcbio/variation/deepvariant.py
_call_variants
def _call_variants(example_dir, region_bed, data, out_file): """Call variants from prepared pileup examples, creating tensorflow record file. """ tf_out_file = "%s-tfrecord.gz" % utils.splitext_plus(out_file)[0] if not utils.file_exists(tf_out_file): with file_transaction(data, tf_out_file) as tx_out_file: model = "wes" if strelka2.coverage_interval_from_bed(region_bed) == "targeted" else "wgs" cmd = ["dv_call_variants.py", "--cores", dd.get_num_cores(data), "--outfile", tx_out_file, "--examples", example_dir, "--sample", dd.get_sample_name(data), "--model", model] do.run(cmd, "DeepVariant call_variants %s" % dd.get_sample_name(data)) return tf_out_file
python
def _call_variants(example_dir, region_bed, data, out_file): """Call variants from prepared pileup examples, creating tensorflow record file. """ tf_out_file = "%s-tfrecord.gz" % utils.splitext_plus(out_file)[0] if not utils.file_exists(tf_out_file): with file_transaction(data, tf_out_file) as tx_out_file: model = "wes" if strelka2.coverage_interval_from_bed(region_bed) == "targeted" else "wgs" cmd = ["dv_call_variants.py", "--cores", dd.get_num_cores(data), "--outfile", tx_out_file, "--examples", example_dir, "--sample", dd.get_sample_name(data), "--model", model] do.run(cmd, "DeepVariant call_variants %s" % dd.get_sample_name(data)) return tf_out_file
[ "def", "_call_variants", "(", "example_dir", ",", "region_bed", ",", "data", ",", "out_file", ")", ":", "tf_out_file", "=", "\"%s-tfrecord.gz\"", "%", "utils", ".", "splitext_plus", "(", "out_file", ")", "[", "0", "]", "if", "not", "utils", ".", "file_exists...
Call variants from prepared pileup examples, creating tensorflow record file.
[ "Call", "variants", "from", "prepared", "pileup", "examples", "creating", "tensorflow", "record", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/deepvariant.py#L57-L68
224,084
bcbio/bcbio-nextgen
bcbio/variation/deepvariant.py
_postprocess_variants
def _postprocess_variants(record_file, data, ref_file, out_file): """Post-process variants, converting into standard VCF file. """ if not utils.file_uptodate(out_file, record_file): with file_transaction(data, out_file) as tx_out_file: cmd = ["dv_postprocess_variants.py", "--ref", ref_file, "--infile", record_file, "--outfile", tx_out_file] do.run(cmd, "DeepVariant postprocess_variants %s" % dd.get_sample_name(data)) return out_file
python
def _postprocess_variants(record_file, data, ref_file, out_file): """Post-process variants, converting into standard VCF file. """ if not utils.file_uptodate(out_file, record_file): with file_transaction(data, out_file) as tx_out_file: cmd = ["dv_postprocess_variants.py", "--ref", ref_file, "--infile", record_file, "--outfile", tx_out_file] do.run(cmd, "DeepVariant postprocess_variants %s" % dd.get_sample_name(data)) return out_file
[ "def", "_postprocess_variants", "(", "record_file", ",", "data", ",", "ref_file", ",", "out_file", ")", ":", "if", "not", "utils", ".", "file_uptodate", "(", "out_file", ",", "record_file", ")", ":", "with", "file_transaction", "(", "data", ",", "out_file", ...
Post-process variants, converting into standard VCF file.
[ "Post", "-", "process", "variants", "converting", "into", "standard", "VCF", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/deepvariant.py#L70-L78
224,085
bcbio/bcbio-nextgen
bcbio/qc/qsignature.py
run
def run(bam_file, data, out_dir): """ Run SignatureGenerator to create normalize vcf that later will be input of qsignature_summary :param bam_file: (str) path of the bam_file :param data: (list) list containing the all the dictionary for this sample :param out_dir: (str) path of the output :returns: (string) output normalized vcf file """ qsig = config_utils.get_program("qsignature", data["config"]) res_qsig = config_utils.get_resources("qsignature", data["config"]) jvm_opts = " ".join(res_qsig.get("jvm_opts", ["-Xms750m", "-Xmx8g"])) if not qsig: logger.info("There is no qsignature tool. Skipping...") return None position = dd.get_qsig_file(data) mixup_check = dd.get_mixup_check(data) if mixup_check and mixup_check.startswith("qsignature"): utils.safe_makedir(out_dir) if not position: logger.info("There is no qsignature for this species: %s" % tz.get_in(['genome_build'], data)) return None if mixup_check == "qsignature_full": down_bam = bam_file else: down_bam = _slice_bam_chr21(bam_file, data) position = _slice_vcf_chr21(position, out_dir) out_name = os.path.basename(down_bam).replace("bam", "qsig.vcf") out_file = os.path.join(out_dir, out_name) log_file = os.path.join(out_dir, "qsig.log") cores = dd.get_cores(data) base_cmd = ("{qsig} {jvm_opts} " "org.qcmg.sig.SignatureGenerator " "--noOfThreads {cores} " "-log {log_file} -i {position} " "-i {down_bam} ") if not os.path.exists(out_file): file_qsign_out = "{0}.qsig.vcf".format(down_bam) do.run(base_cmd.format(**locals()), "qsignature vcf generation: %s" % dd.get_sample_name(data)) if os.path.exists(file_qsign_out): with file_transaction(data, out_file) as file_txt_out: shutil.move(file_qsign_out, file_txt_out) else: raise IOError("File doesn't exist %s" % file_qsign_out) return out_file return None
python
def run(bam_file, data, out_dir): """ Run SignatureGenerator to create normalize vcf that later will be input of qsignature_summary :param bam_file: (str) path of the bam_file :param data: (list) list containing the all the dictionary for this sample :param out_dir: (str) path of the output :returns: (string) output normalized vcf file """ qsig = config_utils.get_program("qsignature", data["config"]) res_qsig = config_utils.get_resources("qsignature", data["config"]) jvm_opts = " ".join(res_qsig.get("jvm_opts", ["-Xms750m", "-Xmx8g"])) if not qsig: logger.info("There is no qsignature tool. Skipping...") return None position = dd.get_qsig_file(data) mixup_check = dd.get_mixup_check(data) if mixup_check and mixup_check.startswith("qsignature"): utils.safe_makedir(out_dir) if not position: logger.info("There is no qsignature for this species: %s" % tz.get_in(['genome_build'], data)) return None if mixup_check == "qsignature_full": down_bam = bam_file else: down_bam = _slice_bam_chr21(bam_file, data) position = _slice_vcf_chr21(position, out_dir) out_name = os.path.basename(down_bam).replace("bam", "qsig.vcf") out_file = os.path.join(out_dir, out_name) log_file = os.path.join(out_dir, "qsig.log") cores = dd.get_cores(data) base_cmd = ("{qsig} {jvm_opts} " "org.qcmg.sig.SignatureGenerator " "--noOfThreads {cores} " "-log {log_file} -i {position} " "-i {down_bam} ") if not os.path.exists(out_file): file_qsign_out = "{0}.qsig.vcf".format(down_bam) do.run(base_cmd.format(**locals()), "qsignature vcf generation: %s" % dd.get_sample_name(data)) if os.path.exists(file_qsign_out): with file_transaction(data, out_file) as file_txt_out: shutil.move(file_qsign_out, file_txt_out) else: raise IOError("File doesn't exist %s" % file_qsign_out) return out_file return None
[ "def", "run", "(", "bam_file", ",", "data", ",", "out_dir", ")", ":", "qsig", "=", "config_utils", ".", "get_program", "(", "\"qsignature\"", ",", "data", "[", "\"config\"", "]", ")", "res_qsig", "=", "config_utils", ".", "get_resources", "(", "\"qsignature\...
Run SignatureGenerator to create normalize vcf that later will be input of qsignature_summary :param bam_file: (str) path of the bam_file :param data: (list) list containing the all the dictionary for this sample :param out_dir: (str) path of the output :returns: (string) output normalized vcf file
[ "Run", "SignatureGenerator", "to", "create", "normalize", "vcf", "that", "later", "will", "be", "input", "of", "qsignature_summary" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/qsignature.py#L20-L69
224,086
bcbio/bcbio-nextgen
bcbio/qc/qsignature.py
summary
def summary(*samples): """Run SignatureCompareRelatedSimple module from qsignature tool. Creates a matrix of pairwise comparison among samples. The function will not run if the output exists :param samples: list with only one element containing all samples information :returns: (dict) with the path of the output to be joined to summary """ warnings, similar = [], [] qsig = config_utils.get_program("qsignature", samples[0][0]["config"]) if not qsig: return [[]] res_qsig = config_utils.get_resources("qsignature", samples[0][0]["config"]) jvm_opts = " ".join(res_qsig.get("jvm_opts", ["-Xms750m", "-Xmx8g"])) work_dir = samples[0][0]["dirs"]["work"] count = 0 for data in samples: data = data[0] vcf = tz.get_in(["summary", "qc", "qsignature", "base"], data) if vcf: count += 1 vcf_name = dd.get_sample_name(data) + ".qsig.vcf" out_dir = utils.safe_makedir(os.path.join(work_dir, "qsignature")) if not os.path.lexists(os.path.join(out_dir, vcf_name)): os.symlink(vcf, os.path.join(out_dir, vcf_name)) if count > 0: qc_out_dir = utils.safe_makedir(os.path.join(work_dir, "qc", "qsignature")) out_file = os.path.join(qc_out_dir, "qsignature.xml") out_ma_file = os.path.join(qc_out_dir, "qsignature.ma") out_warn_file = os.path.join(qc_out_dir, "qsignature.warnings") log = os.path.join(work_dir, "qsignature", "qsig-summary.log") if not os.path.exists(out_file): with file_transaction(samples[0][0], out_file) as file_txt_out: base_cmd = ("{qsig} {jvm_opts} " "org.qcmg.sig.SignatureCompareRelatedSimple " "-log {log} -dir {out_dir} " "-o {file_txt_out} ") do.run(base_cmd.format(**locals()), "qsignature score calculation") error, warnings, similar = _parse_qsignature_output(out_file, out_ma_file, out_warn_file, samples[0][0]) return [{'total samples': count, 'similar samples pairs': len(similar), 'warnings samples pairs': len(warnings), 'error samples': list(error), 'out_dir': qc_out_dir}] else: return []
python
def summary(*samples): """Run SignatureCompareRelatedSimple module from qsignature tool. Creates a matrix of pairwise comparison among samples. The function will not run if the output exists :param samples: list with only one element containing all samples information :returns: (dict) with the path of the output to be joined to summary """ warnings, similar = [], [] qsig = config_utils.get_program("qsignature", samples[0][0]["config"]) if not qsig: return [[]] res_qsig = config_utils.get_resources("qsignature", samples[0][0]["config"]) jvm_opts = " ".join(res_qsig.get("jvm_opts", ["-Xms750m", "-Xmx8g"])) work_dir = samples[0][0]["dirs"]["work"] count = 0 for data in samples: data = data[0] vcf = tz.get_in(["summary", "qc", "qsignature", "base"], data) if vcf: count += 1 vcf_name = dd.get_sample_name(data) + ".qsig.vcf" out_dir = utils.safe_makedir(os.path.join(work_dir, "qsignature")) if not os.path.lexists(os.path.join(out_dir, vcf_name)): os.symlink(vcf, os.path.join(out_dir, vcf_name)) if count > 0: qc_out_dir = utils.safe_makedir(os.path.join(work_dir, "qc", "qsignature")) out_file = os.path.join(qc_out_dir, "qsignature.xml") out_ma_file = os.path.join(qc_out_dir, "qsignature.ma") out_warn_file = os.path.join(qc_out_dir, "qsignature.warnings") log = os.path.join(work_dir, "qsignature", "qsig-summary.log") if not os.path.exists(out_file): with file_transaction(samples[0][0], out_file) as file_txt_out: base_cmd = ("{qsig} {jvm_opts} " "org.qcmg.sig.SignatureCompareRelatedSimple " "-log {log} -dir {out_dir} " "-o {file_txt_out} ") do.run(base_cmd.format(**locals()), "qsignature score calculation") error, warnings, similar = _parse_qsignature_output(out_file, out_ma_file, out_warn_file, samples[0][0]) return [{'total samples': count, 'similar samples pairs': len(similar), 'warnings samples pairs': len(warnings), 'error samples': list(error), 'out_dir': qc_out_dir}] else: return []
[ "def", "summary", "(", "*", "samples", ")", ":", "warnings", ",", "similar", "=", "[", "]", ",", "[", "]", "qsig", "=", "config_utils", ".", "get_program", "(", "\"qsignature\"", ",", "samples", "[", "0", "]", "[", "0", "]", "[", "\"config\"", "]", ...
Run SignatureCompareRelatedSimple module from qsignature tool. Creates a matrix of pairwise comparison among samples. The function will not run if the output exists :param samples: list with only one element containing all samples information :returns: (dict) with the path of the output to be joined to summary
[ "Run", "SignatureCompareRelatedSimple", "module", "from", "qsignature", "tool", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/qsignature.py#L71-L118
224,087
bcbio/bcbio-nextgen
bcbio/qc/qsignature.py
_parse_qsignature_output
def _parse_qsignature_output(in_file, out_file, warning_file, data): """ Parse xml file produced by qsignature :param in_file: (str) with the path to the xml file :param out_file: (str) with the path to output file :param warning_file: (str) with the path to warning file :returns: (list) with samples that could be duplicated """ name = {} error, warnings, similar = set(), set(), set() same, replicate, related = 0, 0.1, 0.18 mixup_check = dd.get_mixup_check(data) if mixup_check == "qsignature_full": same, replicate, related = 0, 0.01, 0.061 with open(in_file, 'r') as in_handle: with file_transaction(data, out_file) as out_tx_file: with file_transaction(data, warning_file) as warn_tx_file: with open(out_tx_file, 'w') as out_handle: with open(warn_tx_file, 'w') as warn_handle: et = ET.parse(in_handle) for i in list(et.iter('file')): name[i.attrib['id']] = os.path.basename(i.attrib['name']).replace(".qsig.vcf", "") for i in list(et.iter('comparison')): msg = None pair = "-".join([name[i.attrib['file1']], name[i.attrib['file2']]]) out_handle.write("%s\t%s\t%s\n" % (name[i.attrib['file1']], name[i.attrib['file2']], i.attrib['score'])) if float(i.attrib['score']) == same: msg = 'qsignature ERROR: read same samples:%s\n' error.add(pair) elif float(i.attrib['score']) < replicate: msg = 'qsignature WARNING: read similar/replicate samples:%s\n' warnings.add(pair) elif float(i.attrib['score']) < related: msg = 'qsignature NOTE: read relative samples:%s\n' similar.add(pair) if msg: logger.info(msg % pair) warn_handle.write(msg % pair) return error, warnings, similar
python
def _parse_qsignature_output(in_file, out_file, warning_file, data): """ Parse xml file produced by qsignature :param in_file: (str) with the path to the xml file :param out_file: (str) with the path to output file :param warning_file: (str) with the path to warning file :returns: (list) with samples that could be duplicated """ name = {} error, warnings, similar = set(), set(), set() same, replicate, related = 0, 0.1, 0.18 mixup_check = dd.get_mixup_check(data) if mixup_check == "qsignature_full": same, replicate, related = 0, 0.01, 0.061 with open(in_file, 'r') as in_handle: with file_transaction(data, out_file) as out_tx_file: with file_transaction(data, warning_file) as warn_tx_file: with open(out_tx_file, 'w') as out_handle: with open(warn_tx_file, 'w') as warn_handle: et = ET.parse(in_handle) for i in list(et.iter('file')): name[i.attrib['id']] = os.path.basename(i.attrib['name']).replace(".qsig.vcf", "") for i in list(et.iter('comparison')): msg = None pair = "-".join([name[i.attrib['file1']], name[i.attrib['file2']]]) out_handle.write("%s\t%s\t%s\n" % (name[i.attrib['file1']], name[i.attrib['file2']], i.attrib['score'])) if float(i.attrib['score']) == same: msg = 'qsignature ERROR: read same samples:%s\n' error.add(pair) elif float(i.attrib['score']) < replicate: msg = 'qsignature WARNING: read similar/replicate samples:%s\n' warnings.add(pair) elif float(i.attrib['score']) < related: msg = 'qsignature NOTE: read relative samples:%s\n' similar.add(pair) if msg: logger.info(msg % pair) warn_handle.write(msg % pair) return error, warnings, similar
[ "def", "_parse_qsignature_output", "(", "in_file", ",", "out_file", ",", "warning_file", ",", "data", ")", ":", "name", "=", "{", "}", "error", ",", "warnings", ",", "similar", "=", "set", "(", ")", ",", "set", "(", ")", ",", "set", "(", ")", "same",...
Parse xml file produced by qsignature :param in_file: (str) with the path to the xml file :param out_file: (str) with the path to output file :param warning_file: (str) with the path to warning file :returns: (list) with samples that could be duplicated
[ "Parse", "xml", "file", "produced", "by", "qsignature" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/qsignature.py#L125-L166
224,088
bcbio/bcbio-nextgen
bcbio/qc/qsignature.py
_slice_bam_chr21
def _slice_bam_chr21(in_bam, data): """ return only one BAM file with only chromosome 21 """ sambamba = config_utils.get_program("sambamba", data["config"]) out_file = "%s-chr%s" % os.path.splitext(in_bam) if not utils.file_exists(out_file): bam.index(in_bam, data['config']) with pysam.Samfile(in_bam, "rb") as bamfile: bam_contigs = [c["SN"] for c in bamfile.header["SQ"]] chromosome = "21" if "chr21" in bam_contigs: chromosome = "chr21" with file_transaction(data, out_file) as tx_out_file: cmd = ("{sambamba} slice -o {tx_out_file} {in_bam} {chromosome}").format(**locals()) out = subprocess.check_output(cmd, shell=True) return out_file
python
def _slice_bam_chr21(in_bam, data): """ return only one BAM file with only chromosome 21 """ sambamba = config_utils.get_program("sambamba", data["config"]) out_file = "%s-chr%s" % os.path.splitext(in_bam) if not utils.file_exists(out_file): bam.index(in_bam, data['config']) with pysam.Samfile(in_bam, "rb") as bamfile: bam_contigs = [c["SN"] for c in bamfile.header["SQ"]] chromosome = "21" if "chr21" in bam_contigs: chromosome = "chr21" with file_transaction(data, out_file) as tx_out_file: cmd = ("{sambamba} slice -o {tx_out_file} {in_bam} {chromosome}").format(**locals()) out = subprocess.check_output(cmd, shell=True) return out_file
[ "def", "_slice_bam_chr21", "(", "in_bam", ",", "data", ")", ":", "sambamba", "=", "config_utils", ".", "get_program", "(", "\"sambamba\"", ",", "data", "[", "\"config\"", "]", ")", "out_file", "=", "\"%s-chr%s\"", "%", "os", ".", "path", ".", "splitext", "...
return only one BAM file with only chromosome 21
[ "return", "only", "one", "BAM", "file", "with", "only", "chromosome", "21" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/qsignature.py#L168-L184
224,089
bcbio/bcbio-nextgen
bcbio/qc/qsignature.py
_slice_vcf_chr21
def _slice_vcf_chr21(vcf_file, out_dir): """ Slice chr21 of qsignature SNPs to reduce computation time """ tmp_file = os.path.join(out_dir, "chr21_qsignature.vcf") if not utils.file_exists(tmp_file): cmd = ("grep chr21 {vcf_file} > {tmp_file}").format(**locals()) out = subprocess.check_output(cmd, shell=True) return tmp_file
python
def _slice_vcf_chr21(vcf_file, out_dir): """ Slice chr21 of qsignature SNPs to reduce computation time """ tmp_file = os.path.join(out_dir, "chr21_qsignature.vcf") if not utils.file_exists(tmp_file): cmd = ("grep chr21 {vcf_file} > {tmp_file}").format(**locals()) out = subprocess.check_output(cmd, shell=True) return tmp_file
[ "def", "_slice_vcf_chr21", "(", "vcf_file", ",", "out_dir", ")", ":", "tmp_file", "=", "os", ".", "path", ".", "join", "(", "out_dir", ",", "\"chr21_qsignature.vcf\"", ")", "if", "not", "utils", ".", "file_exists", "(", "tmp_file", ")", ":", "cmd", "=", ...
Slice chr21 of qsignature SNPs to reduce computation time
[ "Slice", "chr21", "of", "qsignature", "SNPs", "to", "reduce", "computation", "time" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/qsignature.py#L186-L194
224,090
bcbio/bcbio-nextgen
bcbio/variation/vcfanno.py
_combine_files
def _combine_files(orig_files, base_out_file, data, fill_paths=True): """Combine multiple input files, fixing file paths if needed. We fill in full paths from files in the data dictionary if we're not using basepath (old style GEMINI). """ orig_files = [x for x in orig_files if x and utils.file_exists(x)] if not orig_files: return None out_file = "%s-combine%s" % (utils.splitext_plus(base_out_file)[0], utils.splitext_plus(orig_files[0])[-1]) with open(out_file, "w") as out_handle: for orig_file in orig_files: with open(orig_file) as in_handle: for line in in_handle: if fill_paths and line.startswith("file"): line = _fill_file_path(line, data) out_handle.write(line) out_handle.write("\n\n") return out_file
python
def _combine_files(orig_files, base_out_file, data, fill_paths=True): """Combine multiple input files, fixing file paths if needed. We fill in full paths from files in the data dictionary if we're not using basepath (old style GEMINI). """ orig_files = [x for x in orig_files if x and utils.file_exists(x)] if not orig_files: return None out_file = "%s-combine%s" % (utils.splitext_plus(base_out_file)[0], utils.splitext_plus(orig_files[0])[-1]) with open(out_file, "w") as out_handle: for orig_file in orig_files: with open(orig_file) as in_handle: for line in in_handle: if fill_paths and line.startswith("file"): line = _fill_file_path(line, data) out_handle.write(line) out_handle.write("\n\n") return out_file
[ "def", "_combine_files", "(", "orig_files", ",", "base_out_file", ",", "data", ",", "fill_paths", "=", "True", ")", ":", "orig_files", "=", "[", "x", "for", "x", "in", "orig_files", "if", "x", "and", "utils", ".", "file_exists", "(", "x", ")", "]", "if...
Combine multiple input files, fixing file paths if needed. We fill in full paths from files in the data dictionary if we're not using basepath (old style GEMINI).
[ "Combine", "multiple", "input", "files", "fixing", "file", "paths", "if", "needed", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfanno.py#L47-L66
224,091
bcbio/bcbio-nextgen
bcbio/variation/vcfanno.py
_fill_file_path
def _fill_file_path(line, data): """Fill in a full file path in the configuration file from data dictionary. """ def _find_file(xs, target): if isinstance(xs, dict): for v in xs.values(): f = _find_file(v, target) if f: return f elif isinstance(xs, (list, tuple)): for x in xs: f = _find_file(x, target) if f: return f elif isinstance(xs, six.string_types) and os.path.exists(xs) and xs.endswith("/%s" % target): return xs orig_file = line.split("=")[-1].replace('"', '').strip() full_file = _find_file(data, os.path.basename(orig_file)) if not full_file and os.path.exists(os.path.abspath(orig_file)): full_file = os.path.abspath(orig_file) assert full_file, "Did not find vcfanno input file %s" % (orig_file) return 'file="%s"\n' % full_file
python
def _fill_file_path(line, data): """Fill in a full file path in the configuration file from data dictionary. """ def _find_file(xs, target): if isinstance(xs, dict): for v in xs.values(): f = _find_file(v, target) if f: return f elif isinstance(xs, (list, tuple)): for x in xs: f = _find_file(x, target) if f: return f elif isinstance(xs, six.string_types) and os.path.exists(xs) and xs.endswith("/%s" % target): return xs orig_file = line.split("=")[-1].replace('"', '').strip() full_file = _find_file(data, os.path.basename(orig_file)) if not full_file and os.path.exists(os.path.abspath(orig_file)): full_file = os.path.abspath(orig_file) assert full_file, "Did not find vcfanno input file %s" % (orig_file) return 'file="%s"\n' % full_file
[ "def", "_fill_file_path", "(", "line", ",", "data", ")", ":", "def", "_find_file", "(", "xs", ",", "target", ")", ":", "if", "isinstance", "(", "xs", ",", "dict", ")", ":", "for", "v", "in", "xs", ".", "values", "(", ")", ":", "f", "=", "_find_fi...
Fill in a full file path in the configuration file from data dictionary.
[ "Fill", "in", "a", "full", "file", "path", "in", "the", "configuration", "file", "from", "data", "dictionary", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfanno.py#L68-L89
224,092
bcbio/bcbio-nextgen
bcbio/variation/vcfanno.py
find_annotations
def find_annotations(data, retriever=None): """Find annotation configuration files for vcfanno, using pre-installed inputs. Creates absolute paths for user specified inputs and finds locally installed defaults. Default annotations: - gemini for variant pipelines - somatic for variant tumor pipelines - rnaedit for RNA-seq variant calling """ conf_files = dd.get_vcfanno(data) if not isinstance(conf_files, (list, tuple)): conf_files = [conf_files] for c in _default_conf_files(data, retriever): if c not in conf_files: conf_files.append(c) conf_checkers = {"gemini": annotate_gemini, "somatic": _annotate_somatic} out = [] annodir = os.path.normpath(os.path.join(os.path.dirname(dd.get_ref_file(data)), os.pardir, "config", "vcfanno")) if not retriever: annodir = os.path.abspath(annodir) for conf_file in conf_files: if objectstore.is_remote(conf_file) or (os.path.exists(conf_file) and os.path.isfile(conf_file)): conffn = conf_file elif not retriever: conffn = os.path.join(annodir, conf_file + ".conf") else: conffn = conf_file + ".conf" luafn = "%s.lua" % utils.splitext_plus(conffn)[0] if retriever: conffn, luafn = [(x if objectstore.is_remote(x) else None) for x in retriever.add_remotes([conffn, luafn], data["config"])] if not conffn: pass elif conf_file in conf_checkers and not conf_checkers[conf_file](data, retriever): logger.warn("Skipping vcfanno configuration: %s. Not all input files found." % conf_file) elif not objectstore.file_exists_or_remote(conffn): build = dd.get_genome_build(data) CONF_NOT_FOUND = ( "The vcfanno configuration {conffn} was not found for {build}, skipping.") logger.warn(CONF_NOT_FOUND.format(**locals())) else: out.append(conffn) if luafn and objectstore.file_exists_or_remote(luafn): out.append(luafn) return out
python
def find_annotations(data, retriever=None): """Find annotation configuration files for vcfanno, using pre-installed inputs. Creates absolute paths for user specified inputs and finds locally installed defaults. Default annotations: - gemini for variant pipelines - somatic for variant tumor pipelines - rnaedit for RNA-seq variant calling """ conf_files = dd.get_vcfanno(data) if not isinstance(conf_files, (list, tuple)): conf_files = [conf_files] for c in _default_conf_files(data, retriever): if c not in conf_files: conf_files.append(c) conf_checkers = {"gemini": annotate_gemini, "somatic": _annotate_somatic} out = [] annodir = os.path.normpath(os.path.join(os.path.dirname(dd.get_ref_file(data)), os.pardir, "config", "vcfanno")) if not retriever: annodir = os.path.abspath(annodir) for conf_file in conf_files: if objectstore.is_remote(conf_file) or (os.path.exists(conf_file) and os.path.isfile(conf_file)): conffn = conf_file elif not retriever: conffn = os.path.join(annodir, conf_file + ".conf") else: conffn = conf_file + ".conf" luafn = "%s.lua" % utils.splitext_plus(conffn)[0] if retriever: conffn, luafn = [(x if objectstore.is_remote(x) else None) for x in retriever.add_remotes([conffn, luafn], data["config"])] if not conffn: pass elif conf_file in conf_checkers and not conf_checkers[conf_file](data, retriever): logger.warn("Skipping vcfanno configuration: %s. Not all input files found." % conf_file) elif not objectstore.file_exists_or_remote(conffn): build = dd.get_genome_build(data) CONF_NOT_FOUND = ( "The vcfanno configuration {conffn} was not found for {build}, skipping.") logger.warn(CONF_NOT_FOUND.format(**locals())) else: out.append(conffn) if luafn and objectstore.file_exists_or_remote(luafn): out.append(luafn) return out
[ "def", "find_annotations", "(", "data", ",", "retriever", "=", "None", ")", ":", "conf_files", "=", "dd", ".", "get_vcfanno", "(", "data", ")", "if", "not", "isinstance", "(", "conf_files", ",", "(", "list", ",", "tuple", ")", ")", ":", "conf_files", "...
Find annotation configuration files for vcfanno, using pre-installed inputs. Creates absolute paths for user specified inputs and finds locally installed defaults. Default annotations: - gemini for variant pipelines - somatic for variant tumor pipelines - rnaedit for RNA-seq variant calling
[ "Find", "annotation", "configuration", "files", "for", "vcfanno", "using", "pre", "-", "installed", "inputs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfanno.py#L91-L137
224,093
bcbio/bcbio-nextgen
bcbio/variation/vcfanno.py
annotate_gemini
def annotate_gemini(data, retriever=None): """Annotate with population calls if have data installed. """ r = dd.get_variation_resources(data) return all([r.get(k) and objectstore.file_exists_or_remote(r[k]) for k in ["exac", "gnomad_exome"]])
python
def annotate_gemini(data, retriever=None): """Annotate with population calls if have data installed. """ r = dd.get_variation_resources(data) return all([r.get(k) and objectstore.file_exists_or_remote(r[k]) for k in ["exac", "gnomad_exome"]])
[ "def", "annotate_gemini", "(", "data", ",", "retriever", "=", "None", ")", ":", "r", "=", "dd", ".", "get_variation_resources", "(", "data", ")", "return", "all", "(", "[", "r", ".", "get", "(", "k", ")", "and", "objectstore", ".", "file_exists_or_remote...
Annotate with population calls if have data installed.
[ "Annotate", "with", "population", "calls", "if", "have", "data", "installed", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfanno.py#L150-L154
224,094
bcbio/bcbio-nextgen
bcbio/variation/vcfanno.py
_annotate_somatic
def _annotate_somatic(data, retriever=None): """Annotate somatic calls if we have cosmic data installed. """ if is_human(data): paired = vcfutils.get_paired([data]) if paired: r = dd.get_variation_resources(data) if r.get("cosmic") and objectstore.file_exists_or_remote(r["cosmic"]): return True return False
python
def _annotate_somatic(data, retriever=None): """Annotate somatic calls if we have cosmic data installed. """ if is_human(data): paired = vcfutils.get_paired([data]) if paired: r = dd.get_variation_resources(data) if r.get("cosmic") and objectstore.file_exists_or_remote(r["cosmic"]): return True return False
[ "def", "_annotate_somatic", "(", "data", ",", "retriever", "=", "None", ")", ":", "if", "is_human", "(", "data", ")", ":", "paired", "=", "vcfutils", ".", "get_paired", "(", "[", "data", "]", ")", "if", "paired", ":", "r", "=", "dd", ".", "get_variat...
Annotate somatic calls if we have cosmic data installed.
[ "Annotate", "somatic", "calls", "if", "we", "have", "cosmic", "data", "installed", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfanno.py#L156-L165
224,095
bcbio/bcbio-nextgen
bcbio/variation/vcfanno.py
is_human
def is_human(data, builds=None): """Check if human, optionally with build number, search by name or extra GL contigs. """ def has_build37_contigs(data): for contig in ref.file_contigs(dd.get_ref_file(data)): if contig.name.startswith("GL") or contig.name.find("_gl") >= 0: if contig.name in naming.GMAP["hg19"] or contig.name in naming.GMAP["GRCh37"]: return True return False if not builds and tz.get_in(["genome_resources", "aliases", "human"], data): return True if not builds or "37" in builds: target_builds = ["hg19", "GRCh37"] if any([dd.get_genome_build(data).startswith(b) for b in target_builds]): return True elif has_build37_contigs(data): return True if not builds or "38" in builds: target_builds = ["hg38"] if any([dd.get_genome_build(data).startswith(b) for b in target_builds]): return True return False
python
def is_human(data, builds=None): """Check if human, optionally with build number, search by name or extra GL contigs. """ def has_build37_contigs(data): for contig in ref.file_contigs(dd.get_ref_file(data)): if contig.name.startswith("GL") or contig.name.find("_gl") >= 0: if contig.name in naming.GMAP["hg19"] or contig.name in naming.GMAP["GRCh37"]: return True return False if not builds and tz.get_in(["genome_resources", "aliases", "human"], data): return True if not builds or "37" in builds: target_builds = ["hg19", "GRCh37"] if any([dd.get_genome_build(data).startswith(b) for b in target_builds]): return True elif has_build37_contigs(data): return True if not builds or "38" in builds: target_builds = ["hg38"] if any([dd.get_genome_build(data).startswith(b) for b in target_builds]): return True return False
[ "def", "is_human", "(", "data", ",", "builds", "=", "None", ")", ":", "def", "has_build37_contigs", "(", "data", ")", ":", "for", "contig", "in", "ref", ".", "file_contigs", "(", "dd", ".", "get_ref_file", "(", "data", ")", ")", ":", "if", "contig", ...
Check if human, optionally with build number, search by name or extra GL contigs.
[ "Check", "if", "human", "optionally", "with", "build", "number", "search", "by", "name", "or", "extra", "GL", "contigs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfanno.py#L167-L188
224,096
bcbio/bcbio-nextgen
bcbio/distributed/resources.py
_get_resource_programs
def _get_resource_programs(progs, algs): """Retrieve programs used in analysis based on algorithm configurations. Handles special cases like aligners and variant callers. """ checks = {"gatk-vqsr": config_utils.use_vqsr, "snpeff": config_utils.use_snpeff, "bcbio-variation-recall": config_utils.use_bcbio_variation_recall} parent_child = {"vardict": _parent_prefix("vardict")} out = set([]) for p in progs: if p == "aligner": for alg in algs: aligner = alg.get("aligner") if aligner and not isinstance(aligner, bool): out.add(aligner) elif p in ["variantcaller", "svcaller", "peakcaller"]: if p == "variantcaller": for key, fn in parent_child.items(): if fn(algs): out.add(key) for alg in algs: callers = alg.get(p) if callers and not isinstance(callers, bool): if isinstance(callers, dict): callers = reduce(operator.add, callers.values()) if isinstance(callers, (list, tuple)): for x in callers: out.add(x) else: out.add(callers) elif p in checks: if checks[p](algs): out.add(p) else: out.add(p) return sorted(list(out))
python
def _get_resource_programs(progs, algs): """Retrieve programs used in analysis based on algorithm configurations. Handles special cases like aligners and variant callers. """ checks = {"gatk-vqsr": config_utils.use_vqsr, "snpeff": config_utils.use_snpeff, "bcbio-variation-recall": config_utils.use_bcbio_variation_recall} parent_child = {"vardict": _parent_prefix("vardict")} out = set([]) for p in progs: if p == "aligner": for alg in algs: aligner = alg.get("aligner") if aligner and not isinstance(aligner, bool): out.add(aligner) elif p in ["variantcaller", "svcaller", "peakcaller"]: if p == "variantcaller": for key, fn in parent_child.items(): if fn(algs): out.add(key) for alg in algs: callers = alg.get(p) if callers and not isinstance(callers, bool): if isinstance(callers, dict): callers = reduce(operator.add, callers.values()) if isinstance(callers, (list, tuple)): for x in callers: out.add(x) else: out.add(callers) elif p in checks: if checks[p](algs): out.add(p) else: out.add(p) return sorted(list(out))
[ "def", "_get_resource_programs", "(", "progs", ",", "algs", ")", ":", "checks", "=", "{", "\"gatk-vqsr\"", ":", "config_utils", ".", "use_vqsr", ",", "\"snpeff\"", ":", "config_utils", ".", "use_snpeff", ",", "\"bcbio-variation-recall\"", ":", "config_utils", ".",...
Retrieve programs used in analysis based on algorithm configurations. Handles special cases like aligners and variant callers.
[ "Retrieve", "programs", "used", "in", "analysis", "based", "on", "algorithm", "configurations", ".", "Handles", "special", "cases", "like", "aligners", "and", "variant", "callers", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/resources.py#L14-L49
224,097
bcbio/bcbio-nextgen
bcbio/distributed/resources.py
_parent_prefix
def _parent_prefix(prefix): """Identify a parent prefix we should add to resources if present in a caller name. """ def run(algs): for alg in algs: vcs = alg.get("variantcaller") if vcs: if isinstance(vcs, dict): vcs = reduce(operator.add, vcs.values()) if not isinstance(vcs, (list, tuple)): vcs = [vcs] return any(vc.startswith(prefix) for vc in vcs if vc) return run
python
def _parent_prefix(prefix): """Identify a parent prefix we should add to resources if present in a caller name. """ def run(algs): for alg in algs: vcs = alg.get("variantcaller") if vcs: if isinstance(vcs, dict): vcs = reduce(operator.add, vcs.values()) if not isinstance(vcs, (list, tuple)): vcs = [vcs] return any(vc.startswith(prefix) for vc in vcs if vc) return run
[ "def", "_parent_prefix", "(", "prefix", ")", ":", "def", "run", "(", "algs", ")", ":", "for", "alg", "in", "algs", ":", "vcs", "=", "alg", ".", "get", "(", "\"variantcaller\"", ")", "if", "vcs", ":", "if", "isinstance", "(", "vcs", ",", "dict", ")"...
Identify a parent prefix we should add to resources if present in a caller name.
[ "Identify", "a", "parent", "prefix", "we", "should", "add", "to", "resources", "if", "present", "in", "a", "caller", "name", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/resources.py#L51-L63
224,098
bcbio/bcbio-nextgen
bcbio/distributed/resources.py
_ensure_min_resources
def _ensure_min_resources(progs, cores, memory, min_memory): """Ensure setting match minimum resources required for used programs. """ for p in progs: if p in min_memory: if not memory or cores * memory < min_memory[p]: memory = float(min_memory[p]) / cores return cores, memory
python
def _ensure_min_resources(progs, cores, memory, min_memory): """Ensure setting match minimum resources required for used programs. """ for p in progs: if p in min_memory: if not memory or cores * memory < min_memory[p]: memory = float(min_memory[p]) / cores return cores, memory
[ "def", "_ensure_min_resources", "(", "progs", ",", "cores", ",", "memory", ",", "min_memory", ")", ":", "for", "p", "in", "progs", ":", "if", "p", "in", "min_memory", ":", "if", "not", "memory", "or", "cores", "*", "memory", "<", "min_memory", "[", "p"...
Ensure setting match minimum resources required for used programs.
[ "Ensure", "setting", "match", "minimum", "resources", "required", "for", "used", "programs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/resources.py#L65-L72
224,099
bcbio/bcbio-nextgen
bcbio/distributed/resources.py
_get_prog_memory
def _get_prog_memory(resources, cores_per_job): """Get expected memory usage, in Gb per core, for a program from resource specification. """ out = None for jvm_opt in resources.get("jvm_opts", []): if jvm_opt.startswith("-Xmx"): out = _str_memory_to_gb(jvm_opt[4:]) memory = resources.get("memory") if memory: out = _str_memory_to_gb(memory) prog_cores = resources.get("cores") # if a single core with memory is requested for the job # and we run multiple cores, scale down to avoid overscheduling if out and prog_cores and int(prog_cores) == 1 and cores_per_job > int(prog_cores): out = out / float(cores_per_job) return out
python
def _get_prog_memory(resources, cores_per_job): """Get expected memory usage, in Gb per core, for a program from resource specification. """ out = None for jvm_opt in resources.get("jvm_opts", []): if jvm_opt.startswith("-Xmx"): out = _str_memory_to_gb(jvm_opt[4:]) memory = resources.get("memory") if memory: out = _str_memory_to_gb(memory) prog_cores = resources.get("cores") # if a single core with memory is requested for the job # and we run multiple cores, scale down to avoid overscheduling if out and prog_cores and int(prog_cores) == 1 and cores_per_job > int(prog_cores): out = out / float(cores_per_job) return out
[ "def", "_get_prog_memory", "(", "resources", ",", "cores_per_job", ")", ":", "out", "=", "None", "for", "jvm_opt", "in", "resources", ".", "get", "(", "\"jvm_opts\"", ",", "[", "]", ")", ":", "if", "jvm_opt", ".", "startswith", "(", "\"-Xmx\"", ")", ":",...
Get expected memory usage, in Gb per core, for a program from resource specification.
[ "Get", "expected", "memory", "usage", "in", "Gb", "per", "core", "for", "a", "program", "from", "resource", "specification", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/resources.py#L83-L98