id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
223,400
bcbio/bcbio-nextgen
bcbio/install.py
_datatarget_defaults
def _datatarget_defaults(args, default_args): """Set data installation targets, handling defaults. Sets variation, rnaseq, smallrna as default targets if we're not isolated to a single method. Provides back compatibility for toolplus specifications. """ default_data = default_args.get("datatarget", []) # back-compatible toolplus specifications for x in default_args.get("toolplus", []): val = None if x == "data": val = "gemini" elif x in ["cadd", "dbnsfp", "dbscsnv", "kraken", "gnomad"]: val = x if val and val not in default_data: default_data.append(val) new_val = getattr(args, "datatarget") for x in default_data: if x not in new_val: new_val.append(x) has_std_target = False std_targets = ["variation", "rnaseq", "smallrna"] for target in std_targets: if target in new_val: has_std_target = True break if not has_std_target: new_val = new_val + std_targets setattr(args, "datatarget", new_val) return args
python
def _datatarget_defaults(args, default_args): """Set data installation targets, handling defaults. Sets variation, rnaseq, smallrna as default targets if we're not isolated to a single method. Provides back compatibility for toolplus specifications. """ default_data = default_args.get("datatarget", []) # back-compatible toolplus specifications for x in default_args.get("toolplus", []): val = None if x == "data": val = "gemini" elif x in ["cadd", "dbnsfp", "dbscsnv", "kraken", "gnomad"]: val = x if val and val not in default_data: default_data.append(val) new_val = getattr(args, "datatarget") for x in default_data: if x not in new_val: new_val.append(x) has_std_target = False std_targets = ["variation", "rnaseq", "smallrna"] for target in std_targets: if target in new_val: has_std_target = True break if not has_std_target: new_val = new_val + std_targets setattr(args, "datatarget", new_val) return args
[ "def", "_datatarget_defaults", "(", "args", ",", "default_args", ")", ":", "default_data", "=", "default_args", ".", "get", "(", "\"datatarget\"", ",", "[", "]", ")", "# back-compatible toolplus specifications", "for", "x", "in", "default_args", ".", "get", "(", ...
Set data installation targets, handling defaults. Sets variation, rnaseq, smallrna as default targets if we're not isolated to a single method. Provides back compatibility for toolplus specifications.
[ "Set", "data", "installation", "targets", "handling", "defaults", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/install.py#L699-L730
223,401
bcbio/bcbio-nextgen
bcbio/cwl/workflow.py
_merge_wf_inputs
def _merge_wf_inputs(new, out, wf_outputs, to_ignore, parallel, nested_inputs):
    """Merge inputs for a sub-workflow, adding any not present inputs in out.

    Skips inputs that are internally generated or generated and ignored,
    keeping only as inputs those that we do not generate internally.

    Mutates and returns ``out`` along with a remapped deep copy of ``new``.
    NOTE(review): ``parallel`` is accepted but unused in this function.
    """
    internal_generated_ids = []
    for vignore in to_ignore:
        vignore_id = _get_string_vid(vignore)
        # ignore anything we generate internally, but not those we need to pull in
        # from the external process
        if vignore_id not in [v["id"] for v in wf_outputs]:
            internal_generated_ids.append(vignore_id)
    ignore_ids = set(internal_generated_ids + [v["id"] for v in wf_outputs])
    cur_ids = set([v["id"] for v in out])
    remapped_new = []
    for v in new:
        remapped_v = copy.deepcopy(v)
        outv = copy.deepcopy(v)
        # expose the variable under its base name, sourced from the original id
        outv["id"] = get_base_id(v["id"])
        outv["source"] = v["id"]
        if outv["id"] not in cur_ids and outv["id"] not in ignore_ids:
            # flatten variables that were nested by a previous split step
            if nested_inputs and v["id"] in nested_inputs:
                outv = _flatten_nested_input(outv)
            out.append(outv)
        # if the variable became a workflow input above, re-point the
        # remapped copy at the base-level input instead of the step output
        if remapped_v["id"] in set([v["source"] for v in out]):
            remapped_v["source"] = get_base_id(remapped_v["id"])
        remapped_new.append(remapped_v)
    return out, remapped_new
python
def _merge_wf_inputs(new, out, wf_outputs, to_ignore, parallel, nested_inputs): """Merge inputs for a sub-workflow, adding any not present inputs in out. Skips inputs that are internally generated or generated and ignored, keeping only as inputs those that we do not generate internally. """ internal_generated_ids = [] for vignore in to_ignore: vignore_id = _get_string_vid(vignore) # ignore anything we generate internally, but not those we need to pull in # from the external process if vignore_id not in [v["id"] for v in wf_outputs]: internal_generated_ids.append(vignore_id) ignore_ids = set(internal_generated_ids + [v["id"] for v in wf_outputs]) cur_ids = set([v["id"] for v in out]) remapped_new = [] for v in new: remapped_v = copy.deepcopy(v) outv = copy.deepcopy(v) outv["id"] = get_base_id(v["id"]) outv["source"] = v["id"] if outv["id"] not in cur_ids and outv["id"] not in ignore_ids: if nested_inputs and v["id"] in nested_inputs: outv = _flatten_nested_input(outv) out.append(outv) if remapped_v["id"] in set([v["source"] for v in out]): remapped_v["source"] = get_base_id(remapped_v["id"]) remapped_new.append(remapped_v) return out, remapped_new
[ "def", "_merge_wf_inputs", "(", "new", ",", "out", ",", "wf_outputs", ",", "to_ignore", ",", "parallel", ",", "nested_inputs", ")", ":", "internal_generated_ids", "=", "[", "]", "for", "vignore", "in", "to_ignore", ":", "vignore_id", "=", "_get_string_vid", "(...
Merge inputs for a sub-workflow, adding any not present inputs in out. Skips inputs that are internally generated or generated and ignored, keeping only as inputs those that we do not generate internally.
[ "Merge", "inputs", "for", "a", "sub", "-", "workflow", "adding", "any", "not", "present", "inputs", "in", "out", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/workflow.py#L72-L100
223,402
bcbio/bcbio-nextgen
bcbio/cwl/workflow.py
_merge_wf_outputs
def _merge_wf_outputs(new, cur, parallel):
    """Merge outputs for a sub-workflow, replacing variables changed in later steps.

    ignore_ids are those used internally in a sub-workflow but not exposed to subsequent steps
    """
    merged = []
    seen_ids = set([])
    for v in new:
        entry = {"source": v["id"],
                 "id": "%s" % get_base_id(v["id"]),
                 "type": v["type"]}
        # carry along secondary file specifications, preferring any
        # attached to the outputBinding over the top-level key
        if "secondaryFiles" in v:
            entry["secondaryFiles"] = v["secondaryFiles"]
        binding_secondary = tz.get_in(["outputBinding", "secondaryFiles"], v)
        if binding_secondary:
            entry["secondaryFiles"] = binding_secondary
        seen_ids.add(entry["id"])
        merged.append(entry)
    # keep earlier outputs that were not superseded by this step
    for prev in cur:
        if prev["id"] not in seen_ids:
            merged.append(prev)
    return merged
python
def _merge_wf_outputs(new, cur, parallel): """Merge outputs for a sub-workflow, replacing variables changed in later steps. ignore_ids are those used internally in a sub-workflow but not exposed to subsequent steps """ new_ids = set([]) out = [] for v in new: outv = {} outv["source"] = v["id"] outv["id"] = "%s" % get_base_id(v["id"]) outv["type"] = v["type"] if "secondaryFiles" in v: outv["secondaryFiles"] = v["secondaryFiles"] if tz.get_in(["outputBinding", "secondaryFiles"], v): outv["secondaryFiles"] = tz.get_in(["outputBinding", "secondaryFiles"], v) new_ids.add(outv["id"]) out.append(outv) for outv in cur: if outv["id"] not in new_ids: out.append(outv) return out
[ "def", "_merge_wf_outputs", "(", "new", ",", "cur", ",", "parallel", ")", ":", "new_ids", "=", "set", "(", "[", "]", ")", "out", "=", "[", "]", "for", "v", "in", "new", ":", "outv", "=", "{", "}", "outv", "[", "\"source\"", "]", "=", "v", "[", ...
Merge outputs for a sub-workflow, replacing variables changed in later steps. ignore_ids are those used internally in a sub-workflow but not exposed to subsequent steps
[ "Merge", "outputs", "for", "a", "sub", "-", "workflow", "replacing", "variables", "changed", "in", "later", "steps", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/workflow.py#L102-L123
223,403
bcbio/bcbio-nextgen
bcbio/cwl/workflow.py
_extract_from_subworkflow
def _extract_from_subworkflow(vs, step): """Remove internal variable names when moving from sub-workflow to main. """ substep_ids = set([x.name for x in step.workflow]) out = [] for var in vs: internal = False parts = var["id"].split("/") if len(parts) > 1: if parts[0] in substep_ids: internal = True if not internal: var.pop("source", None) out.append(var) return out
python
def _extract_from_subworkflow(vs, step): """Remove internal variable names when moving from sub-workflow to main. """ substep_ids = set([x.name for x in step.workflow]) out = [] for var in vs: internal = False parts = var["id"].split("/") if len(parts) > 1: if parts[0] in substep_ids: internal = True if not internal: var.pop("source", None) out.append(var) return out
[ "def", "_extract_from_subworkflow", "(", "vs", ",", "step", ")", ":", "substep_ids", "=", "set", "(", "[", "x", ".", "name", "for", "x", "in", "step", ".", "workflow", "]", ")", "out", "=", "[", "]", "for", "var", "in", "vs", ":", "internal", "=", ...
Remove internal variable names when moving from sub-workflow to main.
[ "Remove", "internal", "variable", "names", "when", "moving", "from", "sub", "-", "workflow", "to", "main", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/workflow.py#L125-L139
223,404
bcbio/bcbio-nextgen
bcbio/cwl/workflow.py
is_cwl_record
def is_cwl_record(d):
    """Check if an input is a CWL record, from any level of nesting.
    """
    if not isinstance(d, dict):
        return None
    if d.get("type") == "record":
        return d
    # search nested values, returning the first record found
    for value in d.values():
        found = is_cwl_record(value)
        if found is not None:
            return found
    return None
python
def is_cwl_record(d): """Check if an input is a CWL record, from any level of nesting. """ if isinstance(d, dict): if d.get("type") == "record": return d else: recs = list(filter(lambda x: x is not None, [is_cwl_record(v) for v in d.values()])) return recs[0] if recs else None else: return None
[ "def", "is_cwl_record", "(", "d", ")", ":", "if", "isinstance", "(", "d", ",", "dict", ")", ":", "if", "d", ".", "get", "(", "\"type\"", ")", "==", "\"record\"", ":", "return", "d", "else", ":", "recs", "=", "list", "(", "filter", "(", "lambda", ...
Check if an input is a CWL record, from any level of nesting.
[ "Check", "if", "an", "input", "is", "a", "CWL", "record", "from", "any", "level", "of", "nesting", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/workflow.py#L150-L160
223,405
bcbio/bcbio-nextgen
bcbio/cwl/workflow.py
_get_step_inputs
def _get_step_inputs(step, file_vs, std_vs, parallel_ids, wf=None):
    """Retrieve inputs for a step from existing variables.

    Potentially nests inputs to deal with merging split variables. If we split
    previously and are merging now, then we only nest those coming from the
    split process.

    Returns (inputs, parallel_ids, nested_inputs).
    NOTE(review): ``wf`` is accepted but unused in this function.
    """
    inputs = []
    # NOTE(review): never populated here; retained as a filter hook for the
    # std_vs pass below so behavior matches prior callers.
    skip_inputs = set([])
    for orig_input in [_get_variable(x, file_vs) for x in _handle_special_inputs(step.inputs, file_vs)]:
        inputs.append(orig_input)
    # Only add description and other information for non-record inputs,
    # otherwise batched with records
    if not any(is_cwl_record(x) for x in inputs):
        inputs += [v for v in std_vs if get_base_id(v["id"]) not in skip_inputs]
    nested_inputs = []
    if step.parallel in ["single-merge", "batch-merge"]:
        # merging after a split: nest only the variables produced by the split
        if parallel_ids:
            inputs = [_nest_variable(x) if x["id"] in parallel_ids else x for x in inputs]
            nested_inputs = parallel_ids[:]
            parallel_ids = []
    elif step.parallel in ["multi-combined"]:
        assert len(parallel_ids) == 0
        nested_inputs = [x["id"] for x in inputs]
        inputs = [_nest_variable(x) for x in inputs]
    elif step.parallel in ["multi-batch"]:
        assert len(parallel_ids) == 0
        nested_inputs = [x["id"] for x in inputs]
        # If we're batching, with mixed records/inputs avoid double nesting records
        inputs = [_nest_variable(x, check_records=(len(inputs) > 1)) for x in inputs]
    # Flag inputs that share a base name with a step output to avoid
    # duplicated ids. Previously this built a separate `final_inputs` list
    # that was never returned; marking in place is equivalent and direct.
    outputs = [_get_string_vid(x["id"]) for x in step.outputs]
    for inp in inputs:
        inp["wf_duplicate"] = get_base_id(inp["id"]) in outputs
    return inputs, parallel_ids, nested_inputs
python
def _get_step_inputs(step, file_vs, std_vs, parallel_ids, wf=None): """Retrieve inputs for a step from existing variables. Potentially nests inputs to deal with merging split variables. If we split previously and are merging now, then we only nest those coming from the split process. """ inputs = [] skip_inputs = set([]) for orig_input in [_get_variable(x, file_vs) for x in _handle_special_inputs(step.inputs, file_vs)]: inputs.append(orig_input) # Only add description and other information for non-record inputs, otherwise batched with records if not any(is_cwl_record(x) for x in inputs): inputs += [v for v in std_vs if get_base_id(v["id"]) not in skip_inputs] nested_inputs = [] if step.parallel in ["single-merge", "batch-merge"]: if parallel_ids: inputs = [_nest_variable(x) if x["id"] in parallel_ids else x for x in inputs] nested_inputs = parallel_ids[:] parallel_ids = [] elif step.parallel in ["multi-combined"]: assert len(parallel_ids) == 0 nested_inputs = [x["id"] for x in inputs] inputs = [_nest_variable(x) for x in inputs] elif step.parallel in ["multi-batch"]: assert len(parallel_ids) == 0 nested_inputs = [x["id"] for x in inputs] # If we're batching,with mixed records/inputs avoid double nesting records inputs = [_nest_variable(x, check_records=(len(inputs) > 1)) for x in inputs] # avoid inputs/outputs with the same name outputs = [_get_string_vid(x["id"]) for x in step.outputs] final_inputs = [] for input in inputs: input["wf_duplicate"] = get_base_id(input["id"]) in outputs final_inputs.append(input) return inputs, parallel_ids, nested_inputs
[ "def", "_get_step_inputs", "(", "step", ",", "file_vs", ",", "std_vs", ",", "parallel_ids", ",", "wf", "=", "None", ")", ":", "inputs", "=", "[", "]", "skip_inputs", "=", "set", "(", "[", "]", ")", "for", "orig_input", "in", "[", "_get_variable", "(", ...
Retrieve inputs for a step from existing variables. Potentially nests inputs to deal with merging split variables. If we split previously and are merging now, then we only nest those coming from the split process.
[ "Retrieve", "inputs", "for", "a", "step", "from", "existing", "variables", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/workflow.py#L162-L197
223,406
bcbio/bcbio-nextgen
bcbio/cwl/workflow.py
_nest_variable
def _nest_variable(v, check_records=False): """Nest a variable when moving from scattered back to consolidated. check_records -- avoid re-nesting a record input if it comes from a previous step and is already nested, don't need to re-array. """ if (check_records and is_cwl_record(v) and len(v["id"].split("/")) > 1 and v.get("type", {}).get("type") == "array"): return v else: v = copy.deepcopy(v) v["type"] = {"type": "array", "items": v["type"]} return v
python
def _nest_variable(v, check_records=False): """Nest a variable when moving from scattered back to consolidated. check_records -- avoid re-nesting a record input if it comes from a previous step and is already nested, don't need to re-array. """ if (check_records and is_cwl_record(v) and len(v["id"].split("/")) > 1 and v.get("type", {}).get("type") == "array"): return v else: v = copy.deepcopy(v) v["type"] = {"type": "array", "items": v["type"]} return v
[ "def", "_nest_variable", "(", "v", ",", "check_records", "=", "False", ")", ":", "if", "(", "check_records", "and", "is_cwl_record", "(", "v", ")", "and", "len", "(", "v", "[", "\"id\"", "]", ".", "split", "(", "\"/\"", ")", ")", ">", "1", "and", "...
Nest a variable when moving from scattered back to consolidated. check_records -- avoid re-nesting a record input if it comes from a previous step and is already nested, don't need to re-array.
[ "Nest", "a", "variable", "when", "moving", "from", "scattered", "back", "to", "consolidated", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/workflow.py#L251-L263
223,407
bcbio/bcbio-nextgen
bcbio/cwl/workflow.py
_clean_output
def _clean_output(v): """Remove output specific variables to allow variables to be inputs to next steps. """ out = copy.deepcopy(v) outb = out.pop("outputBinding", {}) if "secondaryFiles" in outb: out["secondaryFiles"] = outb["secondaryFiles"] return out
python
def _clean_output(v): """Remove output specific variables to allow variables to be inputs to next steps. """ out = copy.deepcopy(v) outb = out.pop("outputBinding", {}) if "secondaryFiles" in outb: out["secondaryFiles"] = outb["secondaryFiles"] return out
[ "def", "_clean_output", "(", "v", ")", ":", "out", "=", "copy", ".", "deepcopy", "(", "v", ")", "outb", "=", "out", ".", "pop", "(", "\"outputBinding\"", ",", "{", "}", ")", "if", "\"secondaryFiles\"", "in", "outb", ":", "out", "[", "\"secondaryFiles\"...
Remove output specific variables to allow variables to be inputs to next steps.
[ "Remove", "output", "specific", "variables", "to", "allow", "variables", "to", "be", "inputs", "to", "next", "steps", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/workflow.py#L265-L272
223,408
bcbio/bcbio-nextgen
bcbio/cwl/workflow.py
_get_variable
def _get_variable(vid, variables):
    """Retrieve an input variable from our existing pool of options.
    """
    # normalize the requested id into a base string form for comparison
    if isinstance(vid, six.string_types):
        lookup = get_base_id(vid)
    else:
        lookup = _get_string_vid(vid)
    for candidate in variables:
        if lookup == get_base_id(candidate["id"]):
            return copy.deepcopy(candidate)
    raise ValueError("Did not find variable %s in \n%s" % (lookup, pprint.pformat(variables)))
python
def _get_variable(vid, variables): """Retrieve an input variable from our existing pool of options. """ if isinstance(vid, six.string_types): vid = get_base_id(vid) else: vid = _get_string_vid(vid) for v in variables: if vid == get_base_id(v["id"]): return copy.deepcopy(v) raise ValueError("Did not find variable %s in \n%s" % (vid, pprint.pformat(variables)))
[ "def", "_get_variable", "(", "vid", ",", "variables", ")", ":", "if", "isinstance", "(", "vid", ",", "six", ".", "string_types", ")", ":", "vid", "=", "get_base_id", "(", "vid", ")", "else", ":", "vid", "=", "_get_string_vid", "(", "vid", ")", "for", ...
Retrieve an input variable from our existing pool of options.
[ "Retrieve", "an", "input", "variable", "from", "our", "existing", "pool", "of", "options", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/workflow.py#L280-L290
223,409
bcbio/bcbio-nextgen
bcbio/cwl/workflow.py
_handle_special_inputs
def _handle_special_inputs(inputs, variables):
    """Adjust input variables based on special cases.

    This case handles inputs where we are optional or can have flexible choices.

    XXX Need to better expose this at a top level definition.
    """
    from bcbio import structural
    # optional inputs: only passed through when present in the variable pool
    optional = [["config", "algorithm", "coverage"],
                ["config", "algorithm", "variant_regions"],
                ["config", "algorithm", "sv_regions"],
                ["config", "algorithm", "validate"],
                ["config", "algorithm", "validate_regions"]]
    all_vs = set([get_base_id(v["id"]) for v in variables])
    out = []
    for input in inputs:
        if input == ["reference", "aligner", "indexes"]:
            # expand to the reference indexes of every supported aligner
            for v in variables:
                vid = get_base_id(v["id"]).split("__")
                if vid[0] == "reference" and vid[1] in alignment.TOOLS:
                    out.append(vid)
        elif input == ["reference", "snpeff", "genome_build"]:
            # expand to available snpEff indexes; these must exist
            found_indexes = False
            for v in variables:
                vid = get_base_id(v["id"]).split("__")
                if vid[0] == "reference" and vid[1] == "snpeff":
                    out.append(vid)
                    found_indexes = True
            assert found_indexes, "Found no snpEff indexes in %s" % [v["id"] for v in variables]
        elif input == ["config", "algorithm", "background", "cnv_reference"]:
            # only pass CNV backgrounds for callers that support a reference
            for v in variables:
                vid = get_base_id(v["id"]).split("__")
                if (vid[:4] == ["config", "algorithm", "background", "cnv_reference"] and
                        structural.supports_cnv_reference(vid[4])):
                    out.append(vid)
        elif input in optional:
            if _get_string_vid(input) in all_vs:
                out.append(input)
        else:
            out.append(input)
    return out
python
def _handle_special_inputs(inputs, variables): """Adjust input variables based on special cases. This case handles inputs where we are optional or can have flexible choices. XXX Need to better expose this at a top level definition. """ from bcbio import structural optional = [["config", "algorithm", "coverage"], ["config", "algorithm", "variant_regions"], ["config", "algorithm", "sv_regions"], ["config", "algorithm", "validate"], ["config", "algorithm", "validate_regions"]] all_vs = set([get_base_id(v["id"]) for v in variables]) out = [] for input in inputs: if input == ["reference", "aligner", "indexes"]: for v in variables: vid = get_base_id(v["id"]).split("__") if vid[0] == "reference" and vid[1] in alignment.TOOLS: out.append(vid) elif input == ["reference", "snpeff", "genome_build"]: found_indexes = False for v in variables: vid = get_base_id(v["id"]).split("__") if vid[0] == "reference" and vid[1] == "snpeff": out.append(vid) found_indexes = True assert found_indexes, "Found no snpEff indexes in %s" % [v["id"] for v in variables] elif input == ["config", "algorithm", "background", "cnv_reference"]: for v in variables: vid = get_base_id(v["id"]).split("__") if (vid[:4] == ["config", "algorithm", "background", "cnv_reference"] and structural.supports_cnv_reference(vid[4])): out.append(vid) elif input in optional: if _get_string_vid(input) in all_vs: out.append(input) else: out.append(input) return out
[ "def", "_handle_special_inputs", "(", "inputs", ",", "variables", ")", ":", "from", "bcbio", "import", "structural", "optional", "=", "[", "[", "\"config\"", ",", "\"algorithm\"", ",", "\"coverage\"", "]", ",", "[", "\"config\"", ",", "\"algorithm\"", ",", "\"...
Adjust input variables based on special cases. This case handles inputs where we are optional or can have flexible choices. XXX Need to better expose this at a top level definition.
[ "Adjust", "input", "variables", "based", "on", "special", "cases", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/workflow.py#L292-L332
223,410
bcbio/bcbio-nextgen
bcbio/cwl/workflow.py
_create_record
def _create_record(name, field_defs, step_name, inputs, unlist, file_vs, std_vs, parallel):
    """Create an output record by rearranging inputs.

    Batching processes create records that reformat the inputs for
    parallelization.
    """
    if field_defs:
        fields = []
        inherit = []
        inherit_all = False
        inherit_exclude = []
        for fdef in field_defs:
            # definitions without a type are inheritance directives rather
            # than concrete fields
            if not fdef.get("type"):
                if fdef["id"] == "inherit":
                    inherit_all = True
                    inherit_exclude = fdef.get("exclude", [])
                else:
                    inherit.append(fdef["id"])
            else:
                cur = {"name": _get_string_vid(fdef["id"]),
                       "type": fdef["type"]}
                fields.append(_add_secondary_to_rec_field(fdef, cur))
        if inherit_all:
            # pull in all input-derived fields, minus explicit exclusions
            fields.extend(_infer_record_outputs(inputs, unlist, file_vs, std_vs, parallel,
                                                exclude=inherit_exclude))
        elif inherit:
            # pull in only the named input-derived fields
            fields.extend(_infer_record_outputs(inputs, unlist, file_vs, std_vs, parallel,
                                                inherit))
    else:
        # no explicit definitions: derive all fields from the inputs
        fields = _infer_record_outputs(inputs, unlist, file_vs, std_vs, parallel)
    out = {"id": "%s/%s" % (step_name, name),
           "type": {"name": name,
                    "type": "record",
                    "fields": fields}}
    # batch-producing steps emit an array of records
    if parallel in ["batch-single", "multi-batch"]:
        out = _nest_variable(out)
    return out
python
def _create_record(name, field_defs, step_name, inputs, unlist, file_vs, std_vs, parallel): """Create an output record by rearranging inputs. Batching processes create records that reformat the inputs for parallelization. """ if field_defs: fields = [] inherit = [] inherit_all = False inherit_exclude = [] for fdef in field_defs: if not fdef.get("type"): if fdef["id"] == "inherit": inherit_all = True inherit_exclude = fdef.get("exclude", []) else: inherit.append(fdef["id"]) else: cur = {"name": _get_string_vid(fdef["id"]), "type": fdef["type"]} fields.append(_add_secondary_to_rec_field(fdef, cur)) if inherit_all: fields.extend(_infer_record_outputs(inputs, unlist, file_vs, std_vs, parallel, exclude=inherit_exclude)) elif inherit: fields.extend(_infer_record_outputs(inputs, unlist, file_vs, std_vs, parallel, inherit)) else: fields = _infer_record_outputs(inputs, unlist, file_vs, std_vs, parallel) out = {"id": "%s/%s" % (step_name, name), "type": {"name": name, "type": "record", "fields": fields}} if parallel in ["batch-single", "multi-batch"]: out = _nest_variable(out) return out
[ "def", "_create_record", "(", "name", ",", "field_defs", ",", "step_name", ",", "inputs", ",", "unlist", ",", "file_vs", ",", "std_vs", ",", "parallel", ")", ":", "if", "field_defs", ":", "fields", "=", "[", "]", "inherit", "=", "[", "]", "inherit_all", ...
Create an output record by rearranging inputs. Batching processes create records that reformat the inputs for parallelization.
[ "Create", "an", "output", "record", "by", "rearranging", "inputs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/workflow.py#L348-L382
223,411
bcbio/bcbio-nextgen
bcbio/cwl/workflow.py
_infer_record_outputs
def _infer_record_outputs(inputs, unlist, file_vs, std_vs, parallel, to_include=None, exclude=None):
    """Infer the outputs of a record from the original inputs

    NOTE(review): ``parallel`` is accepted but unused in this function.
    to_include restricts fields to the named ids; exclude drops fields whose
    base id starts with any excluded prefix.
    """
    fields = []
    unlist = set([_get_string_vid(x) for x in unlist])
    input_vids = set([_get_string_vid(v) for v in _handle_special_inputs(inputs, file_vs)])
    to_include = set([_get_string_vid(x) for x in to_include]) if to_include else None
    # tuple form so it can feed str.startswith below
    to_exclude = tuple(set([_get_string_vid(x) for x in exclude])) if exclude else None
    added = set([])
    for raw_v in std_vs + [v for v in file_vs if get_base_id(v["id"]) in input_vids]:
        # unpack record inside this record and un-nested inputs to avoid double nested
        cur_record = is_cwl_record(raw_v)
        if cur_record:
            # unlist = unlist | set([field["name"] for field in cur_record["fields"]])
            nested_vs = [{"id": field["name"], "type": field["type"]} for field in cur_record["fields"]]
        else:
            nested_vs = [raw_v]
        for orig_v in nested_vs:
            # keep each base id once, honoring include/exclude filters
            if (get_base_id(orig_v["id"]) not in added
                    and (not to_include or get_base_id(orig_v["id"]) in to_include)):
                if to_exclude is None or not get_base_id(orig_v["id"]).startswith(to_exclude):
                    cur_v = {}
                    cur_v["name"] = get_base_id(orig_v["id"])
                    cur_v["type"] = orig_v["type"]
                    # un-nest variables flagged for flattening
                    if cur_v["name"] in unlist:
                        cur_v = _flatten_nested_input(cur_v)
                    fields.append(_add_secondary_to_rec_field(orig_v, cur_v))
                    added.add(get_base_id(orig_v["id"]))
    return fields
python
def _infer_record_outputs(inputs, unlist, file_vs, std_vs, parallel, to_include=None, exclude=None): """Infer the outputs of a record from the original inputs """ fields = [] unlist = set([_get_string_vid(x) for x in unlist]) input_vids = set([_get_string_vid(v) for v in _handle_special_inputs(inputs, file_vs)]) to_include = set([_get_string_vid(x) for x in to_include]) if to_include else None to_exclude = tuple(set([_get_string_vid(x) for x in exclude])) if exclude else None added = set([]) for raw_v in std_vs + [v for v in file_vs if get_base_id(v["id"]) in input_vids]: # unpack record inside this record and un-nested inputs to avoid double nested cur_record = is_cwl_record(raw_v) if cur_record: # unlist = unlist | set([field["name"] for field in cur_record["fields"]]) nested_vs = [{"id": field["name"], "type": field["type"]} for field in cur_record["fields"]] else: nested_vs = [raw_v] for orig_v in nested_vs: if (get_base_id(orig_v["id"]) not in added and (not to_include or get_base_id(orig_v["id"]) in to_include)): if to_exclude is None or not get_base_id(orig_v["id"]).startswith(to_exclude): cur_v = {} cur_v["name"] = get_base_id(orig_v["id"]) cur_v["type"] = orig_v["type"] if cur_v["name"] in unlist: cur_v = _flatten_nested_input(cur_v) fields.append(_add_secondary_to_rec_field(orig_v, cur_v)) added.add(get_base_id(orig_v["id"])) return fields
[ "def", "_infer_record_outputs", "(", "inputs", ",", "unlist", ",", "file_vs", ",", "std_vs", ",", "parallel", ",", "to_include", "=", "None", ",", "exclude", "=", "None", ")", ":", "fields", "=", "[", "]", "unlist", "=", "set", "(", "[", "_get_string_vid...
Infer the outputs of a record from the original inputs
[ "Infer", "the", "outputs", "of", "a", "record", "from", "the", "original", "inputs" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/workflow.py#L390-L419
223,412
bcbio/bcbio-nextgen
bcbio/cwl/workflow.py
_create_variable
def _create_variable(orig_v, step, variables):
    """Create a new output variable, potentially over-writing existing or creating new.
    """
    # get current variable, and convert to be the output of our process step
    try:
        out_v = _get_variable(orig_v["id"], variables)
    except ValueError:
        # not previously known: start from a copy with a normalized string id
        out_v = copy.deepcopy(orig_v)
        if not isinstance(out_v["id"], six.string_types):
            out_v["id"] = _get_string_vid(out_v["id"])
    # carry over all attributes except id/type, which get special handling
    for key, val in orig_v.items():
        if key not in ["id", "type"]:
            out_v[key] = val
    if orig_v.get("type") != "null":
        out_v["type"] = orig_v["type"]
    out_v["id"] = "%s/%s" % (step.name, get_base_id(out_v["id"]))
    return out_v
python
def _create_variable(orig_v, step, variables): """Create a new output variable, potentially over-writing existing or creating new. """ # get current variable, and convert to be the output of our process step try: v = _get_variable(orig_v["id"], variables) except ValueError: v = copy.deepcopy(orig_v) if not isinstance(v["id"], six.string_types): v["id"] = _get_string_vid(v["id"]) for key, val in orig_v.items(): if key not in ["id", "type"]: v[key] = val if orig_v.get("type") != "null": v["type"] = orig_v["type"] v["id"] = "%s/%s" % (step.name, get_base_id(v["id"])) return v
[ "def", "_create_variable", "(", "orig_v", ",", "step", ",", "variables", ")", ":", "# get current variable, and convert to be the output of our process step", "try", ":", "v", "=", "_get_variable", "(", "orig_v", "[", "\"id\"", "]", ",", "variables", ")", "except", ...
Create a new output variable, potentially over-writing existing or creating new.
[ "Create", "a", "new", "output", "variable", "potentially", "over", "-", "writing", "existing", "or", "creating", "new", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/workflow.py#L421-L437
223,413
bcbio/bcbio-nextgen
bcbio/cwl/workflow.py
_merge_variables
def _merge_variables(new, cur):
    """Add any new variables to the world representation in cur.

    Replaces any variables adjusted by previous steps.
    """
    replaced_ids = set([])
    merged = []
    for cur_var in cur:
        # prefer the updated version of an existing variable, if any
        replacement = None
        for new_var in new:
            if get_base_id(new_var["id"]) == get_base_id(cur_var["id"]):
                replacement = new_var
                break
        if replacement is not None:
            merged.append(replacement)
            replaced_ids.add(replacement["id"])
        else:
            merged.append(cur_var)
    # append brand-new variables that did not replace anything
    merged.extend([new_var for new_var in new if new_var["id"] not in replaced_ids])
    return merged
python
def _merge_variables(new, cur): """Add any new variables to the world representation in cur. Replaces any variables adjusted by previous steps. """ new_added = set([]) out = [] for cur_var in cur: updated = False for new_var in new: if get_base_id(new_var["id"]) == get_base_id(cur_var["id"]): out.append(new_var) new_added.add(new_var["id"]) updated = True break if not updated: out.append(cur_var) for new_var in new: if new_var["id"] not in new_added: out.append(new_var) return out
[ "def", "_merge_variables", "(", "new", ",", "cur", ")", ":", "new_added", "=", "set", "(", "[", "]", ")", "out", "=", "[", "]", "for", "cur_var", "in", "cur", ":", "updated", "=", "False", "for", "new_var", "in", "new", ":", "if", "get_base_id", "(...
Add any new variables to the world representation in cur. Replaces any variables adjusted by previous steps.
[ "Add", "any", "new", "variables", "to", "the", "world", "representation", "in", "cur", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/workflow.py#L439-L459
223,414
bcbio/bcbio-nextgen
bcbio/pipeline/disambiguate/__init__.py
split
def split(*items): """Split samples into all possible genomes for alignment. """ out = [] for data in [x[0] for x in items]: dis_orgs = data["config"]["algorithm"].get("disambiguate") if dis_orgs: if not data.get("disambiguate", None): data["disambiguate"] = {"genome_build": data["genome_build"], "base": True} out.append([data]) # handle the instance where a single organism is disambiguated if isinstance(dis_orgs, six.string_types): dis_orgs = [dis_orgs] for dis_org in dis_orgs: dis_data = copy.deepcopy(data) dis_data["disambiguate"] = {"genome_build": dis_org} dis_data["genome_build"] = dis_org dis_data["config"]["algorithm"]["effects"] = False dis_data = run_info.add_reference_resources(dis_data) out.append([dis_data]) else: out.append([data]) return out
python
def split(*items): """Split samples into all possible genomes for alignment. """ out = [] for data in [x[0] for x in items]: dis_orgs = data["config"]["algorithm"].get("disambiguate") if dis_orgs: if not data.get("disambiguate", None): data["disambiguate"] = {"genome_build": data["genome_build"], "base": True} out.append([data]) # handle the instance where a single organism is disambiguated if isinstance(dis_orgs, six.string_types): dis_orgs = [dis_orgs] for dis_org in dis_orgs: dis_data = copy.deepcopy(data) dis_data["disambiguate"] = {"genome_build": dis_org} dis_data["genome_build"] = dis_org dis_data["config"]["algorithm"]["effects"] = False dis_data = run_info.add_reference_resources(dis_data) out.append([dis_data]) else: out.append([data]) return out
[ "def", "split", "(", "*", "items", ")", ":", "out", "=", "[", "]", "for", "data", "in", "[", "x", "[", "0", "]", "for", "x", "in", "items", "]", ":", "dis_orgs", "=", "data", "[", "\"config\"", "]", "[", "\"algorithm\"", "]", ".", "get", "(", ...
Split samples into all possible genomes for alignment.
[ "Split", "samples", "into", "all", "possible", "genomes", "for", "alignment", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/disambiguate/__init__.py#L28-L51
223,415
bcbio/bcbio-nextgen
bcbio/pipeline/disambiguate/__init__.py
resolve
def resolve(items, run_parallel): """Combine aligned and split samples into final set of disambiguated reads. """ out = [] to_process = collections.defaultdict(list) for data in [x[0] for x in items]: if "disambiguate" in data: split_part = tuple([int(x) for x in data["align_split"].split("-")]) if data.get("combine") else None to_process[(dd.get_sample_name(data), split_part)].append(data) else: out.append([data]) if len(to_process) > 0: dis1 = run_parallel("run_disambiguate", [(xs, xs[0]["config"]) for xs in to_process.values()]) disambigs_by_name = collections.defaultdict(list) print(len(dis1)) for xs in dis1: assert len(xs) == 1 data = xs[0] disambigs_by_name[dd.get_sample_name(data)].append(data) dis2 = run_parallel("disambiguate_merge_extras", [(xs, xs[0]["config"]) for xs in disambigs_by_name.values()]) else: dis2 = [] return out + dis2
python
def resolve(items, run_parallel): """Combine aligned and split samples into final set of disambiguated reads. """ out = [] to_process = collections.defaultdict(list) for data in [x[0] for x in items]: if "disambiguate" in data: split_part = tuple([int(x) for x in data["align_split"].split("-")]) if data.get("combine") else None to_process[(dd.get_sample_name(data), split_part)].append(data) else: out.append([data]) if len(to_process) > 0: dis1 = run_parallel("run_disambiguate", [(xs, xs[0]["config"]) for xs in to_process.values()]) disambigs_by_name = collections.defaultdict(list) print(len(dis1)) for xs in dis1: assert len(xs) == 1 data = xs[0] disambigs_by_name[dd.get_sample_name(data)].append(data) dis2 = run_parallel("disambiguate_merge_extras", [(xs, xs[0]["config"]) for xs in disambigs_by_name.values()]) else: dis2 = [] return out + dis2
[ "def", "resolve", "(", "items", ",", "run_parallel", ")", ":", "out", "=", "[", "]", "to_process", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "data", "in", "[", "x", "[", "0", "]", "for", "x", "in", "items", "]", ":", "if", ...
Combine aligned and split samples into final set of disambiguated reads.
[ "Combine", "aligned", "and", "split", "samples", "into", "final", "set", "of", "disambiguated", "reads", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/disambiguate/__init__.py#L53-L77
223,416
bcbio/bcbio-nextgen
bcbio/pipeline/disambiguate/__init__.py
merge_extras
def merge_extras(items, config): """Merge extra disambiguated reads into a final BAM file. """ final = {} for extra_name in items[0]["disambiguate"].keys(): in_files = [] for data in items: in_files.append(data["disambiguate"][extra_name]) out_file = "%s-allmerged%s" % os.path.splitext(in_files[0]) if in_files[0].endswith(".bam"): merged_file = merge.merge_bam_files(in_files, os.path.dirname(out_file), items[0], out_file=out_file) else: assert extra_name == "summary", extra_name merged_file = _merge_summary(in_files, out_file, items[0]) final[extra_name] = merged_file out = [] for data in items: data["disambiguate"] = final out.append([data]) return out
python
def merge_extras(items, config): """Merge extra disambiguated reads into a final BAM file. """ final = {} for extra_name in items[0]["disambiguate"].keys(): in_files = [] for data in items: in_files.append(data["disambiguate"][extra_name]) out_file = "%s-allmerged%s" % os.path.splitext(in_files[0]) if in_files[0].endswith(".bam"): merged_file = merge.merge_bam_files(in_files, os.path.dirname(out_file), items[0], out_file=out_file) else: assert extra_name == "summary", extra_name merged_file = _merge_summary(in_files, out_file, items[0]) final[extra_name] = merged_file out = [] for data in items: data["disambiguate"] = final out.append([data]) return out
[ "def", "merge_extras", "(", "items", ",", "config", ")", ":", "final", "=", "{", "}", "for", "extra_name", "in", "items", "[", "0", "]", "[", "\"disambiguate\"", "]", ".", "keys", "(", ")", ":", "in_files", "=", "[", "]", "for", "data", "in", "item...
Merge extra disambiguated reads into a final BAM file.
[ "Merge", "extra", "disambiguated", "reads", "into", "a", "final", "BAM", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/disambiguate/__init__.py#L79-L99
223,417
bcbio/bcbio-nextgen
bcbio/pipeline/disambiguate/__init__.py
_merge_summary
def _merge_summary(in_files, out_file, data): """Create one big summary file for disambiguation from multiple splits. """ if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: for i, in_file in enumerate(in_files): with open(in_file) as in_handle: for j, line in enumerate(in_handle): if j == 0: if i == 0: out_handle.write(line) else: out_handle.write(line) return out_file
python
def _merge_summary(in_files, out_file, data): """Create one big summary file for disambiguation from multiple splits. """ if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: for i, in_file in enumerate(in_files): with open(in_file) as in_handle: for j, line in enumerate(in_handle): if j == 0: if i == 0: out_handle.write(line) else: out_handle.write(line) return out_file
[ "def", "_merge_summary", "(", "in_files", ",", "out_file", ",", "data", ")", ":", "if", "not", "utils", ".", "file_exists", "(", "out_file", ")", ":", "with", "file_transaction", "(", "data", ",", "out_file", ")", "as", "tx_out_file", ":", "with", "open", ...
Create one big summary file for disambiguation from multiple splits.
[ "Create", "one", "big", "summary", "file", "for", "disambiguation", "from", "multiple", "splits", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/disambiguate/__init__.py#L101-L115
223,418
bcbio/bcbio-nextgen
bcbio/pipeline/disambiguate/__init__.py
_run_python
def _run_python(work_bam_a, work_bam_b, out_dir, aligner, prefix, items): """Run python version of disambiguation """ Args = collections.namedtuple("Args", "A B output_dir intermediate_dir " "no_sort prefix aligner") args = Args(work_bam_a, work_bam_b, out_dir, out_dir, True, "", aligner) disambiguate_main(args)
python
def _run_python(work_bam_a, work_bam_b, out_dir, aligner, prefix, items): """Run python version of disambiguation """ Args = collections.namedtuple("Args", "A B output_dir intermediate_dir " "no_sort prefix aligner") args = Args(work_bam_a, work_bam_b, out_dir, out_dir, True, "", aligner) disambiguate_main(args)
[ "def", "_run_python", "(", "work_bam_a", ",", "work_bam_b", ",", "out_dir", ",", "aligner", ",", "prefix", ",", "items", ")", ":", "Args", "=", "collections", ".", "namedtuple", "(", "\"Args\"", ",", "\"A B output_dir intermediate_dir \"", "\"no_sort prefix aligner\...
Run python version of disambiguation
[ "Run", "python", "version", "of", "disambiguation" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/disambiguate/__init__.py#L153-L159
223,419
bcbio/bcbio-nextgen
bcbio/cwl/main.py
run
def run(args): """Run a CWL preparation pipeline. """ dirs, config, run_info_yaml = run_info.prep_system(args.sample_config, args.systemconfig) integrations = args.integrations if hasattr(args, "integrations") else {} world = run_info.organize(dirs, config, run_info_yaml, is_cwl=True, integrations=integrations) create.from_world(world, run_info_yaml, integrations=integrations, add_container_tag=args.add_container_tag)
python
def run(args): """Run a CWL preparation pipeline. """ dirs, config, run_info_yaml = run_info.prep_system(args.sample_config, args.systemconfig) integrations = args.integrations if hasattr(args, "integrations") else {} world = run_info.organize(dirs, config, run_info_yaml, is_cwl=True, integrations=integrations) create.from_world(world, run_info_yaml, integrations=integrations, add_container_tag=args.add_container_tag)
[ "def", "run", "(", "args", ")", ":", "dirs", ",", "config", ",", "run_info_yaml", "=", "run_info", ".", "prep_system", "(", "args", ".", "sample_config", ",", "args", ".", "systemconfig", ")", "integrations", "=", "args", ".", "integrations", "if", "hasatt...
Run a CWL preparation pipeline.
[ "Run", "a", "CWL", "preparation", "pipeline", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/main.py#L6-L12
223,420
bcbio/bcbio-nextgen
bcbio/variation/coverage.py
assign_interval
def assign_interval(data): """Identify coverage based on percent of genome covered and relation to targets. Classifies coverage into 3 categories: - genome: Full genome coverage - regional: Regional coverage, like exome capture, with off-target reads - amplicon: Amplication based regional coverage without off-target reads """ if not dd.get_coverage_interval(data): vrs = dd.get_variant_regions_merged(data) callable_file = dd.get_sample_callable(data) if vrs: callable_size = pybedtools.BedTool(vrs).total_coverage() else: callable_size = pybedtools.BedTool(callable_file).total_coverage() total_size = sum([c.size for c in ref.file_contigs(dd.get_ref_file(data), data["config"])]) genome_cov_pct = callable_size / float(total_size) if genome_cov_pct > GENOME_COV_THRESH: cov_interval = "genome" offtarget_pct = 0.0 elif not vrs: cov_interval = "regional" offtarget_pct = 0.0 else: offtarget_pct = _count_offtarget(data, dd.get_align_bam(data) or dd.get_work_bam(data), vrs or callable_file, "variant_regions") if offtarget_pct > OFFTARGET_THRESH: cov_interval = "regional" else: cov_interval = "amplicon" logger.info("%s: Assigned coverage as '%s' with %.1f%% genome coverage and %.1f%% offtarget coverage" % (dd.get_sample_name(data), cov_interval, genome_cov_pct * 100.0, offtarget_pct * 100.0)) data["config"]["algorithm"]["coverage_interval"] = cov_interval return data
python
def assign_interval(data): """Identify coverage based on percent of genome covered and relation to targets. Classifies coverage into 3 categories: - genome: Full genome coverage - regional: Regional coverage, like exome capture, with off-target reads - amplicon: Amplication based regional coverage without off-target reads """ if not dd.get_coverage_interval(data): vrs = dd.get_variant_regions_merged(data) callable_file = dd.get_sample_callable(data) if vrs: callable_size = pybedtools.BedTool(vrs).total_coverage() else: callable_size = pybedtools.BedTool(callable_file).total_coverage() total_size = sum([c.size for c in ref.file_contigs(dd.get_ref_file(data), data["config"])]) genome_cov_pct = callable_size / float(total_size) if genome_cov_pct > GENOME_COV_THRESH: cov_interval = "genome" offtarget_pct = 0.0 elif not vrs: cov_interval = "regional" offtarget_pct = 0.0 else: offtarget_pct = _count_offtarget(data, dd.get_align_bam(data) or dd.get_work_bam(data), vrs or callable_file, "variant_regions") if offtarget_pct > OFFTARGET_THRESH: cov_interval = "regional" else: cov_interval = "amplicon" logger.info("%s: Assigned coverage as '%s' with %.1f%% genome coverage and %.1f%% offtarget coverage" % (dd.get_sample_name(data), cov_interval, genome_cov_pct * 100.0, offtarget_pct * 100.0)) data["config"]["algorithm"]["coverage_interval"] = cov_interval return data
[ "def", "assign_interval", "(", "data", ")", ":", "if", "not", "dd", ".", "get_coverage_interval", "(", "data", ")", ":", "vrs", "=", "dd", ".", "get_variant_regions_merged", "(", "data", ")", "callable_file", "=", "dd", ".", "get_sample_callable", "(", "data...
Identify coverage based on percent of genome covered and relation to targets. Classifies coverage into 3 categories: - genome: Full genome coverage - regional: Regional coverage, like exome capture, with off-target reads - amplicon: Amplication based regional coverage without off-target reads
[ "Identify", "coverage", "based", "on", "percent", "of", "genome", "covered", "and", "relation", "to", "targets", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/coverage.py#L29-L62
223,421
bcbio/bcbio-nextgen
bcbio/variation/coverage.py
calculate
def calculate(bam_file, data, sv_bed): """Calculate coverage in parallel using mosdepth. Removes duplicates and secondary reads from the counts: if ( b->core.flag & (BAM_FUNMAP | BAM_FSECONDARY | BAM_FQCFAIL | BAM_FDUP) ) continue; """ params = {"min": dd.get_coverage_depth_min(data)} variant_regions = dd.get_variant_regions_merged(data) if not variant_regions: variant_regions = _create_genome_regions(data) # Back compatible with previous pre-mosdepth callable files callable_file = os.path.join(utils.safe_makedir(os.path.join(dd.get_work_dir(data), "align", dd.get_sample_name(data))), "%s-coverage.callable.bed" % (dd.get_sample_name(data))) if not utils.file_uptodate(callable_file, bam_file): vr_quantize = ("0:1:%s:" % (params["min"]), ["NO_COVERAGE", "LOW_COVERAGE", "CALLABLE"]) to_calculate = [("variant_regions", variant_regions, vr_quantize, None, "coverage_perbase" in dd.get_tools_on(data)), ("sv_regions", bedutils.clean_file(sv_bed, data, prefix="svregions-"), None, None, False), ("coverage", bedutils.clean_file(dd.get_coverage(data), data, prefix="cov-"), None, DEPTH_THRESHOLDS, False)] depth_files = {} for target_name, region_bed, quantize, thresholds, per_base in to_calculate: if region_bed: cur_depth = {} depth_info = run_mosdepth(data, target_name, region_bed, quantize=quantize, thresholds=thresholds, per_base=per_base) for attr in ("dist", "regions", "thresholds", "per_base"): val = getattr(depth_info, attr, None) if val: cur_depth[attr] = val depth_files[target_name] = cur_depth if target_name == "variant_regions": callable_file = depth_info.quantize else: depth_files = {} final_callable = _subset_to_variant_regions(callable_file, variant_regions, data) return final_callable, depth_files
python
def calculate(bam_file, data, sv_bed): """Calculate coverage in parallel using mosdepth. Removes duplicates and secondary reads from the counts: if ( b->core.flag & (BAM_FUNMAP | BAM_FSECONDARY | BAM_FQCFAIL | BAM_FDUP) ) continue; """ params = {"min": dd.get_coverage_depth_min(data)} variant_regions = dd.get_variant_regions_merged(data) if not variant_regions: variant_regions = _create_genome_regions(data) # Back compatible with previous pre-mosdepth callable files callable_file = os.path.join(utils.safe_makedir(os.path.join(dd.get_work_dir(data), "align", dd.get_sample_name(data))), "%s-coverage.callable.bed" % (dd.get_sample_name(data))) if not utils.file_uptodate(callable_file, bam_file): vr_quantize = ("0:1:%s:" % (params["min"]), ["NO_COVERAGE", "LOW_COVERAGE", "CALLABLE"]) to_calculate = [("variant_regions", variant_regions, vr_quantize, None, "coverage_perbase" in dd.get_tools_on(data)), ("sv_regions", bedutils.clean_file(sv_bed, data, prefix="svregions-"), None, None, False), ("coverage", bedutils.clean_file(dd.get_coverage(data), data, prefix="cov-"), None, DEPTH_THRESHOLDS, False)] depth_files = {} for target_name, region_bed, quantize, thresholds, per_base in to_calculate: if region_bed: cur_depth = {} depth_info = run_mosdepth(data, target_name, region_bed, quantize=quantize, thresholds=thresholds, per_base=per_base) for attr in ("dist", "regions", "thresholds", "per_base"): val = getattr(depth_info, attr, None) if val: cur_depth[attr] = val depth_files[target_name] = cur_depth if target_name == "variant_regions": callable_file = depth_info.quantize else: depth_files = {} final_callable = _subset_to_variant_regions(callable_file, variant_regions, data) return final_callable, depth_files
[ "def", "calculate", "(", "bam_file", ",", "data", ",", "sv_bed", ")", ":", "params", "=", "{", "\"min\"", ":", "dd", ".", "get_coverage_depth_min", "(", "data", ")", "}", "variant_regions", "=", "dd", ".", "get_variant_regions_merged", "(", "data", ")", "i...
Calculate coverage in parallel using mosdepth. Removes duplicates and secondary reads from the counts: if ( b->core.flag & (BAM_FUNMAP | BAM_FSECONDARY | BAM_FQCFAIL | BAM_FDUP) ) continue;
[ "Calculate", "coverage", "in", "parallel", "using", "mosdepth", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/coverage.py#L73-L111
223,422
bcbio/bcbio-nextgen
bcbio/variation/coverage.py
_create_genome_regions
def _create_genome_regions(data): """Create whole genome contigs we want to process, only non-alts. Skips problem contigs like HLAs for downstream analysis. """ work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "coverage", dd.get_sample_name(data))) variant_regions = os.path.join(work_dir, "target-genome.bed") with file_transaction(data, variant_regions) as tx_variant_regions: with open(tx_variant_regions, "w") as out_handle: for c in shared.get_noalt_contigs(data): out_handle.write("%s\t%s\t%s\n" % (c.name, 0, c.size)) return variant_regions
python
def _create_genome_regions(data): """Create whole genome contigs we want to process, only non-alts. Skips problem contigs like HLAs for downstream analysis. """ work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "coverage", dd.get_sample_name(data))) variant_regions = os.path.join(work_dir, "target-genome.bed") with file_transaction(data, variant_regions) as tx_variant_regions: with open(tx_variant_regions, "w") as out_handle: for c in shared.get_noalt_contigs(data): out_handle.write("%s\t%s\t%s\n" % (c.name, 0, c.size)) return variant_regions
[ "def", "_create_genome_regions", "(", "data", ")", ":", "work_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "dd", ".", "get_work_dir", "(", "data", ")", ",", "\"coverage\"", ",", "dd", ".", "get_sample_name", "(", "da...
Create whole genome contigs we want to process, only non-alts. Skips problem contigs like HLAs for downstream analysis.
[ "Create", "whole", "genome", "contigs", "we", "want", "to", "process", "only", "non", "-", "alts", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/coverage.py#L113-L124
223,423
bcbio/bcbio-nextgen
bcbio/variation/coverage.py
_subset_to_variant_regions
def _subset_to_variant_regions(callable_file, variant_regions, data): """Subset output callable file to only variant regions of interest. """ out_file = "%s-vrsubset.bed" % utils.splitext_plus(callable_file)[0] if not utils.file_uptodate(out_file, callable_file): with file_transaction(data, out_file) as tx_out_file: with utils.open_gzipsafe(callable_file) as in_handle: pybedtools.BedTool(in_handle).intersect(variant_regions).saveas(tx_out_file) return out_file
python
def _subset_to_variant_regions(callable_file, variant_regions, data): """Subset output callable file to only variant regions of interest. """ out_file = "%s-vrsubset.bed" % utils.splitext_plus(callable_file)[0] if not utils.file_uptodate(out_file, callable_file): with file_transaction(data, out_file) as tx_out_file: with utils.open_gzipsafe(callable_file) as in_handle: pybedtools.BedTool(in_handle).intersect(variant_regions).saveas(tx_out_file) return out_file
[ "def", "_subset_to_variant_regions", "(", "callable_file", ",", "variant_regions", ",", "data", ")", ":", "out_file", "=", "\"%s-vrsubset.bed\"", "%", "utils", ".", "splitext_plus", "(", "callable_file", ")", "[", "0", "]", "if", "not", "utils", ".", "file_uptod...
Subset output callable file to only variant regions of interest.
[ "Subset", "output", "callable", "file", "to", "only", "variant", "regions", "of", "interest", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/coverage.py#L126-L134
223,424
bcbio/bcbio-nextgen
bcbio/variation/coverage.py
_average_genome_coverage
def _average_genome_coverage(data, bam_file): """Quickly calculate average coverage for whole genome files using indices. Includes all reads, with duplicates. Uses sampling of 10M reads. """ total = sum([c.size for c in ref.file_contigs(dd.get_ref_file(data), data["config"])]) read_counts = sum(x.aligned for x in bam.idxstats(bam_file, data)) with pysam.Samfile(bam_file, "rb") as pysam_bam: read_size = np.median(list(itertools.islice((a.query_length for a in pysam_bam.fetch()), int(1e7)))) avg_cov = float(read_counts * read_size) / total return avg_cov
python
def _average_genome_coverage(data, bam_file): """Quickly calculate average coverage for whole genome files using indices. Includes all reads, with duplicates. Uses sampling of 10M reads. """ total = sum([c.size for c in ref.file_contigs(dd.get_ref_file(data), data["config"])]) read_counts = sum(x.aligned for x in bam.idxstats(bam_file, data)) with pysam.Samfile(bam_file, "rb") as pysam_bam: read_size = np.median(list(itertools.islice((a.query_length for a in pysam_bam.fetch()), int(1e7)))) avg_cov = float(read_counts * read_size) / total return avg_cov
[ "def", "_average_genome_coverage", "(", "data", ",", "bam_file", ")", ":", "total", "=", "sum", "(", "[", "c", ".", "size", "for", "c", "in", "ref", ".", "file_contigs", "(", "dd", ".", "get_ref_file", "(", "data", ")", ",", "data", "[", "\"config\"", ...
Quickly calculate average coverage for whole genome files using indices. Includes all reads, with duplicates. Uses sampling of 10M reads.
[ "Quickly", "calculate", "average", "coverage", "for", "whole", "genome", "files", "using", "indices", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/coverage.py#L171-L181
223,425
bcbio/bcbio-nextgen
bcbio/variation/coverage.py
regions_coverage
def regions_coverage(bed_file, target_name, data): """Generate coverage over regions of interest using mosdepth. """ ready_bed = tz.get_in(["depth", target_name, "regions"], data) if ready_bed: return ready_bed else: return run_mosdepth(data, target_name, bed_file).regions
python
def regions_coverage(bed_file, target_name, data): """Generate coverage over regions of interest using mosdepth. """ ready_bed = tz.get_in(["depth", target_name, "regions"], data) if ready_bed: return ready_bed else: return run_mosdepth(data, target_name, bed_file).regions
[ "def", "regions_coverage", "(", "bed_file", ",", "target_name", ",", "data", ")", ":", "ready_bed", "=", "tz", ".", "get_in", "(", "[", "\"depth\"", ",", "target_name", ",", "\"regions\"", "]", ",", "data", ")", "if", "ready_bed", ":", "return", "ready_bed...
Generate coverage over regions of interest using mosdepth.
[ "Generate", "coverage", "over", "regions", "of", "interest", "using", "mosdepth", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/coverage.py#L197-L204
223,426
bcbio/bcbio-nextgen
bcbio/variation/coverage.py
coverage_region_detailed_stats
def coverage_region_detailed_stats(target_name, bed_file, data, out_dir): """ Calculate coverage at different completeness cutoff for region in coverage option. """ if bed_file and utils.file_exists(bed_file): ready_depth = tz.get_in(["depth", target_name], data) if ready_depth: cov_file = ready_depth["regions"] dist_file = ready_depth["dist"] thresholds_file = ready_depth.get("thresholds") out_cov_file = os.path.join(out_dir, os.path.basename(cov_file)) out_dist_file = os.path.join(out_dir, os.path.basename(dist_file)) out_thresholds_file = os.path.join(out_dir, os.path.basename(thresholds_file)) \ if thresholds_file and os.path.isfile(thresholds_file) else None if not utils.file_uptodate(out_cov_file, cov_file): utils.copy_plus(cov_file, out_cov_file) utils.copy_plus(dist_file, out_dist_file) utils.copy_plus(thresholds_file, out_thresholds_file) if out_thresholds_file else None return [out_cov_file, out_dist_file] + ([out_thresholds_file] if out_thresholds_file else []) return []
python
def coverage_region_detailed_stats(target_name, bed_file, data, out_dir): """ Calculate coverage at different completeness cutoff for region in coverage option. """ if bed_file and utils.file_exists(bed_file): ready_depth = tz.get_in(["depth", target_name], data) if ready_depth: cov_file = ready_depth["regions"] dist_file = ready_depth["dist"] thresholds_file = ready_depth.get("thresholds") out_cov_file = os.path.join(out_dir, os.path.basename(cov_file)) out_dist_file = os.path.join(out_dir, os.path.basename(dist_file)) out_thresholds_file = os.path.join(out_dir, os.path.basename(thresholds_file)) \ if thresholds_file and os.path.isfile(thresholds_file) else None if not utils.file_uptodate(out_cov_file, cov_file): utils.copy_plus(cov_file, out_cov_file) utils.copy_plus(dist_file, out_dist_file) utils.copy_plus(thresholds_file, out_thresholds_file) if out_thresholds_file else None return [out_cov_file, out_dist_file] + ([out_thresholds_file] if out_thresholds_file else []) return []
[ "def", "coverage_region_detailed_stats", "(", "target_name", ",", "bed_file", ",", "data", ",", "out_dir", ")", ":", "if", "bed_file", "and", "utils", ".", "file_exists", "(", "bed_file", ")", ":", "ready_depth", "=", "tz", ".", "get_in", "(", "[", "\"depth\...
Calculate coverage at different completeness cutoff for region in coverage option.
[ "Calculate", "coverage", "at", "different", "completeness", "cutoff", "for", "region", "in", "coverage", "option", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/coverage.py#L249-L269
223,427
bcbio/bcbio-nextgen
bcbio/heterogeneity/loh.py
get_coords
def get_coords(data): """Retrieve coordinates of genes of interest for prioritization. Can read from CIViC input data or a supplied BED file of chrom, start, end and gene information. """ for category, vtypes in [("LOH", {"LOSS", "HETEROZYGOSITY"}), ("amplification", {"AMPLIFICATION"})]: out = tz.get_in([category, dd.get_genome_build(data)], _COORDS, {}) priority_file = dd.get_svprioritize(data) if priority_file: if os.path.basename(priority_file).find("civic") >= 0: for chrom, start, end, gene in _civic_regions(priority_file, vtypes, dd.get_disease(data)): out[gene] = (chrom, start, end) elif os.path.basename(priority_file).find(".bed") >= 0: for line in utils.open_gzipsafe(priority_file): parts = line.strip().split("\t") if len(parts) >= 4: chrom, start, end, gene = parts[:4] out[gene] = (chrom, int(start), int(end)) yield category, out
python
def get_coords(data): """Retrieve coordinates of genes of interest for prioritization. Can read from CIViC input data or a supplied BED file of chrom, start, end and gene information. """ for category, vtypes in [("LOH", {"LOSS", "HETEROZYGOSITY"}), ("amplification", {"AMPLIFICATION"})]: out = tz.get_in([category, dd.get_genome_build(data)], _COORDS, {}) priority_file = dd.get_svprioritize(data) if priority_file: if os.path.basename(priority_file).find("civic") >= 0: for chrom, start, end, gene in _civic_regions(priority_file, vtypes, dd.get_disease(data)): out[gene] = (chrom, start, end) elif os.path.basename(priority_file).find(".bed") >= 0: for line in utils.open_gzipsafe(priority_file): parts = line.strip().split("\t") if len(parts) >= 4: chrom, start, end, gene = parts[:4] out[gene] = (chrom, int(start), int(end)) yield category, out
[ "def", "get_coords", "(", "data", ")", ":", "for", "category", ",", "vtypes", "in", "[", "(", "\"LOH\"", ",", "{", "\"LOSS\"", ",", "\"HETEROZYGOSITY\"", "}", ")", ",", "(", "\"amplification\"", ",", "{", "\"AMPLIFICATION\"", "}", ")", "]", ":", "out", ...
Retrieve coordinates of genes of interest for prioritization. Can read from CIViC input data or a supplied BED file of chrom, start, end and gene information.
[ "Retrieve", "coordinates", "of", "genes", "of", "interest", "for", "prioritization", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/loh.py#L29-L49
223,428
bcbio/bcbio-nextgen
bcbio/heterogeneity/loh.py
_civic_regions
def _civic_regions(civic_file, variant_types=None, diseases=None, drugs=None): """Retrieve gene regions and names filtered by variant_types and diseases. """ if isinstance(diseases, six.string_types): diseases = [diseases] with utils.open_gzipsafe(civic_file) as in_handle: reader = csv.reader(in_handle, delimiter="\t") for chrom, start, end, info_str in reader: info = edn_loads(info_str) if not variant_types or _matches(info["support"]["variants"], variant_types): if not diseases or _matches(info["support"]["diseases"], diseases): if not drugs or _matches(info["support"]["drugs"], drugs): yield (chrom, int(start), int(end), list(info["name"])[0])
python
def _civic_regions(civic_file, variant_types=None, diseases=None, drugs=None): """Retrieve gene regions and names filtered by variant_types and diseases. """ if isinstance(diseases, six.string_types): diseases = [diseases] with utils.open_gzipsafe(civic_file) as in_handle: reader = csv.reader(in_handle, delimiter="\t") for chrom, start, end, info_str in reader: info = edn_loads(info_str) if not variant_types or _matches(info["support"]["variants"], variant_types): if not diseases or _matches(info["support"]["diseases"], diseases): if not drugs or _matches(info["support"]["drugs"], drugs): yield (chrom, int(start), int(end), list(info["name"])[0])
[ "def", "_civic_regions", "(", "civic_file", ",", "variant_types", "=", "None", ",", "diseases", "=", "None", ",", "drugs", "=", "None", ")", ":", "if", "isinstance", "(", "diseases", ",", "six", ".", "string_types", ")", ":", "diseases", "=", "[", "disea...
Retrieve gene regions and names filtered by variant_types and diseases.
[ "Retrieve", "gene", "regions", "and", "names", "filtered", "by", "variant_types", "and", "diseases", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/loh.py#L58-L70
223,429
bcbio/bcbio-nextgen
bcbio/heterogeneity/loh.py
summary_status
def summary_status(call, data): """Retrieve status in regions of interest, along with heterogeneity metrics. Provides output with overall purity and ploidy, along with region specific calls. """ out_file = None if call.get("vrn_file") and os.path.exists(call.get("vrn_file")): out_file = os.path.join(os.path.dirname(call["vrn_file"]), "%s-%s-lohsummary.yaml" % (dd.get_sample_name(data), call["variantcaller"])) if not utils.file_uptodate(out_file, call["vrn_file"]): out = {} if call["variantcaller"] == "titancna": out.update(_titancna_summary(call, data)) pass elif call["variantcaller"] == "purecn": out.update(_purecn_summary(call, data)) if out: out["description"] = dd.get_sample_name(data) out["variantcaller"] = call["variantcaller"] with file_transaction(data, out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False) return out_file if out_file and os.path.exists(out_file) else None
python
def summary_status(call, data): """Retrieve status in regions of interest, along with heterogeneity metrics. Provides output with overall purity and ploidy, along with region specific calls. """ out_file = None if call.get("vrn_file") and os.path.exists(call.get("vrn_file")): out_file = os.path.join(os.path.dirname(call["vrn_file"]), "%s-%s-lohsummary.yaml" % (dd.get_sample_name(data), call["variantcaller"])) if not utils.file_uptodate(out_file, call["vrn_file"]): out = {} if call["variantcaller"] == "titancna": out.update(_titancna_summary(call, data)) pass elif call["variantcaller"] == "purecn": out.update(_purecn_summary(call, data)) if out: out["description"] = dd.get_sample_name(data) out["variantcaller"] = call["variantcaller"] with file_transaction(data, out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False) return out_file if out_file and os.path.exists(out_file) else None
[ "def", "summary_status", "(", "call", ",", "data", ")", ":", "out_file", "=", "None", "if", "call", ".", "get", "(", "\"vrn_file\"", ")", "and", "os", ".", "path", ".", "exists", "(", "call", ".", "get", "(", "\"vrn_file\"", ")", ")", ":", "out_file"...
Retrieve status in regions of interest, along with heterogeneity metrics. Provides output with overall purity and ploidy, along with region specific calls.
[ "Retrieve", "status", "in", "regions", "of", "interest", "along", "with", "heterogeneity", "metrics", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/loh.py#L72-L95
223,430
bcbio/bcbio-nextgen
bcbio/heterogeneity/loh.py
_check_copy_number_changes
def _check_copy_number_changes(svtype, cn, minor_cn, data): """Check if copy number changes match the expected svtype. """ if svtype == "LOH" and minor_cn == 0: return svtype elif svtype == "amplification" and cn > dd.get_ploidy(data): return svtype else: return "std"
python
def _check_copy_number_changes(svtype, cn, minor_cn, data): """Check if copy number changes match the expected svtype. """ if svtype == "LOH" and minor_cn == 0: return svtype elif svtype == "amplification" and cn > dd.get_ploidy(data): return svtype else: return "std"
[ "def", "_check_copy_number_changes", "(", "svtype", ",", "cn", ",", "minor_cn", ",", "data", ")", ":", "if", "svtype", "==", "\"LOH\"", "and", "minor_cn", "==", "0", ":", "return", "svtype", "elif", "svtype", "==", "\"amplification\"", "and", "cn", ">", "d...
Check if copy number changes match the expected svtype.
[ "Check", "if", "copy", "number", "changes", "match", "the", "expected", "svtype", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/loh.py#L97-L105
223,431
bcbio/bcbio-nextgen
bcbio/heterogeneity/loh.py
_titancna_summary
def _titancna_summary(call, data): """Summarize purity, ploidy and LOH for TitanCNA. """ out = {} for svtype, coords in get_coords(data): cur_calls = {k: collections.defaultdict(int) for k in coords.keys()} with open(call["subclones"]) as in_handle: header = in_handle.readline().strip().split() for line in in_handle: val = dict(zip(header, line.strip().split())) start = int(val["Start_Position.bp."]) end = int(val["End_Position.bp."]) for region, cur_coords in coords.items(): if val["Chromosome"] == cur_coords[0] and are_overlapping((start, end), cur_coords[1:]): cur_calls[region][_check_copy_number_changes(svtype, _to_cn(val["Copy_Number"]), _to_cn(val["MinorCN"]), data)] += 1 out[svtype] = {r: _merge_cn_calls(c, svtype) for r, c in cur_calls.items()} with open(call["hetsummary"]) as in_handle: vals = dict(zip(in_handle.readline().strip().split("\t"), in_handle.readline().strip().split("\t"))) out["purity"] = vals["purity"] out["ploidy"] = vals["ploidy"] return out
python
def _titancna_summary(call, data): """Summarize purity, ploidy and LOH for TitanCNA. """ out = {} for svtype, coords in get_coords(data): cur_calls = {k: collections.defaultdict(int) for k in coords.keys()} with open(call["subclones"]) as in_handle: header = in_handle.readline().strip().split() for line in in_handle: val = dict(zip(header, line.strip().split())) start = int(val["Start_Position.bp."]) end = int(val["End_Position.bp."]) for region, cur_coords in coords.items(): if val["Chromosome"] == cur_coords[0] and are_overlapping((start, end), cur_coords[1:]): cur_calls[region][_check_copy_number_changes(svtype, _to_cn(val["Copy_Number"]), _to_cn(val["MinorCN"]), data)] += 1 out[svtype] = {r: _merge_cn_calls(c, svtype) for r, c in cur_calls.items()} with open(call["hetsummary"]) as in_handle: vals = dict(zip(in_handle.readline().strip().split("\t"), in_handle.readline().strip().split("\t"))) out["purity"] = vals["purity"] out["ploidy"] = vals["ploidy"] return out
[ "def", "_titancna_summary", "(", "call", ",", "data", ")", ":", "out", "=", "{", "}", "for", "svtype", ",", "coords", "in", "get_coords", "(", "data", ")", ":", "cur_calls", "=", "{", "k", ":", "collections", ".", "defaultdict", "(", "int", ")", "for...
Summarize purity, ploidy and LOH for TitanCNA.
[ "Summarize", "purity", "ploidy", "and", "LOH", "for", "TitanCNA", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/loh.py#L110-L132
223,432
bcbio/bcbio-nextgen
bcbio/heterogeneity/loh.py
_purecn_summary
def _purecn_summary(call, data): """Summarize purity, ploidy and LOH for PureCN. """ out = {} for svtype, coords in get_coords(data): cur_calls = {k: collections.defaultdict(int) for k in coords.keys()} with open(call["loh"]) as in_handle: in_handle.readline() # header for line in in_handle: _, chrom, start, end, _, cn, minor_cn = line.split(",")[:7] start = int(start) end = int(end) for region, cur_coords in coords.items(): if chrom == cur_coords[0] and are_overlapping((start, end), cur_coords[1:]): cur_calls[region][_check_copy_number_changes(svtype, _to_cn(cn), _to_cn(minor_cn), data)] += 1 out[svtype] = {r: _merge_cn_calls(c, svtype) for r, c in cur_calls.items()} with open(call["hetsummary"]) as in_handle: vals = dict(zip(in_handle.readline().strip().replace('"', '').split(","), in_handle.readline().strip().split(","))) out["purity"] = vals["Purity"] out["ploidy"] = vals["Ploidy"] return out
python
def _purecn_summary(call, data): """Summarize purity, ploidy and LOH for PureCN. """ out = {} for svtype, coords in get_coords(data): cur_calls = {k: collections.defaultdict(int) for k in coords.keys()} with open(call["loh"]) as in_handle: in_handle.readline() # header for line in in_handle: _, chrom, start, end, _, cn, minor_cn = line.split(",")[:7] start = int(start) end = int(end) for region, cur_coords in coords.items(): if chrom == cur_coords[0] and are_overlapping((start, end), cur_coords[1:]): cur_calls[region][_check_copy_number_changes(svtype, _to_cn(cn), _to_cn(minor_cn), data)] += 1 out[svtype] = {r: _merge_cn_calls(c, svtype) for r, c in cur_calls.items()} with open(call["hetsummary"]) as in_handle: vals = dict(zip(in_handle.readline().strip().replace('"', '').split(","), in_handle.readline().strip().split(","))) out["purity"] = vals["Purity"] out["ploidy"] = vals["Ploidy"] return out
[ "def", "_purecn_summary", "(", "call", ",", "data", ")", ":", "out", "=", "{", "}", "for", "svtype", ",", "coords", "in", "get_coords", "(", "data", ")", ":", "cur_calls", "=", "{", "k", ":", "collections", ".", "defaultdict", "(", "int", ")", "for",...
Summarize purity, ploidy and LOH for PureCN.
[ "Summarize", "purity", "ploidy", "and", "LOH", "for", "PureCN", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/loh.py#L134-L155
223,433
bcbio/bcbio-nextgen
scripts/utils/plink_to_vcf.py
fix_vcf_line
def fix_vcf_line(parts, ref_base): """Orient VCF allele calls with respect to reference base. Handles cases with ref and variant swaps. strand complements. """ swap = {"1/1": "0/0", "0/1": "0/1", "0/0": "1/1", "./.": "./."} complements = {"G": "C", "A": "T", "C": "G", "T": "A", "N": "N"} varinfo, genotypes = fix_line_problems(parts) ref, var = varinfo[3:5] # non-reference regions or non-informative, can't do anything if ref_base in [None, "N"] or set(genotypes) == set(["./."]): varinfo = None # matching reference, all good elif ref_base == ref: assert ref_base == ref, (ref_base, parts) # swapped reference and alternate regions elif ref_base == var or ref in ["N", "0"]: varinfo[3] = var varinfo[4] = ref genotypes = [swap[x] for x in genotypes] # reference is on alternate strand elif ref_base != ref and complements.get(ref) == ref_base: varinfo[3] = complements[ref] varinfo[4] = ",".join([complements[v] for v in var.split(",")]) # unspecified alternative base elif ref_base != ref and var in ["N", "0"]: varinfo[3] = ref_base varinfo[4] = ref genotypes = [swap[x] for x in genotypes] # swapped and on alternate strand elif ref_base != ref and complements.get(var) == ref_base: varinfo[3] = complements[var] varinfo[4] = ",".join([complements[v] for v in ref.split(",")]) genotypes = [swap[x] for x in genotypes] else: print "Did not associate ref {0} with line: {1}".format( ref_base, varinfo) if varinfo is not None: return varinfo + genotypes
python
def fix_vcf_line(parts, ref_base): """Orient VCF allele calls with respect to reference base. Handles cases with ref and variant swaps. strand complements. """ swap = {"1/1": "0/0", "0/1": "0/1", "0/0": "1/1", "./.": "./."} complements = {"G": "C", "A": "T", "C": "G", "T": "A", "N": "N"} varinfo, genotypes = fix_line_problems(parts) ref, var = varinfo[3:5] # non-reference regions or non-informative, can't do anything if ref_base in [None, "N"] or set(genotypes) == set(["./."]): varinfo = None # matching reference, all good elif ref_base == ref: assert ref_base == ref, (ref_base, parts) # swapped reference and alternate regions elif ref_base == var or ref in ["N", "0"]: varinfo[3] = var varinfo[4] = ref genotypes = [swap[x] for x in genotypes] # reference is on alternate strand elif ref_base != ref and complements.get(ref) == ref_base: varinfo[3] = complements[ref] varinfo[4] = ",".join([complements[v] for v in var.split(",")]) # unspecified alternative base elif ref_base != ref and var in ["N", "0"]: varinfo[3] = ref_base varinfo[4] = ref genotypes = [swap[x] for x in genotypes] # swapped and on alternate strand elif ref_base != ref and complements.get(var) == ref_base: varinfo[3] = complements[var] varinfo[4] = ",".join([complements[v] for v in ref.split(",")]) genotypes = [swap[x] for x in genotypes] else: print "Did not associate ref {0} with line: {1}".format( ref_base, varinfo) if varinfo is not None: return varinfo + genotypes
[ "def", "fix_vcf_line", "(", "parts", ",", "ref_base", ")", ":", "swap", "=", "{", "\"1/1\"", ":", "\"0/0\"", ",", "\"0/1\"", ":", "\"0/1\"", ",", "\"0/0\"", ":", "\"1/1\"", ",", "\"./.\"", ":", "\"./.\"", "}", "complements", "=", "{", "\"G\"", ":", "\"...
Orient VCF allele calls with respect to reference base. Handles cases with ref and variant swaps. strand complements.
[ "Orient", "VCF", "allele", "calls", "with", "respect", "to", "reference", "base", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/plink_to_vcf.py#L79-L117
223,434
bcbio/bcbio-nextgen
scripts/utils/plink_to_vcf.py
fix_nonref_positions
def fix_nonref_positions(in_file, ref_file): """Fix Genotyping VCF positions where the bases are all variants. The plink/pseq output does not handle these correctly, and has all reference/variant bases reversed. """ ignore_chrs = ["."] ref2bit = twobit.TwoBitFile(open(ref_file)) out_file = in_file.replace("-raw.vcf", ".vcf") with open(in_file) as in_handle: with open(out_file, "w") as out_handle: for line in in_handle: if line.startswith("#"): out_handle.write(line) else: parts = line.rstrip("\r\n").split("\t") pos = int(parts[1]) # handle chr/non-chr naming if parts[0] not in ref2bit.keys() and parts[0].replace("chr", "") in ref2bit.keys(): parts[0] = parts[0].replace("chr", "") # handle X chromosome elif parts[0] not in ref2bit.keys() and parts[0] == "23": for test in ["X", "chrX"]: if test in ref2bit.keys(): parts[0] == test ref_base = None if parts[0] not in ignore_chrs: try: ref_base = ref2bit[parts[0]].get(pos-1, pos).upper() except Exception as msg: print "Skipping line. Failed to retrieve reference base for %s\n%s" % (str(parts), msg) parts = fix_vcf_line(parts, ref_base) if parts is not None: out_handle.write("\t".join(parts) + "\n") return out_file
python
def fix_nonref_positions(in_file, ref_file): """Fix Genotyping VCF positions where the bases are all variants. The plink/pseq output does not handle these correctly, and has all reference/variant bases reversed. """ ignore_chrs = ["."] ref2bit = twobit.TwoBitFile(open(ref_file)) out_file = in_file.replace("-raw.vcf", ".vcf") with open(in_file) as in_handle: with open(out_file, "w") as out_handle: for line in in_handle: if line.startswith("#"): out_handle.write(line) else: parts = line.rstrip("\r\n").split("\t") pos = int(parts[1]) # handle chr/non-chr naming if parts[0] not in ref2bit.keys() and parts[0].replace("chr", "") in ref2bit.keys(): parts[0] = parts[0].replace("chr", "") # handle X chromosome elif parts[0] not in ref2bit.keys() and parts[0] == "23": for test in ["X", "chrX"]: if test in ref2bit.keys(): parts[0] == test ref_base = None if parts[0] not in ignore_chrs: try: ref_base = ref2bit[parts[0]].get(pos-1, pos).upper() except Exception as msg: print "Skipping line. Failed to retrieve reference base for %s\n%s" % (str(parts), msg) parts = fix_vcf_line(parts, ref_base) if parts is not None: out_handle.write("\t".join(parts) + "\n") return out_file
[ "def", "fix_nonref_positions", "(", "in_file", ",", "ref_file", ")", ":", "ignore_chrs", "=", "[", "\".\"", "]", "ref2bit", "=", "twobit", ".", "TwoBitFile", "(", "open", "(", "ref_file", ")", ")", "out_file", "=", "in_file", ".", "replace", "(", "\"-raw.v...
Fix Genotyping VCF positions where the bases are all variants. The plink/pseq output does not handle these correctly, and has all reference/variant bases reversed.
[ "Fix", "Genotyping", "VCF", "positions", "where", "the", "bases", "are", "all", "variants", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/plink_to_vcf.py#L119-L154
223,435
bcbio/bcbio-nextgen
bcbio/structural/cn_mops.py
run
def run(items, background=None): """Detect copy number variations from batched set of samples using cn.mops. """ if not background: background = [] names = [tz.get_in(["rgnames", "sample"], x) for x in items + background] work_bams = [x["align_bam"] for x in items + background] if len(items + background) < 2: raise ValueError("cn.mops only works on batches with multiple samples") data = items[0] work_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "structural", names[0], "cn_mops")) parallel = {"type": "local", "cores": data["config"]["algorithm"].get("num_cores", 1), "progs": ["delly"]} with pysam.Samfile(work_bams[0], "rb") as pysam_work_bam: chroms = [None] if _get_regional_bed_file(items[0]) else pysam_work_bam.references out_files = run_multicore(_run_on_chrom, [(chrom, work_bams, names, work_dir, items) for chrom in chroms], data["config"], parallel) out_file = _combine_out_files(out_files, work_dir, data) out = [] for data in items: if "sv" not in data: data["sv"] = [] data["sv"].append({"variantcaller": "cn_mops", "vrn_file": _prep_sample_cnvs(out_file, data)}) out.append(data) return out
python
def run(items, background=None): """Detect copy number variations from batched set of samples using cn.mops. """ if not background: background = [] names = [tz.get_in(["rgnames", "sample"], x) for x in items + background] work_bams = [x["align_bam"] for x in items + background] if len(items + background) < 2: raise ValueError("cn.mops only works on batches with multiple samples") data = items[0] work_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "structural", names[0], "cn_mops")) parallel = {"type": "local", "cores": data["config"]["algorithm"].get("num_cores", 1), "progs": ["delly"]} with pysam.Samfile(work_bams[0], "rb") as pysam_work_bam: chroms = [None] if _get_regional_bed_file(items[0]) else pysam_work_bam.references out_files = run_multicore(_run_on_chrom, [(chrom, work_bams, names, work_dir, items) for chrom in chroms], data["config"], parallel) out_file = _combine_out_files(out_files, work_dir, data) out = [] for data in items: if "sv" not in data: data["sv"] = [] data["sv"].append({"variantcaller": "cn_mops", "vrn_file": _prep_sample_cnvs(out_file, data)}) out.append(data) return out
[ "def", "run", "(", "items", ",", "background", "=", "None", ")", ":", "if", "not", "background", ":", "background", "=", "[", "]", "names", "=", "[", "tz", ".", "get_in", "(", "[", "\"rgnames\"", ",", "\"sample\"", "]", ",", "x", ")", "for", "x", ...
Detect copy number variations from batched set of samples using cn.mops.
[ "Detect", "copy", "number", "variations", "from", "batched", "set", "of", "samples", "using", "cn", ".", "mops", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cn_mops.py#L22-L48
223,436
bcbio/bcbio-nextgen
bcbio/structural/cn_mops.py
_combine_out_files
def _combine_out_files(chr_files, work_dir, data): """Concatenate all CNV calls into a single file. """ out_file = "%s.bed" % sshared.outname_from_inputs(chr_files) if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: for chr_file in chr_files: with open(chr_file) as in_handle: is_empty = in_handle.readline().startswith("track name=empty") if not is_empty: with open(chr_file) as in_handle: shutil.copyfileobj(in_handle, out_handle) return out_file
python
def _combine_out_files(chr_files, work_dir, data): """Concatenate all CNV calls into a single file. """ out_file = "%s.bed" % sshared.outname_from_inputs(chr_files) if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: for chr_file in chr_files: with open(chr_file) as in_handle: is_empty = in_handle.readline().startswith("track name=empty") if not is_empty: with open(chr_file) as in_handle: shutil.copyfileobj(in_handle, out_handle) return out_file
[ "def", "_combine_out_files", "(", "chr_files", ",", "work_dir", ",", "data", ")", ":", "out_file", "=", "\"%s.bed\"", "%", "sshared", ".", "outname_from_inputs", "(", "chr_files", ")", "if", "not", "utils", ".", "file_exists", "(", "out_file", ")", ":", "wit...
Concatenate all CNV calls into a single file.
[ "Concatenate", "all", "CNV", "calls", "into", "a", "single", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cn_mops.py#L50-L63
223,437
bcbio/bcbio-nextgen
bcbio/structural/cn_mops.py
_prep_sample_cnvs
def _prep_sample_cnvs(cnv_file, data): """Convert a multiple sample CNV file into a single BED file for a sample. Handles matching and fixing names where R converts numerical IDs (1234) into strings by adding an X (X1234), and converts other characters into '.'s. http://stat.ethz.ch/R-manual/R-devel/library/base/html/make.names.html """ import pybedtools sample_name = tz.get_in(["rgnames", "sample"], data) def make_names(name): return re.sub("[^\w.]", '.', name) def matches_sample_name(feat): return (feat.name == sample_name or feat.name == "X%s" % sample_name or feat.name == make_names(sample_name)) def update_sample_name(feat): feat.name = sample_name return feat sample_file = os.path.join(os.path.dirname(cnv_file), "%s-cnv.bed" % sample_name) if not utils.file_exists(sample_file): with file_transaction(data, sample_file) as tx_out_file: with shared.bedtools_tmpdir(data): pybedtools.BedTool(cnv_file).filter(matches_sample_name).each(update_sample_name).saveas(tx_out_file) return sample_file
python
def _prep_sample_cnvs(cnv_file, data): """Convert a multiple sample CNV file into a single BED file for a sample. Handles matching and fixing names where R converts numerical IDs (1234) into strings by adding an X (X1234), and converts other characters into '.'s. http://stat.ethz.ch/R-manual/R-devel/library/base/html/make.names.html """ import pybedtools sample_name = tz.get_in(["rgnames", "sample"], data) def make_names(name): return re.sub("[^\w.]", '.', name) def matches_sample_name(feat): return (feat.name == sample_name or feat.name == "X%s" % sample_name or feat.name == make_names(sample_name)) def update_sample_name(feat): feat.name = sample_name return feat sample_file = os.path.join(os.path.dirname(cnv_file), "%s-cnv.bed" % sample_name) if not utils.file_exists(sample_file): with file_transaction(data, sample_file) as tx_out_file: with shared.bedtools_tmpdir(data): pybedtools.BedTool(cnv_file).filter(matches_sample_name).each(update_sample_name).saveas(tx_out_file) return sample_file
[ "def", "_prep_sample_cnvs", "(", "cnv_file", ",", "data", ")", ":", "import", "pybedtools", "sample_name", "=", "tz", ".", "get_in", "(", "[", "\"rgnames\"", ",", "\"sample\"", "]", ",", "data", ")", "def", "make_names", "(", "name", ")", ":", "return", ...
Convert a multiple sample CNV file into a single BED file for a sample. Handles matching and fixing names where R converts numerical IDs (1234) into strings by adding an X (X1234), and converts other characters into '.'s. http://stat.ethz.ch/R-manual/R-devel/library/base/html/make.names.html
[ "Convert", "a", "multiple", "sample", "CNV", "file", "into", "a", "single", "BED", "file", "for", "a", "sample", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cn_mops.py#L65-L87
223,438
bcbio/bcbio-nextgen
bcbio/structural/cn_mops.py
_run_on_chrom
def _run_on_chrom(chrom, work_bams, names, work_dir, items): """Run cn.mops on work BAMs for a specific chromosome. """ local_sitelib = utils.R_sitelib() batch = sshared.get_cur_batch(items) ext = "-%s-cnv" % batch if batch else "-cnv" out_file = os.path.join(work_dir, "%s%s-%s.bed" % (os.path.splitext(os.path.basename(work_bams[0]))[0], ext, chrom if chrom else "all")) if not utils.file_exists(out_file): with file_transaction(items[0], out_file) as tx_out_file: rcode = "%s-run.R" % os.path.splitext(out_file)[0] with open(rcode, "w") as out_handle: out_handle.write(_script.format(prep_str=_prep_load_script(work_bams, names, chrom, items), out_file=tx_out_file, local_sitelib=local_sitelib)) rscript = utils.Rscript_cmd() try: do.run([rscript, "--no-environ", rcode], "cn.mops CNV detection", items[0], log_error=False) except subprocess.CalledProcessError as msg: # cn.mops errors out if no CNVs found. Just write an empty file. if _allowed_cnmops_errorstates(str(msg)): with open(tx_out_file, "w") as out_handle: out_handle.write('track name=empty description="No CNVs found"\n') else: logger.exception() raise return [out_file]
python
def _run_on_chrom(chrom, work_bams, names, work_dir, items): """Run cn.mops on work BAMs for a specific chromosome. """ local_sitelib = utils.R_sitelib() batch = sshared.get_cur_batch(items) ext = "-%s-cnv" % batch if batch else "-cnv" out_file = os.path.join(work_dir, "%s%s-%s.bed" % (os.path.splitext(os.path.basename(work_bams[0]))[0], ext, chrom if chrom else "all")) if not utils.file_exists(out_file): with file_transaction(items[0], out_file) as tx_out_file: rcode = "%s-run.R" % os.path.splitext(out_file)[0] with open(rcode, "w") as out_handle: out_handle.write(_script.format(prep_str=_prep_load_script(work_bams, names, chrom, items), out_file=tx_out_file, local_sitelib=local_sitelib)) rscript = utils.Rscript_cmd() try: do.run([rscript, "--no-environ", rcode], "cn.mops CNV detection", items[0], log_error=False) except subprocess.CalledProcessError as msg: # cn.mops errors out if no CNVs found. Just write an empty file. if _allowed_cnmops_errorstates(str(msg)): with open(tx_out_file, "w") as out_handle: out_handle.write('track name=empty description="No CNVs found"\n') else: logger.exception() raise return [out_file]
[ "def", "_run_on_chrom", "(", "chrom", ",", "work_bams", ",", "names", ",", "work_dir", ",", "items", ")", ":", "local_sitelib", "=", "utils", ".", "R_sitelib", "(", ")", "batch", "=", "sshared", ".", "get_cur_batch", "(", "items", ")", "ext", "=", "\"-%s...
Run cn.mops on work BAMs for a specific chromosome.
[ "Run", "cn", ".", "mops", "on", "work", "BAMs", "for", "a", "specific", "chromosome", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cn_mops.py#L91-L117
223,439
bcbio/bcbio-nextgen
bcbio/structural/cn_mops.py
_get_regional_bed_file
def _get_regional_bed_file(data): """If we are running a non-genome analysis, pull the regional file for analysis. """ variant_regions = bedutils.merge_overlaps(tz.get_in(["config", "algorithm", "variant_regions"], data), data) is_genome = data["config"]["algorithm"].get("coverage_interval", "exome").lower() in ["genome"] if variant_regions and utils.file_exists(variant_regions) and not is_genome: return variant_regions
python
def _get_regional_bed_file(data): """If we are running a non-genome analysis, pull the regional file for analysis. """ variant_regions = bedutils.merge_overlaps(tz.get_in(["config", "algorithm", "variant_regions"], data), data) is_genome = data["config"]["algorithm"].get("coverage_interval", "exome").lower() in ["genome"] if variant_regions and utils.file_exists(variant_regions) and not is_genome: return variant_regions
[ "def", "_get_regional_bed_file", "(", "data", ")", ":", "variant_regions", "=", "bedutils", ".", "merge_overlaps", "(", "tz", ".", "get_in", "(", "[", "\"config\"", ",", "\"algorithm\"", ",", "\"variant_regions\"", "]", ",", "data", ")", ",", "data", ")", "i...
If we are running a non-genome analysis, pull the regional file for analysis.
[ "If", "we", "are", "running", "a", "non", "-", "genome", "analysis", "pull", "the", "regional", "file", "for", "analysis", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cn_mops.py#L134-L141
223,440
bcbio/bcbio-nextgen
bcbio/structural/cn_mops.py
_population_load_script
def _population_load_script(work_bams, names, chrom, pairmode, items): """Prepare BAMs for assessing CNVs in a population. """ bed_file = _get_regional_bed_file(items[0]) if bed_file: return _population_prep_targeted.format(bam_file_str=",".join(work_bams), names_str=",".join(names), chrom=chrom, num_cores=0, pairmode=pairmode, bed_file=bed_file) else: return _population_prep.format(bam_file_str=",".join(work_bams), names_str=",".join(names), chrom=chrom, num_cores=0, pairmode=pairmode)
python
def _population_load_script(work_bams, names, chrom, pairmode, items): """Prepare BAMs for assessing CNVs in a population. """ bed_file = _get_regional_bed_file(items[0]) if bed_file: return _population_prep_targeted.format(bam_file_str=",".join(work_bams), names_str=",".join(names), chrom=chrom, num_cores=0, pairmode=pairmode, bed_file=bed_file) else: return _population_prep.format(bam_file_str=",".join(work_bams), names_str=",".join(names), chrom=chrom, num_cores=0, pairmode=pairmode)
[ "def", "_population_load_script", "(", "work_bams", ",", "names", ",", "chrom", ",", "pairmode", ",", "items", ")", ":", "bed_file", "=", "_get_regional_bed_file", "(", "items", "[", "0", "]", ")", "if", "bed_file", ":", "return", "_population_prep_targeted", ...
Prepare BAMs for assessing CNVs in a population.
[ "Prepare", "BAMs", "for", "assessing", "CNVs", "in", "a", "population", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cn_mops.py#L143-L152
223,441
bcbio/bcbio-nextgen
bcbio/ngsalign/postalign.py
tobam_cl
def tobam_cl(data, out_file, is_paired=False): """Prepare command line for producing de-duplicated sorted output. - If no deduplication, sort and prepare a BAM file. - If paired, then use samblaster and prepare discordant outputs. - If unpaired, use biobambam's bammarkduplicates """ do_dedup = _check_dedup(data) umi_consensus = dd.get_umi_consensus(data) with file_transaction(data, out_file) as tx_out_file: if not do_dedup: yield (sam_to_sortbam_cl(data, tx_out_file), tx_out_file) elif umi_consensus: yield (_sam_to_grouped_umi_cl(data, umi_consensus, tx_out_file), tx_out_file) elif is_paired and _need_sr_disc_reads(data) and not _too_many_contigs(dd.get_ref_file(data)): sr_file = "%s-sr.bam" % os.path.splitext(out_file)[0] disc_file = "%s-disc.bam" % os.path.splitext(out_file)[0] with file_transaction(data, sr_file) as tx_sr_file: with file_transaction(data, disc_file) as tx_disc_file: yield (samblaster_dedup_sort(data, tx_out_file, tx_sr_file, tx_disc_file), tx_out_file) else: yield (_biobambam_dedup_sort(data, tx_out_file), tx_out_file)
python
def tobam_cl(data, out_file, is_paired=False): """Prepare command line for producing de-duplicated sorted output. - If no deduplication, sort and prepare a BAM file. - If paired, then use samblaster and prepare discordant outputs. - If unpaired, use biobambam's bammarkduplicates """ do_dedup = _check_dedup(data) umi_consensus = dd.get_umi_consensus(data) with file_transaction(data, out_file) as tx_out_file: if not do_dedup: yield (sam_to_sortbam_cl(data, tx_out_file), tx_out_file) elif umi_consensus: yield (_sam_to_grouped_umi_cl(data, umi_consensus, tx_out_file), tx_out_file) elif is_paired and _need_sr_disc_reads(data) and not _too_many_contigs(dd.get_ref_file(data)): sr_file = "%s-sr.bam" % os.path.splitext(out_file)[0] disc_file = "%s-disc.bam" % os.path.splitext(out_file)[0] with file_transaction(data, sr_file) as tx_sr_file: with file_transaction(data, disc_file) as tx_disc_file: yield (samblaster_dedup_sort(data, tx_out_file, tx_sr_file, tx_disc_file), tx_out_file) else: yield (_biobambam_dedup_sort(data, tx_out_file), tx_out_file)
[ "def", "tobam_cl", "(", "data", ",", "out_file", ",", "is_paired", "=", "False", ")", ":", "do_dedup", "=", "_check_dedup", "(", "data", ")", "umi_consensus", "=", "dd", ".", "get_umi_consensus", "(", "data", ")", "with", "file_transaction", "(", "data", "...
Prepare command line for producing de-duplicated sorted output. - If no deduplication, sort and prepare a BAM file. - If paired, then use samblaster and prepare discordant outputs. - If unpaired, use biobambam's bammarkduplicates
[ "Prepare", "command", "line", "for", "producing", "de", "-", "duplicated", "sorted", "output", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/postalign.py#L30-L52
223,442
bcbio/bcbio-nextgen
bcbio/ngsalign/postalign.py
_get_cores_memory
def _get_cores_memory(data, downscale=2): """Retrieve cores and memory, using samtools as baseline. For memory, scaling down because we share with alignment and de-duplication. """ resources = config_utils.get_resources("samtools", data["config"]) num_cores = data["config"]["algorithm"].get("num_cores", 1) max_mem = config_utils.adjust_memory(resources.get("memory", "2G"), downscale, "decrease").upper() return num_cores, max_mem
python
def _get_cores_memory(data, downscale=2): """Retrieve cores and memory, using samtools as baseline. For memory, scaling down because we share with alignment and de-duplication. """ resources = config_utils.get_resources("samtools", data["config"]) num_cores = data["config"]["algorithm"].get("num_cores", 1) max_mem = config_utils.adjust_memory(resources.get("memory", "2G"), downscale, "decrease").upper() return num_cores, max_mem
[ "def", "_get_cores_memory", "(", "data", ",", "downscale", "=", "2", ")", ":", "resources", "=", "config_utils", ".", "get_resources", "(", "\"samtools\"", ",", "data", "[", "\"config\"", "]", ")", "num_cores", "=", "data", "[", "\"config\"", "]", "[", "\"...
Retrieve cores and memory, using samtools as baseline. For memory, scaling down because we share with alignment and de-duplication.
[ "Retrieve", "cores", "and", "memory", "using", "samtools", "as", "baseline", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/postalign.py#L69-L78
223,443
bcbio/bcbio-nextgen
bcbio/ngsalign/postalign.py
sam_to_sortbam_cl
def sam_to_sortbam_cl(data, tx_out_file, name_sort=False): """Convert to sorted BAM output. Set name_sort to True to sort reads by queryname """ samtools = config_utils.get_program("samtools", data["config"]) cores, mem = _get_cores_memory(data, downscale=2) tmp_file = "%s-sorttmp" % utils.splitext_plus(tx_out_file)[0] sort_flag = "-n" if name_sort else "" return ("{samtools} sort -@ {cores} -m {mem} {sort_flag} " "-T {tmp_file} -o {tx_out_file} /dev/stdin".format(**locals()))
python
def sam_to_sortbam_cl(data, tx_out_file, name_sort=False): """Convert to sorted BAM output. Set name_sort to True to sort reads by queryname """ samtools = config_utils.get_program("samtools", data["config"]) cores, mem = _get_cores_memory(data, downscale=2) tmp_file = "%s-sorttmp" % utils.splitext_plus(tx_out_file)[0] sort_flag = "-n" if name_sort else "" return ("{samtools} sort -@ {cores} -m {mem} {sort_flag} " "-T {tmp_file} -o {tx_out_file} /dev/stdin".format(**locals()))
[ "def", "sam_to_sortbam_cl", "(", "data", ",", "tx_out_file", ",", "name_sort", "=", "False", ")", ":", "samtools", "=", "config_utils", ".", "get_program", "(", "\"samtools\"", ",", "data", "[", "\"config\"", "]", ")", "cores", ",", "mem", "=", "_get_cores_m...
Convert to sorted BAM output. Set name_sort to True to sort reads by queryname
[ "Convert", "to", "sorted", "BAM", "output", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/postalign.py#L80-L90
223,444
bcbio/bcbio-nextgen
bcbio/ngsalign/postalign.py
samblaster_dedup_sort
def samblaster_dedup_sort(data, tx_out_file, tx_sr_file, tx_disc_file): """Deduplicate and sort with samblaster, produces split read and discordant pair files. """ samblaster = config_utils.get_program("samblaster", data["config"]) samtools = config_utils.get_program("samtools", data["config"]) tmp_prefix = "%s-sorttmp" % utils.splitext_plus(tx_out_file)[0] tobam_cmd = ("{samtools} sort {sort_opt} -@ {cores} -m {mem} -T {tmp_prefix}-{dext} {out_file} -") # full BAM -- associate more memory and cores cores, mem = _get_cores_memory(data, downscale=2) # Potentially downsample to maximum coverage here if not splitting and whole genome sample ds_cmd = None if data.get("align_split") else bam.get_maxcov_downsample_cl(data, "samtools") sort_opt = "-n" if data.get("align_split") and dd.get_mark_duplicates(data) else "" if ds_cmd: dedup_cmd = "%s %s > %s" % (tobam_cmd.format(out_file="", dext="full", **locals()), ds_cmd, tx_out_file) else: dedup_cmd = tobam_cmd.format(out_file="-o %s" % tx_out_file, dext="full", **locals()) # split and discordant BAMs -- give less memory/cores since smaller files sort_opt = "" cores, mem = _get_cores_memory(data, downscale=4) splitter_cmd = tobam_cmd.format(out_file="-o %s" % tx_sr_file, dext="spl", **locals()) discordant_cmd = tobam_cmd.format(out_file="-o %s" % tx_disc_file, dext="disc", **locals()) # samblaster 0.1.22 and better require the -M flag for compatibility with bwa-mem cmd = ("{samblaster} --addMateTags -M --splitterFile >({splitter_cmd}) --discordantFile >({discordant_cmd}) " "| {dedup_cmd}") return cmd.format(**locals())
python
def samblaster_dedup_sort(data, tx_out_file, tx_sr_file, tx_disc_file): """Deduplicate and sort with samblaster, produces split read and discordant pair files. """ samblaster = config_utils.get_program("samblaster", data["config"]) samtools = config_utils.get_program("samtools", data["config"]) tmp_prefix = "%s-sorttmp" % utils.splitext_plus(tx_out_file)[0] tobam_cmd = ("{samtools} sort {sort_opt} -@ {cores} -m {mem} -T {tmp_prefix}-{dext} {out_file} -") # full BAM -- associate more memory and cores cores, mem = _get_cores_memory(data, downscale=2) # Potentially downsample to maximum coverage here if not splitting and whole genome sample ds_cmd = None if data.get("align_split") else bam.get_maxcov_downsample_cl(data, "samtools") sort_opt = "-n" if data.get("align_split") and dd.get_mark_duplicates(data) else "" if ds_cmd: dedup_cmd = "%s %s > %s" % (tobam_cmd.format(out_file="", dext="full", **locals()), ds_cmd, tx_out_file) else: dedup_cmd = tobam_cmd.format(out_file="-o %s" % tx_out_file, dext="full", **locals()) # split and discordant BAMs -- give less memory/cores since smaller files sort_opt = "" cores, mem = _get_cores_memory(data, downscale=4) splitter_cmd = tobam_cmd.format(out_file="-o %s" % tx_sr_file, dext="spl", **locals()) discordant_cmd = tobam_cmd.format(out_file="-o %s" % tx_disc_file, dext="disc", **locals()) # samblaster 0.1.22 and better require the -M flag for compatibility with bwa-mem cmd = ("{samblaster} --addMateTags -M --splitterFile >({splitter_cmd}) --discordantFile >({discordant_cmd}) " "| {dedup_cmd}") return cmd.format(**locals())
[ "def", "samblaster_dedup_sort", "(", "data", ",", "tx_out_file", ",", "tx_sr_file", ",", "tx_disc_file", ")", ":", "samblaster", "=", "config_utils", ".", "get_program", "(", "\"samblaster\"", ",", "data", "[", "\"config\"", "]", ")", "samtools", "=", "config_ut...
Deduplicate and sort with samblaster, produces split read and discordant pair files.
[ "Deduplicate", "and", "sort", "with", "samblaster", "produces", "split", "read", "and", "discordant", "pair", "files", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/postalign.py#L92-L116
223,445
bcbio/bcbio-nextgen
bcbio/ngsalign/postalign.py
_biobambam_dedup_sort
def _biobambam_dedup_sort(data, tx_out_file): """Perform streaming deduplication and sorting with biobambam's bamsormadup """ samtools = config_utils.get_program("samtools", data["config"]) cores, mem = _get_cores_memory(data, downscale=2) tmp_file = "%s-sorttmp" % utils.splitext_plus(tx_out_file)[0] if data.get("align_split"): sort_opt = "-n" if data.get("align_split") and _check_dedup(data) else "" cmd = "{samtools} sort %s -@ {cores} -m {mem} -O bam -T {tmp_file}-namesort -o {tx_out_file} -" % sort_opt else: # scale core usage to avoid memory issues with larger WGS samples cores = max(1, int(math.ceil(cores * 0.75))) ds_cmd = bam.get_maxcov_downsample_cl(data, "bamsormadup") bamsormadup = config_utils.get_program("bamsormadup", data) cmd = ("{bamsormadup} inputformat=sam threads={cores} tmpfile={tmp_file}-markdup " "SO=coordinate %s > {tx_out_file}" % ds_cmd) return cmd.format(**locals())
python
def _biobambam_dedup_sort(data, tx_out_file): """Perform streaming deduplication and sorting with biobambam's bamsormadup """ samtools = config_utils.get_program("samtools", data["config"]) cores, mem = _get_cores_memory(data, downscale=2) tmp_file = "%s-sorttmp" % utils.splitext_plus(tx_out_file)[0] if data.get("align_split"): sort_opt = "-n" if data.get("align_split") and _check_dedup(data) else "" cmd = "{samtools} sort %s -@ {cores} -m {mem} -O bam -T {tmp_file}-namesort -o {tx_out_file} -" % sort_opt else: # scale core usage to avoid memory issues with larger WGS samples cores = max(1, int(math.ceil(cores * 0.75))) ds_cmd = bam.get_maxcov_downsample_cl(data, "bamsormadup") bamsormadup = config_utils.get_program("bamsormadup", data) cmd = ("{bamsormadup} inputformat=sam threads={cores} tmpfile={tmp_file}-markdup " "SO=coordinate %s > {tx_out_file}" % ds_cmd) return cmd.format(**locals())
[ "def", "_biobambam_dedup_sort", "(", "data", ",", "tx_out_file", ")", ":", "samtools", "=", "config_utils", ".", "get_program", "(", "\"samtools\"", ",", "data", "[", "\"config\"", "]", ")", "cores", ",", "mem", "=", "_get_cores_memory", "(", "data", ",", "d...
Perform streaming deduplication and sorting with biobambam's bamsormadup
[ "Perform", "streaming", "deduplication", "and", "sorting", "with", "biobambam", "s", "bamsormadup" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/postalign.py#L118-L134
223,446
bcbio/bcbio-nextgen
bcbio/ngsalign/postalign.py
_sam_to_grouped_umi_cl
def _sam_to_grouped_umi_cl(data, umi_consensus, tx_out_file): """Mark duplicates on aligner output and convert to grouped UMIs by position. Works with either a separate umi_file or UMI embedded in the read names. """ tmp_file = "%s-sorttmp" % utils.splitext_plus(tx_out_file)[0] jvm_opts = _get_fgbio_jvm_opts(data, os.path.dirname(tmp_file), 1) cores, mem = _get_cores_memory(data) bamsormadup = config_utils.get_program("bamsormadup", data) cmd = ("{bamsormadup} tmpfile={tmp_file}-markdup inputformat=sam threads={cores} outputformat=bam " "level=0 SO=coordinate | ") # UMIs in a separate file if os.path.exists(umi_consensus) and os.path.isfile(umi_consensus): cmd += "fgbio {jvm_opts} AnnotateBamWithUmis -i /dev/stdin -f {umi_consensus} -o {tx_out_file}" # UMIs embedded in read name else: cmd += ("%s %s bamtag - | samtools view -b > {tx_out_file}" % (utils.get_program_python("umis"), config_utils.get_program("umis", data["config"]))) return cmd.format(**locals())
python
def _sam_to_grouped_umi_cl(data, umi_consensus, tx_out_file): """Mark duplicates on aligner output and convert to grouped UMIs by position. Works with either a separate umi_file or UMI embedded in the read names. """ tmp_file = "%s-sorttmp" % utils.splitext_plus(tx_out_file)[0] jvm_opts = _get_fgbio_jvm_opts(data, os.path.dirname(tmp_file), 1) cores, mem = _get_cores_memory(data) bamsormadup = config_utils.get_program("bamsormadup", data) cmd = ("{bamsormadup} tmpfile={tmp_file}-markdup inputformat=sam threads={cores} outputformat=bam " "level=0 SO=coordinate | ") # UMIs in a separate file if os.path.exists(umi_consensus) and os.path.isfile(umi_consensus): cmd += "fgbio {jvm_opts} AnnotateBamWithUmis -i /dev/stdin -f {umi_consensus} -o {tx_out_file}" # UMIs embedded in read name else: cmd += ("%s %s bamtag - | samtools view -b > {tx_out_file}" % (utils.get_program_python("umis"), config_utils.get_program("umis", data["config"]))) return cmd.format(**locals())
[ "def", "_sam_to_grouped_umi_cl", "(", "data", ",", "umi_consensus", ",", "tx_out_file", ")", ":", "tmp_file", "=", "\"%s-sorttmp\"", "%", "utils", ".", "splitext_plus", "(", "tx_out_file", ")", "[", "0", "]", "jvm_opts", "=", "_get_fgbio_jvm_opts", "(", "data", ...
Mark duplicates on aligner output and convert to grouped UMIs by position. Works with either a separate umi_file or UMI embedded in the read names.
[ "Mark", "duplicates", "on", "aligner", "output", "and", "convert", "to", "grouped", "UMIs", "by", "position", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/postalign.py#L136-L155
223,447
bcbio/bcbio-nextgen
bcbio/ngsalign/postalign.py
umi_consensus
def umi_consensus(data): """Convert UMI grouped reads into fastq pair for re-alignment. """ align_bam = dd.get_work_bam(data) umi_method, umi_tag = _check_umi_type(align_bam) f1_out = "%s-cumi-1.fq.gz" % utils.splitext_plus(align_bam)[0] f2_out = "%s-cumi-2.fq.gz" % utils.splitext_plus(align_bam)[0] avg_coverage = coverage.get_average_coverage("rawumi", dd.get_variant_regions(data), data) if not utils.file_uptodate(f1_out, align_bam): with file_transaction(data, f1_out, f2_out) as (tx_f1_out, tx_f2_out): jvm_opts = _get_fgbio_jvm_opts(data, os.path.dirname(tx_f1_out), 2) # Improve speeds by avoiding compression read/write bottlenecks io_opts = "--async-io=true --compression=0" est_options = _estimate_fgbio_defaults(avg_coverage) group_opts, cons_opts, filter_opts = _get_fgbio_options(data, est_options, umi_method) cons_method = "CallDuplexConsensusReads" if umi_method == "paired" else "CallMolecularConsensusReads" tempfile = "%s-bamtofastq-tmp" % utils.splitext_plus(f1_out)[0] ref_file = dd.get_ref_file(data) cmd = ("unset JAVA_HOME && " "fgbio {jvm_opts} {io_opts} GroupReadsByUmi {group_opts} -t {umi_tag} -s {umi_method} " "-i {align_bam} | " "fgbio {jvm_opts} {io_opts} {cons_method} {cons_opts} --sort-order=:none: " "-i /dev/stdin -o /dev/stdout | " "fgbio {jvm_opts} {io_opts} FilterConsensusReads {filter_opts} -r {ref_file} " "-i /dev/stdin -o /dev/stdout | " "bamtofastq collate=1 T={tempfile} F={tx_f1_out} F2={tx_f2_out} tags=cD,cM,cE gz=1") do.run(cmd.format(**locals()), "UMI consensus fastq generation") return f1_out, f2_out, avg_coverage
python
def umi_consensus(data): """Convert UMI grouped reads into fastq pair for re-alignment. """ align_bam = dd.get_work_bam(data) umi_method, umi_tag = _check_umi_type(align_bam) f1_out = "%s-cumi-1.fq.gz" % utils.splitext_plus(align_bam)[0] f2_out = "%s-cumi-2.fq.gz" % utils.splitext_plus(align_bam)[0] avg_coverage = coverage.get_average_coverage("rawumi", dd.get_variant_regions(data), data) if not utils.file_uptodate(f1_out, align_bam): with file_transaction(data, f1_out, f2_out) as (tx_f1_out, tx_f2_out): jvm_opts = _get_fgbio_jvm_opts(data, os.path.dirname(tx_f1_out), 2) # Improve speeds by avoiding compression read/write bottlenecks io_opts = "--async-io=true --compression=0" est_options = _estimate_fgbio_defaults(avg_coverage) group_opts, cons_opts, filter_opts = _get_fgbio_options(data, est_options, umi_method) cons_method = "CallDuplexConsensusReads" if umi_method == "paired" else "CallMolecularConsensusReads" tempfile = "%s-bamtofastq-tmp" % utils.splitext_plus(f1_out)[0] ref_file = dd.get_ref_file(data) cmd = ("unset JAVA_HOME && " "fgbio {jvm_opts} {io_opts} GroupReadsByUmi {group_opts} -t {umi_tag} -s {umi_method} " "-i {align_bam} | " "fgbio {jvm_opts} {io_opts} {cons_method} {cons_opts} --sort-order=:none: " "-i /dev/stdin -o /dev/stdout | " "fgbio {jvm_opts} {io_opts} FilterConsensusReads {filter_opts} -r {ref_file} " "-i /dev/stdin -o /dev/stdout | " "bamtofastq collate=1 T={tempfile} F={tx_f1_out} F2={tx_f2_out} tags=cD,cM,cE gz=1") do.run(cmd.format(**locals()), "UMI consensus fastq generation") return f1_out, f2_out, avg_coverage
[ "def", "umi_consensus", "(", "data", ")", ":", "align_bam", "=", "dd", ".", "get_work_bam", "(", "data", ")", "umi_method", ",", "umi_tag", "=", "_check_umi_type", "(", "align_bam", ")", "f1_out", "=", "\"%s-cumi-1.fq.gz\"", "%", "utils", ".", "splitext_plus",...
Convert UMI grouped reads into fastq pair for re-alignment.
[ "Convert", "UMI", "grouped", "reads", "into", "fastq", "pair", "for", "re", "-", "alignment", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/postalign.py#L188-L215
223,448
bcbio/bcbio-nextgen
bcbio/ngsalign/postalign.py
_get_fgbio_options
def _get_fgbio_options(data, estimated_defaults, umi_method): """Get adjustable, through resources, or default options for fgbio. """ group_opts = ["--edits", "--min-map-q"] cons_opts = ["--min-input-base-quality"] if umi_method != "paired": cons_opts += ["--min-reads", "--max-reads"] filter_opts = ["--min-reads", "--min-base-quality", "--max-base-error-rate"] defaults = {"--min-reads": "1", "--max-reads": "100000", "--min-map-q": "1", "--min-base-quality": "13", "--max-base-error-rate": "0.1", "--min-input-base-quality": "2", "--edits": "1"} defaults.update(estimated_defaults) ropts = config_utils.get_resources("fgbio", data["config"]).get("options", []) assert len(ropts) % 2 == 0, "Expect even number of options for fgbio" % ropts ropts = dict(tz.partition(2, ropts)) # Back compatibility for older base quality settings if "--min-consensus-base-quality" in ropts: ropts["--min-base-quality"] = ropts.pop("--min-consensus-base-quality") defaults.update(ropts) group_out = " ".join(["%s=%s" % (x, defaults[x]) for x in group_opts]) cons_out = " ".join(["%s=%s" % (x, defaults[x]) for x in cons_opts]) filter_out = " ".join(["%s=%s" % (x, defaults[x]) for x in filter_opts]) if umi_method != "paired": cons_out += " --output-per-base-tags=false" return group_out, cons_out, filter_out
python
def _get_fgbio_options(data, estimated_defaults, umi_method): """Get adjustable, through resources, or default options for fgbio. """ group_opts = ["--edits", "--min-map-q"] cons_opts = ["--min-input-base-quality"] if umi_method != "paired": cons_opts += ["--min-reads", "--max-reads"] filter_opts = ["--min-reads", "--min-base-quality", "--max-base-error-rate"] defaults = {"--min-reads": "1", "--max-reads": "100000", "--min-map-q": "1", "--min-base-quality": "13", "--max-base-error-rate": "0.1", "--min-input-base-quality": "2", "--edits": "1"} defaults.update(estimated_defaults) ropts = config_utils.get_resources("fgbio", data["config"]).get("options", []) assert len(ropts) % 2 == 0, "Expect even number of options for fgbio" % ropts ropts = dict(tz.partition(2, ropts)) # Back compatibility for older base quality settings if "--min-consensus-base-quality" in ropts: ropts["--min-base-quality"] = ropts.pop("--min-consensus-base-quality") defaults.update(ropts) group_out = " ".join(["%s=%s" % (x, defaults[x]) for x in group_opts]) cons_out = " ".join(["%s=%s" % (x, defaults[x]) for x in cons_opts]) filter_out = " ".join(["%s=%s" % (x, defaults[x]) for x in filter_opts]) if umi_method != "paired": cons_out += " --output-per-base-tags=false" return group_out, cons_out, filter_out
[ "def", "_get_fgbio_options", "(", "data", ",", "estimated_defaults", ",", "umi_method", ")", ":", "group_opts", "=", "[", "\"--edits\"", ",", "\"--min-map-q\"", "]", "cons_opts", "=", "[", "\"--min-input-base-quality\"", "]", "if", "umi_method", "!=", "\"paired\"", ...
Get adjustable, through resources, or default options for fgbio.
[ "Get", "adjustable", "through", "resources", "or", "default", "options", "for", "fgbio", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/postalign.py#L235-L263
223,449
bcbio/bcbio-nextgen
bcbio/ngsalign/postalign.py
_check_dedup
def _check_dedup(data): """Check configuration for de-duplication. Defaults to no de-duplication for RNA-seq and small RNA, the back compatible default. Allow overwriting with explicit `mark_duplicates: true` setting. Also defaults to false for no alignment inputs. """ if dd.get_analysis(data).lower() in ["rna-seq", "smallrna-seq"] or not dd.get_aligner(data): dup_param = utils.get_in(data, ("config", "algorithm", "mark_duplicates"), False) else: dup_param = utils.get_in(data, ("config", "algorithm", "mark_duplicates"), True) if dup_param and isinstance(dup_param, six.string_types): logger.info("Warning: bcbio no longer support explicit setting of mark_duplicate algorithm. " "Using best-practice choice based on input data.") dup_param = True return dup_param
python
def _check_dedup(data): """Check configuration for de-duplication. Defaults to no de-duplication for RNA-seq and small RNA, the back compatible default. Allow overwriting with explicit `mark_duplicates: true` setting. Also defaults to false for no alignment inputs. """ if dd.get_analysis(data).lower() in ["rna-seq", "smallrna-seq"] or not dd.get_aligner(data): dup_param = utils.get_in(data, ("config", "algorithm", "mark_duplicates"), False) else: dup_param = utils.get_in(data, ("config", "algorithm", "mark_duplicates"), True) if dup_param and isinstance(dup_param, six.string_types): logger.info("Warning: bcbio no longer support explicit setting of mark_duplicate algorithm. " "Using best-practice choice based on input data.") dup_param = True return dup_param
[ "def", "_check_dedup", "(", "data", ")", ":", "if", "dd", ".", "get_analysis", "(", "data", ")", ".", "lower", "(", ")", "in", "[", "\"rna-seq\"", ",", "\"smallrna-seq\"", "]", "or", "not", "dd", ".", "get_aligner", "(", "data", ")", ":", "dup_param", ...
Check configuration for de-duplication. Defaults to no de-duplication for RNA-seq and small RNA, the back compatible default. Allow overwriting with explicit `mark_duplicates: true` setting. Also defaults to false for no alignment inputs.
[ "Check", "configuration", "for", "de", "-", "duplication", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/postalign.py#L265-L281
223,450
bcbio/bcbio-nextgen
bcbio/ngsalign/postalign.py
dedup_bam
def dedup_bam(in_bam, data): """Perform non-stream based deduplication of BAM input files using biobambam. """ if _check_dedup(data): out_file = os.path.join(utils.safe_makedir(os.path.join(os.getcwd(), "align", dd.get_sample_name(data))), "%s-dedup%s" % utils.splitext_plus(os.path.basename(in_bam))) if not utils.file_exists(out_file): with tx_tmpdir(data) as tmpdir: with file_transaction(data, out_file) as tx_out_file: bammarkduplicates = config_utils.get_program("bammarkduplicates", data["config"]) base_tmp = os.path.join(tmpdir, os.path.splitext(os.path.basename(tx_out_file))[0]) cores, mem = _get_cores_memory(data, downscale=2) cmd = ("{bammarkduplicates} tmpfile={base_tmp}-markdup " "markthreads={cores} I={in_bam} O={tx_out_file}") do.run(cmd.format(**locals()), "De-duplication with biobambam") bam.index(out_file, data["config"]) return out_file else: return in_bam
python
def dedup_bam(in_bam, data): """Perform non-stream based deduplication of BAM input files using biobambam. """ if _check_dedup(data): out_file = os.path.join(utils.safe_makedir(os.path.join(os.getcwd(), "align", dd.get_sample_name(data))), "%s-dedup%s" % utils.splitext_plus(os.path.basename(in_bam))) if not utils.file_exists(out_file): with tx_tmpdir(data) as tmpdir: with file_transaction(data, out_file) as tx_out_file: bammarkduplicates = config_utils.get_program("bammarkduplicates", data["config"]) base_tmp = os.path.join(tmpdir, os.path.splitext(os.path.basename(tx_out_file))[0]) cores, mem = _get_cores_memory(data, downscale=2) cmd = ("{bammarkduplicates} tmpfile={base_tmp}-markdup " "markthreads={cores} I={in_bam} O={tx_out_file}") do.run(cmd.format(**locals()), "De-duplication with biobambam") bam.index(out_file, data["config"]) return out_file else: return in_bam
[ "def", "dedup_bam", "(", "in_bam", ",", "data", ")", ":", "if", "_check_dedup", "(", "data", ")", ":", "out_file", "=", "os", ".", "path", ".", "join", "(", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd"...
Perform non-stream based deduplication of BAM input files using biobambam.
[ "Perform", "non", "-", "stream", "based", "deduplication", "of", "BAM", "input", "files", "using", "biobambam", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/postalign.py#L283-L301
223,451
bcbio/bcbio-nextgen
bcbio/structural/titancna.py
_finalize_sv
def _finalize_sv(solution_file, data): """Add output files from TitanCNA calling optional solution. """ out = {"variantcaller": "titancna"} with open(solution_file) as in_handle: solution = dict(zip(in_handle.readline().strip("\r\n").split("\t"), in_handle.readline().strip("\r\n").split("\t"))) if solution.get("path"): out["purity"] = solution["purity"] out["ploidy"] = solution["ploidy"] out["cellular_prevalence"] = [x.strip() for x in solution["cellPrev"].split(",")] base = os.path.basename(solution["path"]) out["plot"] = dict([(n, solution["path"] + ext) for (n, ext) in [("rplots", ".Rplots.pdf"), ("cf", "/%s_CF.pdf" % base), ("cna", "/%s_CNA.pdf" % base), ("loh", "/%s_LOH.pdf" % base)] if os.path.exists(solution["path"] + ext)]) out["subclones"] = "%s.segs.txt" % solution["path"] out["hetsummary"] = solution_file out["vrn_file"] = to_vcf(out["subclones"], "TitanCNA", _get_header, _seg_to_vcf, data) out["lohsummary"] = loh.summary_status(out, data) return out
python
def _finalize_sv(solution_file, data): """Add output files from TitanCNA calling optional solution. """ out = {"variantcaller": "titancna"} with open(solution_file) as in_handle: solution = dict(zip(in_handle.readline().strip("\r\n").split("\t"), in_handle.readline().strip("\r\n").split("\t"))) if solution.get("path"): out["purity"] = solution["purity"] out["ploidy"] = solution["ploidy"] out["cellular_prevalence"] = [x.strip() for x in solution["cellPrev"].split(",")] base = os.path.basename(solution["path"]) out["plot"] = dict([(n, solution["path"] + ext) for (n, ext) in [("rplots", ".Rplots.pdf"), ("cf", "/%s_CF.pdf" % base), ("cna", "/%s_CNA.pdf" % base), ("loh", "/%s_LOH.pdf" % base)] if os.path.exists(solution["path"] + ext)]) out["subclones"] = "%s.segs.txt" % solution["path"] out["hetsummary"] = solution_file out["vrn_file"] = to_vcf(out["subclones"], "TitanCNA", _get_header, _seg_to_vcf, data) out["lohsummary"] = loh.summary_status(out, data) return out
[ "def", "_finalize_sv", "(", "solution_file", ",", "data", ")", ":", "out", "=", "{", "\"variantcaller\"", ":", "\"titancna\"", "}", "with", "open", "(", "solution_file", ")", "as", "in_handle", ":", "solution", "=", "dict", "(", "zip", "(", "in_handle", "....
Add output files from TitanCNA calling optional solution.
[ "Add", "output", "files", "from", "TitanCNA", "calling", "optional", "solution", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/titancna.py#L52-L73
223,452
bcbio/bcbio-nextgen
bcbio/structural/titancna.py
_should_run
def _should_run(het_file): """Check for enough input data to proceed with analysis. """ has_hets = False with open(het_file) as in_handle: for i, line in enumerate(in_handle): if i > 1: has_hets = True break return has_hets
python
def _should_run(het_file): """Check for enough input data to proceed with analysis. """ has_hets = False with open(het_file) as in_handle: for i, line in enumerate(in_handle): if i > 1: has_hets = True break return has_hets
[ "def", "_should_run", "(", "het_file", ")", ":", "has_hets", "=", "False", "with", "open", "(", "het_file", ")", "as", "in_handle", ":", "for", "i", ",", "line", "in", "enumerate", "(", "in_handle", ")", ":", "if", "i", ">", "1", ":", "has_hets", "="...
Check for enough input data to proceed with analysis.
[ "Check", "for", "enough", "input", "data", "to", "proceed", "with", "analysis", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/titancna.py#L75-L84
223,453
bcbio/bcbio-nextgen
bcbio/structural/titancna.py
_titan_cn_file
def _titan_cn_file(cnr_file, work_dir, data): """Convert CNVkit or GATK4 normalized input into TitanCNA ready format. """ out_file = os.path.join(work_dir, "%s.cn" % (utils.splitext_plus(os.path.basename(cnr_file))[0])) support_cols = {"cnvkit": ["chromosome", "start", "end", "log2"], "gatk-cnv": ["CONTIG", "START", "END", "LOG2_COPY_RATIO"]} cols = support_cols[cnvkit.bin_approach(data)] if not utils.file_uptodate(out_file, cnr_file): with file_transaction(data, out_file) as tx_out_file: iterator = pd.read_table(cnr_file, sep="\t", iterator=True, header=0, comment="@") with open(tx_out_file, "w") as handle: for chunk in iterator: chunk = chunk[cols] chunk.columns = ["chrom", "start", "end", "logR"] if cnvkit.bin_approach(data) == "cnvkit": chunk['start'] += 1 chunk.to_csv(handle, mode="a", sep="\t", index=False) return out_file
python
def _titan_cn_file(cnr_file, work_dir, data): """Convert CNVkit or GATK4 normalized input into TitanCNA ready format. """ out_file = os.path.join(work_dir, "%s.cn" % (utils.splitext_plus(os.path.basename(cnr_file))[0])) support_cols = {"cnvkit": ["chromosome", "start", "end", "log2"], "gatk-cnv": ["CONTIG", "START", "END", "LOG2_COPY_RATIO"]} cols = support_cols[cnvkit.bin_approach(data)] if not utils.file_uptodate(out_file, cnr_file): with file_transaction(data, out_file) as tx_out_file: iterator = pd.read_table(cnr_file, sep="\t", iterator=True, header=0, comment="@") with open(tx_out_file, "w") as handle: for chunk in iterator: chunk = chunk[cols] chunk.columns = ["chrom", "start", "end", "logR"] if cnvkit.bin_approach(data) == "cnvkit": chunk['start'] += 1 chunk.to_csv(handle, mode="a", sep="\t", index=False) return out_file
[ "def", "_titan_cn_file", "(", "cnr_file", ",", "work_dir", ",", "data", ")", ":", "out_file", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"%s.cn\"", "%", "(", "utils", ".", "splitext_plus", "(", "os", ".", "path", ".", "basename", "(",...
Convert CNVkit or GATK4 normalized input into TitanCNA ready format.
[ "Convert", "CNVkit", "or", "GATK4", "normalized", "input", "into", "TitanCNA", "ready", "format", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/titancna.py#L164-L181
223,454
bcbio/bcbio-nextgen
bcbio/structural/titancna.py
to_vcf
def to_vcf(in_file, caller, header_fn, vcf_fn, data, sep="\t"): """Convert output TitanCNA segs file into bgzipped VCF. """ out_file = "%s.vcf" % utils.splitext_plus(in_file)[0] if not utils.file_exists(out_file + ".gz") and not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: with open(in_file) as in_handle: with open(tx_out_file, "w") as out_handle: out_handle.write(_vcf_header.format(caller=caller)) out_handle.write("\t".join(["#CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER", "INFO", "FORMAT", dd.get_sample_name(data)]) + "\n") header, in_handle = header_fn(in_handle) for line in in_handle: out = vcf_fn(dict(zip(header, line.strip().split(sep)))) if out: out_handle.write("\t".join(out) + "\n") out_file = vcfutils.bgzip_and_index(out_file, data["config"]) effects_vcf, _ = effects.add_to_vcf(out_file, data, "snpeff") return effects_vcf or out_file
python
def to_vcf(in_file, caller, header_fn, vcf_fn, data, sep="\t"): """Convert output TitanCNA segs file into bgzipped VCF. """ out_file = "%s.vcf" % utils.splitext_plus(in_file)[0] if not utils.file_exists(out_file + ".gz") and not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: with open(in_file) as in_handle: with open(tx_out_file, "w") as out_handle: out_handle.write(_vcf_header.format(caller=caller)) out_handle.write("\t".join(["#CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER", "INFO", "FORMAT", dd.get_sample_name(data)]) + "\n") header, in_handle = header_fn(in_handle) for line in in_handle: out = vcf_fn(dict(zip(header, line.strip().split(sep)))) if out: out_handle.write("\t".join(out) + "\n") out_file = vcfutils.bgzip_and_index(out_file, data["config"]) effects_vcf, _ = effects.add_to_vcf(out_file, data, "snpeff") return effects_vcf or out_file
[ "def", "to_vcf", "(", "in_file", ",", "caller", ",", "header_fn", ",", "vcf_fn", ",", "data", ",", "sep", "=", "\"\\t\"", ")", ":", "out_file", "=", "\"%s.vcf\"", "%", "utils", ".", "splitext_plus", "(", "in_file", ")", "[", "0", "]", "if", "not", "u...
Convert output TitanCNA segs file into bgzipped VCF.
[ "Convert", "output", "TitanCNA", "segs", "file", "into", "bgzipped", "VCF", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/titancna.py#L214-L232
223,455
bcbio/bcbio-nextgen
bcbio/structural/metasv.py
run
def run(items): """Run MetaSV if we have enough supported callers, adding output to the set of calls. """ assert len(items) == 1, "Expect one input to MetaSV ensemble calling" data = items[0] work_dir = _sv_workdir(data) out_file = os.path.join(work_dir, "variants.vcf.gz") cmd = _get_cmd() + ["--sample", dd.get_sample_name(data), "--reference", dd.get_ref_file(data), "--bam", dd.get_align_bam(data), "--outdir", work_dir] methods = [] for call in data.get("sv", []): vcf_file = call.get("vcf_file", call.get("vrn_file", None)) if call["variantcaller"] in SUPPORTED and call["variantcaller"] not in methods and vcf_file is not None: methods.append(call["variantcaller"]) cmd += ["--%s_vcf" % call["variantcaller"], vcf_file] if len(methods) >= MIN_CALLERS: if not utils.file_exists(out_file): tx_work_dir = utils.safe_makedir(os.path.join(work_dir, "raw")) ins_stats = shared.calc_paired_insert_stats_save(dd.get_align_bam(data), os.path.join(tx_work_dir, "insert-stats.yaml")) cmd += ["--workdir", tx_work_dir, "--num_threads", str(dd.get_num_cores(data))] cmd += ["--spades", utils.which("spades.py"), "--age", utils.which("age_align")] cmd += ["--assembly_max_tools=1", "--assembly_pad=500"] cmd += ["--boost_sc", "--isize_mean", ins_stats["mean"], "--isize_sd", ins_stats["std"]] do.run(cmd, "Combine variant calls with MetaSV") filters = ("(NUM_SVTOOLS = 1 && ABS(SVLEN)>50000) || " "(NUM_SVTOOLS = 1 && ABS(SVLEN)<4000 && BA_FLANK_PERCENT>80) || " "(NUM_SVTOOLS = 1 && ABS(SVLEN)<4000 && BA_NUM_GOOD_REC=0) || " "(ABS(SVLEN)<4000 && BA_NUM_GOOD_REC>2)") filter_file = vfilter.cutoff_w_expression(out_file, filters, data, name="ReassemblyStats", limit_regions=None) effects_vcf, _ = effects.add_to_vcf(filter_file, data, "snpeff") data["sv"].append({"variantcaller": "metasv", "vrn_file": effects_vcf or filter_file}) return [data]
python
def run(items): """Run MetaSV if we have enough supported callers, adding output to the set of calls. """ assert len(items) == 1, "Expect one input to MetaSV ensemble calling" data = items[0] work_dir = _sv_workdir(data) out_file = os.path.join(work_dir, "variants.vcf.gz") cmd = _get_cmd() + ["--sample", dd.get_sample_name(data), "--reference", dd.get_ref_file(data), "--bam", dd.get_align_bam(data), "--outdir", work_dir] methods = [] for call in data.get("sv", []): vcf_file = call.get("vcf_file", call.get("vrn_file", None)) if call["variantcaller"] in SUPPORTED and call["variantcaller"] not in methods and vcf_file is not None: methods.append(call["variantcaller"]) cmd += ["--%s_vcf" % call["variantcaller"], vcf_file] if len(methods) >= MIN_CALLERS: if not utils.file_exists(out_file): tx_work_dir = utils.safe_makedir(os.path.join(work_dir, "raw")) ins_stats = shared.calc_paired_insert_stats_save(dd.get_align_bam(data), os.path.join(tx_work_dir, "insert-stats.yaml")) cmd += ["--workdir", tx_work_dir, "--num_threads", str(dd.get_num_cores(data))] cmd += ["--spades", utils.which("spades.py"), "--age", utils.which("age_align")] cmd += ["--assembly_max_tools=1", "--assembly_pad=500"] cmd += ["--boost_sc", "--isize_mean", ins_stats["mean"], "--isize_sd", ins_stats["std"]] do.run(cmd, "Combine variant calls with MetaSV") filters = ("(NUM_SVTOOLS = 1 && ABS(SVLEN)>50000) || " "(NUM_SVTOOLS = 1 && ABS(SVLEN)<4000 && BA_FLANK_PERCENT>80) || " "(NUM_SVTOOLS = 1 && ABS(SVLEN)<4000 && BA_NUM_GOOD_REC=0) || " "(ABS(SVLEN)<4000 && BA_NUM_GOOD_REC>2)") filter_file = vfilter.cutoff_w_expression(out_file, filters, data, name="ReassemblyStats", limit_regions=None) effects_vcf, _ = effects.add_to_vcf(filter_file, data, "snpeff") data["sv"].append({"variantcaller": "metasv", "vrn_file": effects_vcf or filter_file}) return [data]
[ "def", "run", "(", "items", ")", ":", "assert", "len", "(", "items", ")", "==", "1", ",", "\"Expect one input to MetaSV ensemble calling\"", "data", "=", "items", "[", "0", "]", "work_dir", "=", "_sv_workdir", "(", "data", ")", "out_file", "=", "os", ".", ...
Run MetaSV if we have enough supported callers, adding output to the set of calls.
[ "Run", "MetaSV", "if", "we", "have", "enough", "supported", "callers", "adding", "output", "to", "the", "set", "of", "calls", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/metasv.py#L18-L52
223,456
bcbio/bcbio-nextgen
bcbio/rnaseq/count.py
combine_count_files
def combine_count_files(files, out_file=None, ext=".fpkm"): """ combine a set of count files into a single combined file """ files = list(files) if not files: return None assert all([file_exists(x) for x in files]), \ "Some count files in %s do not exist." % files for f in files: assert file_exists(f), "%s does not exist or is empty." % f col_names = [os.path.basename(x.replace(ext, "")) for x in files] if not out_file: out_dir = os.path.join(os.path.dirname(files[0])) out_file = os.path.join(out_dir, "combined.counts") if file_exists(out_file): return out_file logger.info("Combining count files into %s." % out_file) row_names = [] col_vals = defaultdict(list) for i, f in enumerate(files): vals = [] if i == 0: with open(f) as in_handle: for line in in_handle: rname, val = line.strip().split("\t") row_names.append(rname) vals.append(val) else: with open(f) as in_handle: for line in in_handle: _, val = line.strip().split("\t") vals.append(val) col_vals[col_names[i]] = vals df = pd.DataFrame(col_vals, index=row_names) df.to_csv(out_file, sep="\t", index_label="id") return out_file
python
def combine_count_files(files, out_file=None, ext=".fpkm"): """ combine a set of count files into a single combined file """ files = list(files) if not files: return None assert all([file_exists(x) for x in files]), \ "Some count files in %s do not exist." % files for f in files: assert file_exists(f), "%s does not exist or is empty." % f col_names = [os.path.basename(x.replace(ext, "")) for x in files] if not out_file: out_dir = os.path.join(os.path.dirname(files[0])) out_file = os.path.join(out_dir, "combined.counts") if file_exists(out_file): return out_file logger.info("Combining count files into %s." % out_file) row_names = [] col_vals = defaultdict(list) for i, f in enumerate(files): vals = [] if i == 0: with open(f) as in_handle: for line in in_handle: rname, val = line.strip().split("\t") row_names.append(rname) vals.append(val) else: with open(f) as in_handle: for line in in_handle: _, val = line.strip().split("\t") vals.append(val) col_vals[col_names[i]] = vals df = pd.DataFrame(col_vals, index=row_names) df.to_csv(out_file, sep="\t", index_label="id") return out_file
[ "def", "combine_count_files", "(", "files", ",", "out_file", "=", "None", ",", "ext", "=", "\".fpkm\"", ")", ":", "files", "=", "list", "(", "files", ")", "if", "not", "files", ":", "return", "None", "assert", "all", "(", "[", "file_exists", "(", "x", ...
combine a set of count files into a single combined file
[ "combine", "a", "set", "of", "count", "files", "into", "a", "single", "combined", "file" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/count.py#L13-L51
223,457
bcbio/bcbio-nextgen
scripts/utils/cwltool2nextflow.py
nf_step_to_process
def nf_step_to_process(step, out_handle): """Convert CWL step into a nextflow process. """ pprint.pprint(step) directives = [] for req in step["task_definition"]["requirements"]: if req["requirement_type"] == "docker": directives.append("container '%s'" % req["value"]) elif req["requirement_type"] == "cpu": directives.append("cpus %s" % req["value"]) elif req["requirement_type"] == "memory": directives.append("memory '%s'" % req["value"]) task_id = step["task_id"] directives = "\n ".join(directives) inputs = "\n ".join(nf_io_to_process(step["inputs"], step["task_definition"]["inputs"], step["scatter"])) outputs = "\n ".join(nf_io_to_process(step["outputs"], step["task_definition"]["outputs"])) commandline = (step["task_definition"]["baseCommand"] + " " + " ".join([nf_input_to_cl(i) for i in step["task_definition"]["inputs"]])) out_handle.write(_nf_process_tmpl.format(**locals()))
python
def nf_step_to_process(step, out_handle): """Convert CWL step into a nextflow process. """ pprint.pprint(step) directives = [] for req in step["task_definition"]["requirements"]: if req["requirement_type"] == "docker": directives.append("container '%s'" % req["value"]) elif req["requirement_type"] == "cpu": directives.append("cpus %s" % req["value"]) elif req["requirement_type"] == "memory": directives.append("memory '%s'" % req["value"]) task_id = step["task_id"] directives = "\n ".join(directives) inputs = "\n ".join(nf_io_to_process(step["inputs"], step["task_definition"]["inputs"], step["scatter"])) outputs = "\n ".join(nf_io_to_process(step["outputs"], step["task_definition"]["outputs"])) commandline = (step["task_definition"]["baseCommand"] + " " + " ".join([nf_input_to_cl(i) for i in step["task_definition"]["inputs"]])) out_handle.write(_nf_process_tmpl.format(**locals()))
[ "def", "nf_step_to_process", "(", "step", ",", "out_handle", ")", ":", "pprint", ".", "pprint", "(", "step", ")", "directives", "=", "[", "]", "for", "req", "in", "step", "[", "\"task_definition\"", "]", "[", "\"requirements\"", "]", ":", "if", "req", "[...
Convert CWL step into a nextflow process.
[ "Convert", "CWL", "step", "into", "a", "nextflow", "process", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/cwltool2nextflow.py#L53-L74
223,458
bcbio/bcbio-nextgen
scripts/utils/cwltool2nextflow.py
nf_input_to_cl
def nf_input_to_cl(inp): """Convert an input description into command line argument. """ sep = " " if inp.get("separate") else "" val = "'%s'" % inp.get("default") if inp.get("default") else "$%s" % inp["name"] return "%s%s%s" % (inp["prefix"], sep, val)
python
def nf_input_to_cl(inp): """Convert an input description into command line argument. """ sep = " " if inp.get("separate") else "" val = "'%s'" % inp.get("default") if inp.get("default") else "$%s" % inp["name"] return "%s%s%s" % (inp["prefix"], sep, val)
[ "def", "nf_input_to_cl", "(", "inp", ")", ":", "sep", "=", "\" \"", "if", "inp", ".", "get", "(", "\"separate\"", ")", "else", "\"\"", "val", "=", "\"'%s'\"", "%", "inp", ".", "get", "(", "\"default\"", ")", "if", "inp", ".", "get", "(", "\"default\"...
Convert an input description into command line argument.
[ "Convert", "an", "input", "description", "into", "command", "line", "argument", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/cwltool2nextflow.py#L107-L112
223,459
bcbio/bcbio-nextgen
scripts/utils/cwltool2nextflow.py
_wf_to_dict
def _wf_to_dict(wf): """Parse a workflow into cwl2wdl style dictionary. """ inputs, outputs = _get_wf_inout(wf) out = {"name": _id_to_name(wf.tool["id"]).replace("-", "_"), "inputs": inputs, "outputs": outputs, "steps": [], "subworkflows": [], "requirements": []} for step in wf.steps: inputs, outputs = _get_step_inout(step) inputs, scatter = _organize_step_scatter(step, inputs) if isinstance(step.embedded_tool, cwltool.workflow.Workflow): wf_def = _wf_to_dict(step.embedded_tool) out["subworkflows"].append({"id": wf_def["name"], "definition": wf_def, "inputs": inputs, "outputs": outputs, "scatter": scatter}) else: task_def = _tool_to_dict(step.embedded_tool) out["steps"].append({"task_id": task_def["name"], "task_definition": task_def, "inputs": inputs, "outputs": outputs, "scatter": scatter}) return out
python
def _wf_to_dict(wf): """Parse a workflow into cwl2wdl style dictionary. """ inputs, outputs = _get_wf_inout(wf) out = {"name": _id_to_name(wf.tool["id"]).replace("-", "_"), "inputs": inputs, "outputs": outputs, "steps": [], "subworkflows": [], "requirements": []} for step in wf.steps: inputs, outputs = _get_step_inout(step) inputs, scatter = _organize_step_scatter(step, inputs) if isinstance(step.embedded_tool, cwltool.workflow.Workflow): wf_def = _wf_to_dict(step.embedded_tool) out["subworkflows"].append({"id": wf_def["name"], "definition": wf_def, "inputs": inputs, "outputs": outputs, "scatter": scatter}) else: task_def = _tool_to_dict(step.embedded_tool) out["steps"].append({"task_id": task_def["name"], "task_definition": task_def, "inputs": inputs, "outputs": outputs, "scatter": scatter}) return out
[ "def", "_wf_to_dict", "(", "wf", ")", ":", "inputs", ",", "outputs", "=", "_get_wf_inout", "(", "wf", ")", "out", "=", "{", "\"name\"", ":", "_id_to_name", "(", "wf", ".", "tool", "[", "\"id\"", "]", ")", ".", "replace", "(", "\"-\"", ",", "\"_\"", ...
Parse a workflow into cwl2wdl style dictionary.
[ "Parse", "a", "workflow", "into", "cwl2wdl", "style", "dictionary", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/cwltool2nextflow.py#L116-L134
223,460
bcbio/bcbio-nextgen
bcbio/variation/validate.py
_get_validate
def _get_validate(data): """Retrieve items to validate, from single samples or from combined joint calls. """ if data.get("vrn_file") and tz.get_in(["config", "algorithm", "validate"], data): return utils.deepish_copy(data) elif "group_orig" in data: for sub in multi.get_orig_items(data): if "validate" in sub["config"]["algorithm"]: sub_val = utils.deepish_copy(sub) sub_val["vrn_file"] = data["vrn_file"] return sub_val return None
python
def _get_validate(data): """Retrieve items to validate, from single samples or from combined joint calls. """ if data.get("vrn_file") and tz.get_in(["config", "algorithm", "validate"], data): return utils.deepish_copy(data) elif "group_orig" in data: for sub in multi.get_orig_items(data): if "validate" in sub["config"]["algorithm"]: sub_val = utils.deepish_copy(sub) sub_val["vrn_file"] = data["vrn_file"] return sub_val return None
[ "def", "_get_validate", "(", "data", ")", ":", "if", "data", ".", "get", "(", "\"vrn_file\"", ")", "and", "tz", ".", "get_in", "(", "[", "\"config\"", ",", "\"algorithm\"", ",", "\"validate\"", "]", ",", "data", ")", ":", "return", "utils", ".", "deepi...
Retrieve items to validate, from single samples or from combined joint calls.
[ "Retrieve", "items", "to", "validate", "from", "single", "samples", "or", "from", "combined", "joint", "calls", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validate.py#L31-L42
223,461
bcbio/bcbio-nextgen
bcbio/variation/validate.py
normalize_input_path
def normalize_input_path(x, data): """Normalize path for input files, handling relative paths. Looks for non-absolute paths in local and fastq directories """ if x is None: return None elif os.path.isabs(x): return os.path.normpath(x) else: for d in [data["dirs"].get("fastq"), data["dirs"].get("work")]: if d: cur_x = os.path.normpath(os.path.join(d, x)) if os.path.exists(cur_x): return cur_x raise IOError("Could not find validation file %s" % x)
python
def normalize_input_path(x, data): """Normalize path for input files, handling relative paths. Looks for non-absolute paths in local and fastq directories """ if x is None: return None elif os.path.isabs(x): return os.path.normpath(x) else: for d in [data["dirs"].get("fastq"), data["dirs"].get("work")]: if d: cur_x = os.path.normpath(os.path.join(d, x)) if os.path.exists(cur_x): return cur_x raise IOError("Could not find validation file %s" % x)
[ "def", "normalize_input_path", "(", "x", ",", "data", ")", ":", "if", "x", "is", "None", ":", "return", "None", "elif", "os", ".", "path", ".", "isabs", "(", "x", ")", ":", "return", "os", ".", "path", ".", "normpath", "(", "x", ")", "else", ":",...
Normalize path for input files, handling relative paths. Looks for non-absolute paths in local and fastq directories
[ "Normalize", "path", "for", "input", "files", "handling", "relative", "paths", ".", "Looks", "for", "non", "-", "absolute", "paths", "in", "local", "and", "fastq", "directories" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validate.py#L44-L58
223,462
bcbio/bcbio-nextgen
bcbio/variation/validate.py
_get_caller_supplement
def _get_caller_supplement(caller, data): """Some callers like MuTect incorporate a second caller for indels. """ if caller == "mutect": icaller = tz.get_in(["config", "algorithm", "indelcaller"], data) if icaller: caller = "%s/%s" % (caller, icaller) return caller
python
def _get_caller_supplement(caller, data): """Some callers like MuTect incorporate a second caller for indels. """ if caller == "mutect": icaller = tz.get_in(["config", "algorithm", "indelcaller"], data) if icaller: caller = "%s/%s" % (caller, icaller) return caller
[ "def", "_get_caller_supplement", "(", "caller", ",", "data", ")", ":", "if", "caller", "==", "\"mutect\"", ":", "icaller", "=", "tz", ".", "get_in", "(", "[", "\"config\"", ",", "\"algorithm\"", ",", "\"indelcaller\"", "]", ",", "data", ")", "if", "icaller...
Some callers like MuTect incorporate a second caller for indels.
[ "Some", "callers", "like", "MuTect", "incorporate", "a", "second", "caller", "for", "indels", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validate.py#L84-L91
223,463
bcbio/bcbio-nextgen
bcbio/variation/validate.py
_pick_lead_item
def _pick_lead_item(items): """Choose lead item for a set of samples. Picks tumors for tumor/normal pairs and first sample for batch groups. """ paired = vcfutils.get_paired(items) if paired: return paired.tumor_data else: return list(items)[0]
python
def _pick_lead_item(items): """Choose lead item for a set of samples. Picks tumors for tumor/normal pairs and first sample for batch groups. """ paired = vcfutils.get_paired(items) if paired: return paired.tumor_data else: return list(items)[0]
[ "def", "_pick_lead_item", "(", "items", ")", ":", "paired", "=", "vcfutils", ".", "get_paired", "(", "items", ")", "if", "paired", ":", "return", "paired", ".", "tumor_data", "else", ":", "return", "list", "(", "items", ")", "[", "0", "]" ]
Choose lead item for a set of samples. Picks tumors for tumor/normal pairs and first sample for batch groups.
[ "Choose", "lead", "item", "for", "a", "set", "of", "samples", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validate.py#L93-L102
223,464
bcbio/bcbio-nextgen
bcbio/variation/validate.py
_normalize_cwl_inputs
def _normalize_cwl_inputs(items): """Extract variation and validation data from CWL input list of batched samples. """ with_validate = {} vrn_files = [] ready_items = [] batch_samples = [] for data in (cwlutils.normalize_missing(utils.to_single_data(d)) for d in items): batch_samples.append(dd.get_sample_name(data)) if tz.get_in(["config", "algorithm", "validate"], data): with_validate[_checksum(tz.get_in(["config", "algorithm", "validate"], data))] = data if data.get("vrn_file"): vrn_files.append(data["vrn_file"]) ready_items.append(data) if len(with_validate) == 0: data = _pick_lead_item(ready_items) data["batch_samples"] = batch_samples return data else: assert len(with_validate) == 1, len(with_validate) assert len(set(vrn_files)) == 1, set(vrn_files) data = _pick_lead_item(with_validate.values()) data["batch_samples"] = batch_samples data["vrn_file"] = vrn_files[0] return data
python
def _normalize_cwl_inputs(items): """Extract variation and validation data from CWL input list of batched samples. """ with_validate = {} vrn_files = [] ready_items = [] batch_samples = [] for data in (cwlutils.normalize_missing(utils.to_single_data(d)) for d in items): batch_samples.append(dd.get_sample_name(data)) if tz.get_in(["config", "algorithm", "validate"], data): with_validate[_checksum(tz.get_in(["config", "algorithm", "validate"], data))] = data if data.get("vrn_file"): vrn_files.append(data["vrn_file"]) ready_items.append(data) if len(with_validate) == 0: data = _pick_lead_item(ready_items) data["batch_samples"] = batch_samples return data else: assert len(with_validate) == 1, len(with_validate) assert len(set(vrn_files)) == 1, set(vrn_files) data = _pick_lead_item(with_validate.values()) data["batch_samples"] = batch_samples data["vrn_file"] = vrn_files[0] return data
[ "def", "_normalize_cwl_inputs", "(", "items", ")", ":", "with_validate", "=", "{", "}", "vrn_files", "=", "[", "]", "ready_items", "=", "[", "]", "batch_samples", "=", "[", "]", "for", "data", "in", "(", "cwlutils", ".", "normalize_missing", "(", "utils", ...
Extract variation and validation data from CWL input list of batched samples.
[ "Extract", "variation", "and", "validation", "data", "from", "CWL", "input", "list", "of", "batched", "samples", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validate.py#L104-L128
223,465
bcbio/bcbio-nextgen
bcbio/variation/validate.py
compare_to_rm
def compare_to_rm(data): """Compare final variant calls against reference materials of known calls. """ if isinstance(data, (list, tuple)) and cwlutils.is_cwl_run(utils.to_single_data(data[0])): data = _normalize_cwl_inputs(data) toval_data = _get_validate(data) toval_data = cwlutils.unpack_tarballs(toval_data, toval_data) if toval_data: caller = _get_caller(toval_data) sample = dd.get_sample_name(toval_data) base_dir = utils.safe_makedir(os.path.join(toval_data["dirs"]["work"], "validate", sample, caller)) if isinstance(toval_data["vrn_file"], (list, tuple)): raise NotImplementedError("Multiple input files for validation: %s" % toval_data["vrn_file"]) else: vrn_file = os.path.abspath(toval_data["vrn_file"]) rm_file = normalize_input_path(toval_data["config"]["algorithm"]["validate"], toval_data) rm_interval_file = _gunzip(normalize_input_path(toval_data["config"]["algorithm"].get("validate_regions"), toval_data), toval_data) rm_interval_file = bedutils.clean_file(rm_interval_file, toval_data, prefix="validateregions-", bedprep_dir=utils.safe_makedir(os.path.join(base_dir, "bedprep"))) rm_file = naming.handle_synonyms(rm_file, dd.get_ref_file(toval_data), data.get("genome_build"), base_dir, data) rm_interval_file = (naming.handle_synonyms(rm_interval_file, dd.get_ref_file(toval_data), data.get("genome_build"), base_dir, data) if rm_interval_file else None) vmethod = tz.get_in(["config", "algorithm", "validate_method"], data, "rtg") # RTG can fail on totally empty files. 
Call everything in truth set as false negatives if not vcfutils.vcf_has_variants(vrn_file): eval_files = _setup_call_false(rm_file, rm_interval_file, base_dir, toval_data, "fn") data["validate"] = _rtg_add_summary_file(eval_files, base_dir, toval_data) # empty validation file, every call is a false positive elif not vcfutils.vcf_has_variants(rm_file): eval_files = _setup_call_fps(vrn_file, rm_interval_file, base_dir, toval_data, "fp") data["validate"] = _rtg_add_summary_file(eval_files, base_dir, toval_data) elif vmethod in ["rtg", "rtg-squash-ploidy"]: eval_files = _run_rtg_eval(vrn_file, rm_file, rm_interval_file, base_dir, toval_data, vmethod) eval_files = _annotate_validations(eval_files, toval_data) data["validate"] = _rtg_add_summary_file(eval_files, base_dir, toval_data) elif vmethod == "hap.py": data["validate"] = _run_happy_eval(vrn_file, rm_file, rm_interval_file, base_dir, toval_data) elif vmethod == "bcbio.variation": data["validate"] = _run_bcbio_variation(vrn_file, rm_file, rm_interval_file, base_dir, sample, caller, toval_data) return [[data]]
python
def compare_to_rm(data): """Compare final variant calls against reference materials of known calls. """ if isinstance(data, (list, tuple)) and cwlutils.is_cwl_run(utils.to_single_data(data[0])): data = _normalize_cwl_inputs(data) toval_data = _get_validate(data) toval_data = cwlutils.unpack_tarballs(toval_data, toval_data) if toval_data: caller = _get_caller(toval_data) sample = dd.get_sample_name(toval_data) base_dir = utils.safe_makedir(os.path.join(toval_data["dirs"]["work"], "validate", sample, caller)) if isinstance(toval_data["vrn_file"], (list, tuple)): raise NotImplementedError("Multiple input files for validation: %s" % toval_data["vrn_file"]) else: vrn_file = os.path.abspath(toval_data["vrn_file"]) rm_file = normalize_input_path(toval_data["config"]["algorithm"]["validate"], toval_data) rm_interval_file = _gunzip(normalize_input_path(toval_data["config"]["algorithm"].get("validate_regions"), toval_data), toval_data) rm_interval_file = bedutils.clean_file(rm_interval_file, toval_data, prefix="validateregions-", bedprep_dir=utils.safe_makedir(os.path.join(base_dir, "bedprep"))) rm_file = naming.handle_synonyms(rm_file, dd.get_ref_file(toval_data), data.get("genome_build"), base_dir, data) rm_interval_file = (naming.handle_synonyms(rm_interval_file, dd.get_ref_file(toval_data), data.get("genome_build"), base_dir, data) if rm_interval_file else None) vmethod = tz.get_in(["config", "algorithm", "validate_method"], data, "rtg") # RTG can fail on totally empty files. 
Call everything in truth set as false negatives if not vcfutils.vcf_has_variants(vrn_file): eval_files = _setup_call_false(rm_file, rm_interval_file, base_dir, toval_data, "fn") data["validate"] = _rtg_add_summary_file(eval_files, base_dir, toval_data) # empty validation file, every call is a false positive elif not vcfutils.vcf_has_variants(rm_file): eval_files = _setup_call_fps(vrn_file, rm_interval_file, base_dir, toval_data, "fp") data["validate"] = _rtg_add_summary_file(eval_files, base_dir, toval_data) elif vmethod in ["rtg", "rtg-squash-ploidy"]: eval_files = _run_rtg_eval(vrn_file, rm_file, rm_interval_file, base_dir, toval_data, vmethod) eval_files = _annotate_validations(eval_files, toval_data) data["validate"] = _rtg_add_summary_file(eval_files, base_dir, toval_data) elif vmethod == "hap.py": data["validate"] = _run_happy_eval(vrn_file, rm_file, rm_interval_file, base_dir, toval_data) elif vmethod == "bcbio.variation": data["validate"] = _run_bcbio_variation(vrn_file, rm_file, rm_interval_file, base_dir, sample, caller, toval_data) return [[data]]
[ "def", "compare_to_rm", "(", "data", ")", ":", "if", "isinstance", "(", "data", ",", "(", "list", ",", "tuple", ")", ")", "and", "cwlutils", ".", "is_cwl_run", "(", "utils", ".", "to_single_data", "(", "data", "[", "0", "]", ")", ")", ":", "data", ...
Compare final variant calls against reference materials of known calls.
[ "Compare", "final", "variant", "calls", "against", "reference", "materials", "of", "known", "calls", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validate.py#L139-L184
223,466
bcbio/bcbio-nextgen
bcbio/variation/validate.py
_annotate_validations
def _annotate_validations(eval_files, data): """Add annotations about potential problem regions to validation VCFs. """ for key in ["tp", "tp-calls", "fp", "fn"]: if eval_files.get(key): eval_files[key] = annotation.add_genome_context(eval_files[key], data) return eval_files
python
def _annotate_validations(eval_files, data): """Add annotations about potential problem regions to validation VCFs. """ for key in ["tp", "tp-calls", "fp", "fn"]: if eval_files.get(key): eval_files[key] = annotation.add_genome_context(eval_files[key], data) return eval_files
[ "def", "_annotate_validations", "(", "eval_files", ",", "data", ")", ":", "for", "key", "in", "[", "\"tp\"", ",", "\"tp-calls\"", ",", "\"fp\"", ",", "\"fn\"", "]", ":", "if", "eval_files", ".", "get", "(", "key", ")", ":", "eval_files", "[", "key", "]...
Add annotations about potential problem regions to validation VCFs.
[ "Add", "annotations", "about", "potential", "problem", "regions", "to", "validation", "VCFs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validate.py#L186-L192
223,467
bcbio/bcbio-nextgen
bcbio/variation/validate.py
_setup_call_false
def _setup_call_false(vrn_file, rm_bed, base_dir, data, call_type): """Create set of false positives or ngatives for inputs with empty truth sets. """ out_file = os.path.join(base_dir, "%s.vcf.gz" % call_type) if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: if not vrn_file.endswith(".gz"): vrn_file = vcfutils.bgzip_and_index(vrn_file, out_dir=os.path.dirname(tx_out_file)) cmd = ("bcftools view -R {rm_bed} -f 'PASS,.' {vrn_file} -O z -o {tx_out_file}") do.run(cmd.format(**locals()), "Prepare %s with empty reference" % call_type, data) return {call_type: out_file}
python
def _setup_call_false(vrn_file, rm_bed, base_dir, data, call_type): """Create set of false positives or ngatives for inputs with empty truth sets. """ out_file = os.path.join(base_dir, "%s.vcf.gz" % call_type) if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: if not vrn_file.endswith(".gz"): vrn_file = vcfutils.bgzip_and_index(vrn_file, out_dir=os.path.dirname(tx_out_file)) cmd = ("bcftools view -R {rm_bed} -f 'PASS,.' {vrn_file} -O z -o {tx_out_file}") do.run(cmd.format(**locals()), "Prepare %s with empty reference" % call_type, data) return {call_type: out_file}
[ "def", "_setup_call_false", "(", "vrn_file", ",", "rm_bed", ",", "base_dir", ",", "data", ",", "call_type", ")", ":", "out_file", "=", "os", ".", "path", ".", "join", "(", "base_dir", ",", "\"%s.vcf.gz\"", "%", "call_type", ")", "if", "not", "utils", "."...
Create set of false positives or ngatives for inputs with empty truth sets.
[ "Create", "set", "of", "false", "positives", "or", "ngatives", "for", "inputs", "with", "empty", "truth", "sets", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validate.py#L196-L206
223,468
bcbio/bcbio-nextgen
bcbio/variation/validate.py
_rtg_add_summary_file
def _rtg_add_summary_file(eval_files, base_dir, data): """Parse output TP FP and FN files to generate metrics for plotting. """ out_file = os.path.join(base_dir, "validate-summary.csv") if not utils.file_uptodate(out_file, eval_files.get("tp", eval_files.get("fp", eval_files["fn"]))): with file_transaction(data, out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: writer = csv.writer(out_handle) writer.writerow(["sample", "caller", "vtype", "metric", "value"]) base = _get_sample_and_caller(data) for metric in ["tp", "fp", "fn"]: for vtype, bcftools_types in [("SNPs", "--types snps"), ("Indels", "--exclude-types snps")]: in_file = eval_files.get(metric) if in_file and os.path.exists(in_file): cmd = ("bcftools view {bcftools_types} {in_file} | grep -v ^# | wc -l") count = int(subprocess.check_output(cmd.format(**locals()), shell=True)) else: count = 0 writer.writerow(base + [vtype, metric, count]) eval_files["summary"] = out_file return eval_files
python
def _rtg_add_summary_file(eval_files, base_dir, data): """Parse output TP FP and FN files to generate metrics for plotting. """ out_file = os.path.join(base_dir, "validate-summary.csv") if not utils.file_uptodate(out_file, eval_files.get("tp", eval_files.get("fp", eval_files["fn"]))): with file_transaction(data, out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: writer = csv.writer(out_handle) writer.writerow(["sample", "caller", "vtype", "metric", "value"]) base = _get_sample_and_caller(data) for metric in ["tp", "fp", "fn"]: for vtype, bcftools_types in [("SNPs", "--types snps"), ("Indels", "--exclude-types snps")]: in_file = eval_files.get(metric) if in_file and os.path.exists(in_file): cmd = ("bcftools view {bcftools_types} {in_file} | grep -v ^# | wc -l") count = int(subprocess.check_output(cmd.format(**locals()), shell=True)) else: count = 0 writer.writerow(base + [vtype, metric, count]) eval_files["summary"] = out_file return eval_files
[ "def", "_rtg_add_summary_file", "(", "eval_files", ",", "base_dir", ",", "data", ")", ":", "out_file", "=", "os", ".", "path", ".", "join", "(", "base_dir", ",", "\"validate-summary.csv\"", ")", "if", "not", "utils", ".", "file_uptodate", "(", "out_file", ",...
Parse output TP FP and FN files to generate metrics for plotting.
[ "Parse", "output", "TP", "FP", "and", "FN", "files", "to", "generate", "metrics", "for", "plotting", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validate.py#L214-L235
223,469
bcbio/bcbio-nextgen
bcbio/variation/validate.py
_prepare_inputs
def _prepare_inputs(vrn_file, rm_file, rm_interval_file, base_dir, data): """Prepare input VCF and BED files for validation. """ if not rm_file.endswith(".vcf.gz") or not os.path.exists(rm_file + ".tbi"): rm_file = vcfutils.bgzip_and_index(rm_file, data["config"], out_dir=base_dir) if len(vcfutils.get_samples(vrn_file)) > 1: base = utils.splitext_plus(os.path.basename(vrn_file))[0] sample_file = os.path.join(base_dir, "%s-%s.vcf.gz" % (base, dd.get_sample_name(data))) vrn_file = vcfutils.select_sample(vrn_file, dd.get_sample_name(data), sample_file, data["config"]) # rtg fails on bgzipped VCFs produced by GatherVcfs so we re-prep them else: vrn_file = vcfutils.bgzip_and_index(vrn_file, data["config"], out_dir=base_dir) interval_bed = _get_merged_intervals(rm_interval_file, vrn_file, base_dir, data) return vrn_file, rm_file, interval_bed
python
def _prepare_inputs(vrn_file, rm_file, rm_interval_file, base_dir, data): """Prepare input VCF and BED files for validation. """ if not rm_file.endswith(".vcf.gz") or not os.path.exists(rm_file + ".tbi"): rm_file = vcfutils.bgzip_and_index(rm_file, data["config"], out_dir=base_dir) if len(vcfutils.get_samples(vrn_file)) > 1: base = utils.splitext_plus(os.path.basename(vrn_file))[0] sample_file = os.path.join(base_dir, "%s-%s.vcf.gz" % (base, dd.get_sample_name(data))) vrn_file = vcfutils.select_sample(vrn_file, dd.get_sample_name(data), sample_file, data["config"]) # rtg fails on bgzipped VCFs produced by GatherVcfs so we re-prep them else: vrn_file = vcfutils.bgzip_and_index(vrn_file, data["config"], out_dir=base_dir) interval_bed = _get_merged_intervals(rm_interval_file, vrn_file, base_dir, data) return vrn_file, rm_file, interval_bed
[ "def", "_prepare_inputs", "(", "vrn_file", ",", "rm_file", ",", "rm_interval_file", ",", "base_dir", ",", "data", ")", ":", "if", "not", "rm_file", ".", "endswith", "(", "\".vcf.gz\"", ")", "or", "not", "os", ".", "path", ".", "exists", "(", "rm_file", "...
Prepare input VCF and BED files for validation.
[ "Prepare", "input", "VCF", "and", "BED", "files", "for", "validation", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validate.py#L237-L251
223,470
bcbio/bcbio-nextgen
bcbio/variation/validate.py
_pick_best_quality_score
def _pick_best_quality_score(vrn_file): """Flexible quality score selection, picking the best available. Implementation based on discussion: https://github.com/bcbio/bcbio-nextgen/commit/a538cecd86c0000d17d3f9d4f8ac9d2da04f9884#commitcomment-14539249 (RTG=AVR/GATK=VQSLOD/MuTect=t_lod_fstar, otherwise GQ, otherwise QUAL, otherwise DP.) For MuTect, it's not clear how to get t_lod_fstar, the right quality score, into VCF cleanly. MuTect2 has TLOD in the INFO field. """ # pysam fails on checking reference contigs if input is empty if not vcfutils.vcf_has_variants(vrn_file): return "DP" to_check = 25 scores = collections.defaultdict(int) try: in_handle = VariantFile(vrn_file) except ValueError: raise ValueError("Failed to parse input file in preparation for validation: %s" % vrn_file) with contextlib.closing(in_handle) as val_in: for i, rec in enumerate(val_in): if i > to_check: break if "VQSLOD" in rec.info and rec.info.get("VQSLOD") is not None: scores["INFO=VQSLOD"] += 1 if "TLOD" in rec.info and rec.info.get("TLOD") is not None: scores["INFO=TLOD"] += 1 for skey in ["AVR", "GQ", "DP"]: if len(rec.samples) > 0 and rec.samples[0].get(skey) is not None: scores[skey] += 1 if rec.qual: scores["QUAL"] += 1 for key in ["AVR", "INFO=VQSLOD", "INFO=TLOD", "GQ", "QUAL", "DP"]: if scores[key] > 0: return key raise ValueError("Did not find quality score for validation from %s" % vrn_file)
python
def _pick_best_quality_score(vrn_file): """Flexible quality score selection, picking the best available. Implementation based on discussion: https://github.com/bcbio/bcbio-nextgen/commit/a538cecd86c0000d17d3f9d4f8ac9d2da04f9884#commitcomment-14539249 (RTG=AVR/GATK=VQSLOD/MuTect=t_lod_fstar, otherwise GQ, otherwise QUAL, otherwise DP.) For MuTect, it's not clear how to get t_lod_fstar, the right quality score, into VCF cleanly. MuTect2 has TLOD in the INFO field. """ # pysam fails on checking reference contigs if input is empty if not vcfutils.vcf_has_variants(vrn_file): return "DP" to_check = 25 scores = collections.defaultdict(int) try: in_handle = VariantFile(vrn_file) except ValueError: raise ValueError("Failed to parse input file in preparation for validation: %s" % vrn_file) with contextlib.closing(in_handle) as val_in: for i, rec in enumerate(val_in): if i > to_check: break if "VQSLOD" in rec.info and rec.info.get("VQSLOD") is not None: scores["INFO=VQSLOD"] += 1 if "TLOD" in rec.info and rec.info.get("TLOD") is not None: scores["INFO=TLOD"] += 1 for skey in ["AVR", "GQ", "DP"]: if len(rec.samples) > 0 and rec.samples[0].get(skey) is not None: scores[skey] += 1 if rec.qual: scores["QUAL"] += 1 for key in ["AVR", "INFO=VQSLOD", "INFO=TLOD", "GQ", "QUAL", "DP"]: if scores[key] > 0: return key raise ValueError("Did not find quality score for validation from %s" % vrn_file)
[ "def", "_pick_best_quality_score", "(", "vrn_file", ")", ":", "# pysam fails on checking reference contigs if input is empty", "if", "not", "vcfutils", ".", "vcf_has_variants", "(", "vrn_file", ")", ":", "return", "\"DP\"", "to_check", "=", "25", "scores", "=", "collect...
Flexible quality score selection, picking the best available. Implementation based on discussion: https://github.com/bcbio/bcbio-nextgen/commit/a538cecd86c0000d17d3f9d4f8ac9d2da04f9884#commitcomment-14539249 (RTG=AVR/GATK=VQSLOD/MuTect=t_lod_fstar, otherwise GQ, otherwise QUAL, otherwise DP.) For MuTect, it's not clear how to get t_lod_fstar, the right quality score, into VCF cleanly. MuTect2 has TLOD in the INFO field.
[ "Flexible", "quality", "score", "selection", "picking", "the", "best", "available", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validate.py#L305-L342
223,471
bcbio/bcbio-nextgen
bcbio/variation/validate.py
_get_merged_intervals
def _get_merged_intervals(rm_interval_file, vrn_file, base_dir, data): """Retrieve intervals to run validation on, merging reference and callable BED files. """ a_intervals = get_analysis_intervals(data, vrn_file, base_dir) if a_intervals: final_intervals = shared.remove_lcr_regions(a_intervals, [data]) if rm_interval_file: caller = _get_caller(data) sample = dd.get_sample_name(data) combo_intervals = os.path.join(base_dir, "%s-%s-%s-wrm.bed" % (utils.splitext_plus(os.path.basename(final_intervals))[0], sample, caller)) if not utils.file_uptodate(combo_intervals, final_intervals): with file_transaction(data, combo_intervals) as tx_out_file: with utils.chdir(os.path.dirname(tx_out_file)): # Copy files locally to avoid issues on shared filesystems # where BEDtools has trouble accessing the same base # files from multiple locations a = os.path.basename(final_intervals) b = os.path.basename(rm_interval_file) try: shutil.copyfile(final_intervals, a) except IOError: time.sleep(60) shutil.copyfile(final_intervals, a) try: shutil.copyfile(rm_interval_file, b) except IOError: time.sleep(60) shutil.copyfile(rm_interval_file, b) cmd = ("bedtools intersect -nonamecheck -a {a} -b {b} > {tx_out_file}") do.run(cmd.format(**locals()), "Intersect callable intervals for rtg vcfeval") final_intervals = combo_intervals else: assert rm_interval_file, "No intervals to subset analysis with for %s" % vrn_file final_intervals = shared.remove_lcr_regions(rm_interval_file, [data]) return final_intervals
python
def _get_merged_intervals(rm_interval_file, vrn_file, base_dir, data): """Retrieve intervals to run validation on, merging reference and callable BED files. """ a_intervals = get_analysis_intervals(data, vrn_file, base_dir) if a_intervals: final_intervals = shared.remove_lcr_regions(a_intervals, [data]) if rm_interval_file: caller = _get_caller(data) sample = dd.get_sample_name(data) combo_intervals = os.path.join(base_dir, "%s-%s-%s-wrm.bed" % (utils.splitext_plus(os.path.basename(final_intervals))[0], sample, caller)) if not utils.file_uptodate(combo_intervals, final_intervals): with file_transaction(data, combo_intervals) as tx_out_file: with utils.chdir(os.path.dirname(tx_out_file)): # Copy files locally to avoid issues on shared filesystems # where BEDtools has trouble accessing the same base # files from multiple locations a = os.path.basename(final_intervals) b = os.path.basename(rm_interval_file) try: shutil.copyfile(final_intervals, a) except IOError: time.sleep(60) shutil.copyfile(final_intervals, a) try: shutil.copyfile(rm_interval_file, b) except IOError: time.sleep(60) shutil.copyfile(rm_interval_file, b) cmd = ("bedtools intersect -nonamecheck -a {a} -b {b} > {tx_out_file}") do.run(cmd.format(**locals()), "Intersect callable intervals for rtg vcfeval") final_intervals = combo_intervals else: assert rm_interval_file, "No intervals to subset analysis with for %s" % vrn_file final_intervals = shared.remove_lcr_regions(rm_interval_file, [data]) return final_intervals
[ "def", "_get_merged_intervals", "(", "rm_interval_file", ",", "vrn_file", ",", "base_dir", ",", "data", ")", ":", "a_intervals", "=", "get_analysis_intervals", "(", "data", ",", "vrn_file", ",", "base_dir", ")", "if", "a_intervals", ":", "final_intervals", "=", ...
Retrieve intervals to run validation on, merging reference and callable BED files.
[ "Retrieve", "intervals", "to", "run", "validation", "on", "merging", "reference", "and", "callable", "BED", "files", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validate.py#L344-L380
223,472
bcbio/bcbio-nextgen
bcbio/variation/validate.py
_callable_from_gvcf
def _callable_from_gvcf(data, vrn_file, out_dir): """Retrieve callable regions based on ref call regions in gVCF. Uses https://github.com/lijiayong/gvcf_regions """ methods = {"freebayes": "freebayes", "platypus": "platypus", "gatk-haplotype": "gatk"} gvcf_type = methods.get(dd.get_variantcaller(data)) if gvcf_type: out_file = os.path.join(out_dir, "%s-gcvf-coverage.bed" % utils.splitext_plus(os.path.basename(vrn_file))[0]) if not utils.file_uptodate(out_file, vrn_file): with file_transaction(data, out_file) as tx_out_file: cmd = ("gvcf_regions.py --gvcf_type {gvcf_type} {vrn_file} " "| bedtools merge > {tx_out_file}") do.run(cmd.format(**locals()), "Convert gVCF to BED file of callable regions") return out_file
python
def _callable_from_gvcf(data, vrn_file, out_dir): """Retrieve callable regions based on ref call regions in gVCF. Uses https://github.com/lijiayong/gvcf_regions """ methods = {"freebayes": "freebayes", "platypus": "platypus", "gatk-haplotype": "gatk"} gvcf_type = methods.get(dd.get_variantcaller(data)) if gvcf_type: out_file = os.path.join(out_dir, "%s-gcvf-coverage.bed" % utils.splitext_plus(os.path.basename(vrn_file))[0]) if not utils.file_uptodate(out_file, vrn_file): with file_transaction(data, out_file) as tx_out_file: cmd = ("gvcf_regions.py --gvcf_type {gvcf_type} {vrn_file} " "| bedtools merge > {tx_out_file}") do.run(cmd.format(**locals()), "Convert gVCF to BED file of callable regions") return out_file
[ "def", "_callable_from_gvcf", "(", "data", ",", "vrn_file", ",", "out_dir", ")", ":", "methods", "=", "{", "\"freebayes\"", ":", "\"freebayes\"", ",", "\"platypus\"", ":", "\"platypus\"", ",", "\"gatk-haplotype\"", ":", "\"gatk\"", "}", "gvcf_type", "=", "method...
Retrieve callable regions based on ref call regions in gVCF. Uses https://github.com/lijiayong/gvcf_regions
[ "Retrieve", "callable", "regions", "based", "on", "ref", "call", "regions", "in", "gVCF", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validate.py#L382-L398
223,473
bcbio/bcbio-nextgen
bcbio/variation/validate.py
get_analysis_intervals
def get_analysis_intervals(data, vrn_file, base_dir): """Retrieve analysis regions for the current variant calling pipeline. """ from bcbio.bam import callable if vrn_file and vcfutils.is_gvcf_file(vrn_file): callable_bed = _callable_from_gvcf(data, vrn_file, base_dir) if callable_bed: return callable_bed if data.get("ensemble_bed"): return data["ensemble_bed"] elif dd.get_sample_callable(data): return dd.get_sample_callable(data) elif data.get("align_bam"): return callable.sample_callable_bed(data["align_bam"], dd.get_ref_file(data), data)[0] elif data.get("work_bam"): return callable.sample_callable_bed(data["work_bam"], dd.get_ref_file(data), data)[0] elif data.get("work_bam_callable"): data = utils.deepish_copy(data) data["work_bam"] = data.pop("work_bam_callable") return callable.sample_callable_bed(data["work_bam"], dd.get_ref_file(data), data)[0] elif tz.get_in(["config", "algorithm", "callable_regions"], data): return tz.get_in(["config", "algorithm", "callable_regions"], data) elif tz.get_in(["config", "algorithm", "variant_regions"], data): return tz.get_in(["config", "algorithm", "variant_regions"], data)
python
def get_analysis_intervals(data, vrn_file, base_dir): """Retrieve analysis regions for the current variant calling pipeline. """ from bcbio.bam import callable if vrn_file and vcfutils.is_gvcf_file(vrn_file): callable_bed = _callable_from_gvcf(data, vrn_file, base_dir) if callable_bed: return callable_bed if data.get("ensemble_bed"): return data["ensemble_bed"] elif dd.get_sample_callable(data): return dd.get_sample_callable(data) elif data.get("align_bam"): return callable.sample_callable_bed(data["align_bam"], dd.get_ref_file(data), data)[0] elif data.get("work_bam"): return callable.sample_callable_bed(data["work_bam"], dd.get_ref_file(data), data)[0] elif data.get("work_bam_callable"): data = utils.deepish_copy(data) data["work_bam"] = data.pop("work_bam_callable") return callable.sample_callable_bed(data["work_bam"], dd.get_ref_file(data), data)[0] elif tz.get_in(["config", "algorithm", "callable_regions"], data): return tz.get_in(["config", "algorithm", "callable_regions"], data) elif tz.get_in(["config", "algorithm", "variant_regions"], data): return tz.get_in(["config", "algorithm", "variant_regions"], data)
[ "def", "get_analysis_intervals", "(", "data", ",", "vrn_file", ",", "base_dir", ")", ":", "from", "bcbio", ".", "bam", "import", "callable", "if", "vrn_file", "and", "vcfutils", ".", "is_gvcf_file", "(", "vrn_file", ")", ":", "callable_bed", "=", "_callable_fr...
Retrieve analysis regions for the current variant calling pipeline.
[ "Retrieve", "analysis", "regions", "for", "the", "current", "variant", "calling", "pipeline", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validate.py#L400-L424
223,474
bcbio/bcbio-nextgen
bcbio/variation/validate.py
_get_location_list
def _get_location_list(interval_bed): """Retrieve list of locations to analyze from input BED file. """ import pybedtools regions = collections.OrderedDict() for region in pybedtools.BedTool(interval_bed): regions[str(region.chrom)] = None return regions.keys()
python
def _get_location_list(interval_bed): """Retrieve list of locations to analyze from input BED file. """ import pybedtools regions = collections.OrderedDict() for region in pybedtools.BedTool(interval_bed): regions[str(region.chrom)] = None return regions.keys()
[ "def", "_get_location_list", "(", "interval_bed", ")", ":", "import", "pybedtools", "regions", "=", "collections", ".", "OrderedDict", "(", ")", "for", "region", "in", "pybedtools", ".", "BedTool", "(", "interval_bed", ")", ":", "regions", "[", "str", "(", "...
Retrieve list of locations to analyze from input BED file.
[ "Retrieve", "list", "of", "locations", "to", "analyze", "from", "input", "BED", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validate.py#L443-L450
223,475
bcbio/bcbio-nextgen
bcbio/variation/validate.py
_run_bcbio_variation
def _run_bcbio_variation(vrn_file, rm_file, rm_interval_file, base_dir, sample, caller, data): """Run validation of a caller against the truth set using bcbio.variation. """ val_config_file = _create_validate_config_file(vrn_file, rm_file, rm_interval_file, base_dir, data) work_dir = os.path.join(base_dir, "work") out = {"summary": os.path.join(work_dir, "validate-summary.csv"), "grading": os.path.join(work_dir, "validate-grading.yaml"), "discordant": os.path.join(work_dir, "%s-eval-ref-discordance-annotate.vcf" % sample)} if not utils.file_exists(out["discordant"]) or not utils.file_exists(out["grading"]): bcbio_variation_comparison(val_config_file, base_dir, data) out["concordant"] = filter(os.path.exists, [os.path.join(work_dir, "%s-%s-concordance.vcf" % (sample, x)) for x in ["eval-ref", "ref-eval"]])[0] return out
python
def _run_bcbio_variation(vrn_file, rm_file, rm_interval_file, base_dir, sample, caller, data): """Run validation of a caller against the truth set using bcbio.variation. """ val_config_file = _create_validate_config_file(vrn_file, rm_file, rm_interval_file, base_dir, data) work_dir = os.path.join(base_dir, "work") out = {"summary": os.path.join(work_dir, "validate-summary.csv"), "grading": os.path.join(work_dir, "validate-grading.yaml"), "discordant": os.path.join(work_dir, "%s-eval-ref-discordance-annotate.vcf" % sample)} if not utils.file_exists(out["discordant"]) or not utils.file_exists(out["grading"]): bcbio_variation_comparison(val_config_file, base_dir, data) out["concordant"] = filter(os.path.exists, [os.path.join(work_dir, "%s-%s-concordance.vcf" % (sample, x)) for x in ["eval-ref", "ref-eval"]])[0] return out
[ "def", "_run_bcbio_variation", "(", "vrn_file", ",", "rm_file", ",", "rm_interval_file", ",", "base_dir", ",", "sample", ",", "caller", ",", "data", ")", ":", "val_config_file", "=", "_create_validate_config_file", "(", "vrn_file", ",", "rm_file", ",", "rm_interva...
Run validation of a caller against the truth set using bcbio.variation.
[ "Run", "validation", "of", "a", "caller", "against", "the", "truth", "set", "using", "bcbio", ".", "variation", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validate.py#L454-L468
223,476
bcbio/bcbio-nextgen
bcbio/variation/validate.py
_create_validate_config
def _create_validate_config(vrn_file, rm_file, rm_interval_file, base_dir, data): """Create a bcbio.variation configuration input for validation. """ ref_call = {"file": str(rm_file), "name": "ref", "type": "grading-ref", "fix-sample-header": True, "remove-refcalls": True} a_intervals = get_analysis_intervals(data, vrn_file, base_dir) if a_intervals: a_intervals = shared.remove_lcr_regions(a_intervals, [data]) if rm_interval_file: ref_call["intervals"] = rm_interval_file eval_call = {"file": vrn_file, "name": "eval", "remove-refcalls": True} exp = {"sample": data["name"][-1], "ref": dd.get_ref_file(data), "approach": "grade", "calls": [ref_call, eval_call]} if a_intervals: exp["intervals"] = os.path.abspath(a_intervals) if data.get("align_bam"): exp["align"] = data["align_bam"] elif data.get("work_bam"): exp["align"] = data["work_bam"] return {"dir": {"base": base_dir, "out": "work", "prep": "work/prep"}, "experiments": [exp]}
python
def _create_validate_config(vrn_file, rm_file, rm_interval_file, base_dir, data): """Create a bcbio.variation configuration input for validation. """ ref_call = {"file": str(rm_file), "name": "ref", "type": "grading-ref", "fix-sample-header": True, "remove-refcalls": True} a_intervals = get_analysis_intervals(data, vrn_file, base_dir) if a_intervals: a_intervals = shared.remove_lcr_regions(a_intervals, [data]) if rm_interval_file: ref_call["intervals"] = rm_interval_file eval_call = {"file": vrn_file, "name": "eval", "remove-refcalls": True} exp = {"sample": data["name"][-1], "ref": dd.get_ref_file(data), "approach": "grade", "calls": [ref_call, eval_call]} if a_intervals: exp["intervals"] = os.path.abspath(a_intervals) if data.get("align_bam"): exp["align"] = data["align_bam"] elif data.get("work_bam"): exp["align"] = data["work_bam"] return {"dir": {"base": base_dir, "out": "work", "prep": "work/prep"}, "experiments": [exp]}
[ "def", "_create_validate_config", "(", "vrn_file", ",", "rm_file", ",", "rm_interval_file", ",", "base_dir", ",", "data", ")", ":", "ref_call", "=", "{", "\"file\"", ":", "str", "(", "rm_file", ")", ",", "\"name\"", ":", "\"ref\"", ",", "\"type\"", ":", "\...
Create a bcbio.variation configuration input for validation.
[ "Create", "a", "bcbio", ".", "variation", "configuration", "input", "for", "validation", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validate.py#L492-L514
223,477
bcbio/bcbio-nextgen
bcbio/variation/validate.py
summarize_grading
def summarize_grading(samples, vkey="validate"): """Provide summaries of grading results across all samples. Handles both traditional pipelines (validation part of variants) and CWL pipelines (validation at top level) """ samples = list(utils.flatten(samples)) if not _has_grading_info(samples, vkey): return [[d] for d in samples] validate_dir = utils.safe_makedir(os.path.join(samples[0]["dirs"]["work"], vkey)) header = ["sample", "caller", "variant.type", "category", "value"] _summarize_combined(samples, vkey) validated, out = _group_validate_samples(samples, vkey, (["metadata", "validate_batch"], ["metadata", "batch"], ["description"])) for vname, vitems in validated.items(): out_csv = os.path.join(validate_dir, "grading-summary-%s.csv" % vname) with open(out_csv, "w") as out_handle: writer = csv.writer(out_handle) writer.writerow(header) plot_data = [] plot_files = [] for data in sorted(vitems, key=lambda x: x.get("lane", dd.get_sample_name(x)) or ""): validations = [variant.get(vkey) for variant in data.get("variants", []) if isinstance(variant, dict)] validations = [v for v in validations if v] if len(validations) == 0 and vkey in data: validations = [data.get(vkey)] for validate in validations: if validate: validate["grading_summary"] = out_csv if validate.get("grading"): for row in _get_validate_plotdata_yaml(validate["grading"], data): writer.writerow(row) plot_data.append(row) elif validate.get("summary") and not validate.get("summary") == "None": if isinstance(validate["summary"], (list, tuple)): plot_files.extend(list(set(validate["summary"]))) else: plot_files.append(validate["summary"]) if plot_files: plots = validateplot.classifyplot_from_plotfiles(plot_files, out_csv) elif plot_data: plots = validateplot.create(plot_data, header, 0, data["config"], os.path.splitext(out_csv)[0]) else: plots = [] for data in vitems: if data.get(vkey): data[vkey]["grading_plots"] = plots for variant in data.get("variants", []): if isinstance(variant, dict) and 
variant.get(vkey): variant[vkey]["grading_plots"] = plots out.append([data]) return out
python
def summarize_grading(samples, vkey="validate"): """Provide summaries of grading results across all samples. Handles both traditional pipelines (validation part of variants) and CWL pipelines (validation at top level) """ samples = list(utils.flatten(samples)) if not _has_grading_info(samples, vkey): return [[d] for d in samples] validate_dir = utils.safe_makedir(os.path.join(samples[0]["dirs"]["work"], vkey)) header = ["sample", "caller", "variant.type", "category", "value"] _summarize_combined(samples, vkey) validated, out = _group_validate_samples(samples, vkey, (["metadata", "validate_batch"], ["metadata", "batch"], ["description"])) for vname, vitems in validated.items(): out_csv = os.path.join(validate_dir, "grading-summary-%s.csv" % vname) with open(out_csv, "w") as out_handle: writer = csv.writer(out_handle) writer.writerow(header) plot_data = [] plot_files = [] for data in sorted(vitems, key=lambda x: x.get("lane", dd.get_sample_name(x)) or ""): validations = [variant.get(vkey) for variant in data.get("variants", []) if isinstance(variant, dict)] validations = [v for v in validations if v] if len(validations) == 0 and vkey in data: validations = [data.get(vkey)] for validate in validations: if validate: validate["grading_summary"] = out_csv if validate.get("grading"): for row in _get_validate_plotdata_yaml(validate["grading"], data): writer.writerow(row) plot_data.append(row) elif validate.get("summary") and not validate.get("summary") == "None": if isinstance(validate["summary"], (list, tuple)): plot_files.extend(list(set(validate["summary"]))) else: plot_files.append(validate["summary"]) if plot_files: plots = validateplot.classifyplot_from_plotfiles(plot_files, out_csv) elif plot_data: plots = validateplot.create(plot_data, header, 0, data["config"], os.path.splitext(out_csv)[0]) else: plots = [] for data in vitems: if data.get(vkey): data[vkey]["grading_plots"] = plots for variant in data.get("variants", []): if isinstance(variant, dict) and 
variant.get(vkey): variant[vkey]["grading_plots"] = plots out.append([data]) return out
[ "def", "summarize_grading", "(", "samples", ",", "vkey", "=", "\"validate\"", ")", ":", "samples", "=", "list", "(", "utils", ".", "flatten", "(", "samples", ")", ")", "if", "not", "_has_grading_info", "(", "samples", ",", "vkey", ")", ":", "return", "["...
Provide summaries of grading results across all samples. Handles both traditional pipelines (validation part of variants) and CWL pipelines (validation at top level)
[ "Provide", "summaries", "of", "grading", "results", "across", "all", "samples", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validate.py#L560-L613
223,478
bcbio/bcbio-nextgen
bcbio/variation/validate.py
_summarize_combined
def _summarize_combined(samples, vkey): """Prepare summarized CSV and plot files for samples to combine together. Helps handle cases where we want to summarize over multiple samples. """ validate_dir = utils.safe_makedir(os.path.join(samples[0]["dirs"]["work"], vkey)) combined, _ = _group_validate_samples(samples, vkey, [["metadata", "validate_combine"]]) for vname, vitems in combined.items(): if vname: cur_combined = collections.defaultdict(int) for data in sorted(vitems, key=lambda x: x.get("lane", dd.get_sample_name(x))): validations = [variant.get(vkey) for variant in data.get("variants", [])] validations = [v for v in validations if v] if len(validations) == 0 and vkey in data: validations = [data.get(vkey)] for validate in validations: with open(validate["summary"]) as in_handle: reader = csv.reader(in_handle) next(reader) # header for _, caller, vtype, metric, value in reader: cur_combined[(caller, vtype, metric)] += int(value) out_csv = os.path.join(validate_dir, "grading-summary-%s.csv" % vname) with open(out_csv, "w") as out_handle: writer = csv.writer(out_handle) header = ["sample", "caller", "vtype", "metric", "value"] writer.writerow(header) for (caller, variant_type, category), val in cur_combined.items(): writer.writerow(["combined-%s" % vname, caller, variant_type, category, val]) plots = validateplot.classifyplot_from_valfile(out_csv)
python
def _summarize_combined(samples, vkey): """Prepare summarized CSV and plot files for samples to combine together. Helps handle cases where we want to summarize over multiple samples. """ validate_dir = utils.safe_makedir(os.path.join(samples[0]["dirs"]["work"], vkey)) combined, _ = _group_validate_samples(samples, vkey, [["metadata", "validate_combine"]]) for vname, vitems in combined.items(): if vname: cur_combined = collections.defaultdict(int) for data in sorted(vitems, key=lambda x: x.get("lane", dd.get_sample_name(x))): validations = [variant.get(vkey) for variant in data.get("variants", [])] validations = [v for v in validations if v] if len(validations) == 0 and vkey in data: validations = [data.get(vkey)] for validate in validations: with open(validate["summary"]) as in_handle: reader = csv.reader(in_handle) next(reader) # header for _, caller, vtype, metric, value in reader: cur_combined[(caller, vtype, metric)] += int(value) out_csv = os.path.join(validate_dir, "grading-summary-%s.csv" % vname) with open(out_csv, "w") as out_handle: writer = csv.writer(out_handle) header = ["sample", "caller", "vtype", "metric", "value"] writer.writerow(header) for (caller, variant_type, category), val in cur_combined.items(): writer.writerow(["combined-%s" % vname, caller, variant_type, category, val]) plots = validateplot.classifyplot_from_valfile(out_csv)
[ "def", "_summarize_combined", "(", "samples", ",", "vkey", ")", ":", "validate_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "samples", "[", "0", "]", "[", "\"dirs\"", "]", "[", "\"work\"", "]", ",", "vkey", ")", ...
Prepare summarized CSV and plot files for samples to combine together. Helps handle cases where we want to summarize over multiple samples.
[ "Prepare", "summarized", "CSV", "and", "plot", "files", "for", "samples", "to", "combine", "together", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validate.py#L615-L643
223,479
bcbio/bcbio-nextgen
bcbio/variation/validate.py
combine_validations
def combine_validations(items, vkey="validate"): """Combine multiple batch validations into validation outputs. """ csvs = set([]) pngs = set([]) for v in [x.get(vkey) for x in items]: if v and v.get("grading_summary"): csvs.add(v.get("grading_summary")) if v and v.get("grading_plots"): pngs |= set(v.get("grading_plots")) if len(csvs) == 1: grading_summary = csvs.pop() else: grading_summary = os.path.join(utils.safe_makedir(os.path.join(dd.get_work_dir(items[0]), vkey)), "grading-summary-combined.csv") with open(grading_summary, "w") as out_handle: for i, csv in enumerate(sorted(list(csvs))): with open(csv) as in_handle: h = in_handle.readline() if i == 0: out_handle.write(h) for l in in_handle: out_handle.write(l) return {"grading_plots": sorted(list(pngs)), "grading_summary": grading_summary}
python
def combine_validations(items, vkey="validate"): """Combine multiple batch validations into validation outputs. """ csvs = set([]) pngs = set([]) for v in [x.get(vkey) for x in items]: if v and v.get("grading_summary"): csvs.add(v.get("grading_summary")) if v and v.get("grading_plots"): pngs |= set(v.get("grading_plots")) if len(csvs) == 1: grading_summary = csvs.pop() else: grading_summary = os.path.join(utils.safe_makedir(os.path.join(dd.get_work_dir(items[0]), vkey)), "grading-summary-combined.csv") with open(grading_summary, "w") as out_handle: for i, csv in enumerate(sorted(list(csvs))): with open(csv) as in_handle: h = in_handle.readline() if i == 0: out_handle.write(h) for l in in_handle: out_handle.write(l) return {"grading_plots": sorted(list(pngs)), "grading_summary": grading_summary}
[ "def", "combine_validations", "(", "items", ",", "vkey", "=", "\"validate\"", ")", ":", "csvs", "=", "set", "(", "[", "]", ")", "pngs", "=", "set", "(", "[", "]", ")", "for", "v", "in", "[", "x", ".", "get", "(", "vkey", ")", "for", "x", "in", ...
Combine multiple batch validations into validation outputs.
[ "Combine", "multiple", "batch", "validations", "into", "validation", "outputs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validate.py#L645-L668
223,480
bcbio/bcbio-nextgen
bcbio/variation/validate.py
freq_summary
def freq_summary(val_file, call_file, truth_file, target_name): """Summarize true and false positive calls by variant type and frequency. Resolve differences in true/false calls based on output from hap.py: https://github.com/sequencing/hap.py """ out_file = "%s-freqs.csv" % utils.splitext_plus(val_file)[0] truth_freqs = _read_truth_freqs(truth_file) call_freqs = _read_call_freqs(call_file, target_name) with VariantFile(val_file) as val_in: with open(out_file, "w") as out_handle: writer = csv.writer(out_handle) writer.writerow(["vtype", "valclass", "freq"]) for rec in val_in: call_type = _classify_rec(rec) val_type = _get_validation_status(rec) key = _get_key(rec) freq = truth_freqs.get(key, call_freqs.get(key, 0.0)) writer.writerow([call_type, val_type, freq]) return out_file
python
def freq_summary(val_file, call_file, truth_file, target_name): """Summarize true and false positive calls by variant type and frequency. Resolve differences in true/false calls based on output from hap.py: https://github.com/sequencing/hap.py """ out_file = "%s-freqs.csv" % utils.splitext_plus(val_file)[0] truth_freqs = _read_truth_freqs(truth_file) call_freqs = _read_call_freqs(call_file, target_name) with VariantFile(val_file) as val_in: with open(out_file, "w") as out_handle: writer = csv.writer(out_handle) writer.writerow(["vtype", "valclass", "freq"]) for rec in val_in: call_type = _classify_rec(rec) val_type = _get_validation_status(rec) key = _get_key(rec) freq = truth_freqs.get(key, call_freqs.get(key, 0.0)) writer.writerow([call_type, val_type, freq]) return out_file
[ "def", "freq_summary", "(", "val_file", ",", "call_file", ",", "truth_file", ",", "target_name", ")", ":", "out_file", "=", "\"%s-freqs.csv\"", "%", "utils", ".", "splitext_plus", "(", "val_file", ")", "[", "0", "]", "truth_freqs", "=", "_read_truth_freqs", "(...
Summarize true and false positive calls by variant type and frequency. Resolve differences in true/false calls based on output from hap.py: https://github.com/sequencing/hap.py
[ "Summarize", "true", "and", "false", "positive", "calls", "by", "variant", "type", "and", "frequency", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validate.py#L684-L703
223,481
bcbio/bcbio-nextgen
bcbio/variation/validate.py
_read_call_freqs
def _read_call_freqs(in_file, sample_name): """Identify frequencies for calls in the input file. """ from bcbio.heterogeneity import bubbletree out = {} with VariantFile(in_file) as call_in: for rec in call_in: if rec.filter.keys() == ["PASS"]: for name, sample in rec.samples.items(): if name == sample_name: alt, depth, freq = bubbletree.sample_alt_and_depth(rec, sample) if freq is not None: out[_get_key(rec)] = freq return out
python
def _read_call_freqs(in_file, sample_name): """Identify frequencies for calls in the input file. """ from bcbio.heterogeneity import bubbletree out = {} with VariantFile(in_file) as call_in: for rec in call_in: if rec.filter.keys() == ["PASS"]: for name, sample in rec.samples.items(): if name == sample_name: alt, depth, freq = bubbletree.sample_alt_and_depth(rec, sample) if freq is not None: out[_get_key(rec)] = freq return out
[ "def", "_read_call_freqs", "(", "in_file", ",", "sample_name", ")", ":", "from", "bcbio", ".", "heterogeneity", "import", "bubbletree", "out", "=", "{", "}", "with", "VariantFile", "(", "in_file", ")", "as", "call_in", ":", "for", "rec", "in", "call_in", "...
Identify frequencies for calls in the input file.
[ "Identify", "frequencies", "for", "calls", "in", "the", "input", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validate.py#L721-L734
223,482
bcbio/bcbio-nextgen
bcbio/variation/validate.py
_read_truth_freqs
def _read_truth_freqs(in_file): """Read frequency of calls from truth VCF. Currently handles DREAM data, needs generalization for other datasets. """ out = {} with VariantFile(in_file) as bcf_in: for rec in bcf_in: freq = float(rec.info.get("VAF", 1.0)) out[_get_key(rec)] = freq return out
python
def _read_truth_freqs(in_file): """Read frequency of calls from truth VCF. Currently handles DREAM data, needs generalization for other datasets. """ out = {} with VariantFile(in_file) as bcf_in: for rec in bcf_in: freq = float(rec.info.get("VAF", 1.0)) out[_get_key(rec)] = freq return out
[ "def", "_read_truth_freqs", "(", "in_file", ")", ":", "out", "=", "{", "}", "with", "VariantFile", "(", "in_file", ")", "as", "bcf_in", ":", "for", "rec", "in", "bcf_in", ":", "freq", "=", "float", "(", "rec", ".", "info", ".", "get", "(", "\"VAF\"",...
Read frequency of calls from truth VCF. Currently handles DREAM data, needs generalization for other datasets.
[ "Read", "frequency", "of", "calls", "from", "truth", "VCF", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validate.py#L736-L746
223,483
bcbio/bcbio-nextgen
bcbio/cwl/cwlutils.py
to_rec
def to_rec(samples, default_keys=None): """Convert inputs into CWL records, useful for single item parallelization. """ recs = samples_to_records([normalize_missing(utils.to_single_data(x)) for x in samples], default_keys) return [[x] for x in recs]
python
def to_rec(samples, default_keys=None): """Convert inputs into CWL records, useful for single item parallelization. """ recs = samples_to_records([normalize_missing(utils.to_single_data(x)) for x in samples], default_keys) return [[x] for x in recs]
[ "def", "to_rec", "(", "samples", ",", "default_keys", "=", "None", ")", ":", "recs", "=", "samples_to_records", "(", "[", "normalize_missing", "(", "utils", ".", "to_single_data", "(", "x", ")", ")", "for", "x", "in", "samples", "]", ",", "default_keys", ...
Convert inputs into CWL records, useful for single item parallelization.
[ "Convert", "inputs", "into", "CWL", "records", "useful", "for", "single", "item", "parallelization", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/cwlutils.py#L20-L24
223,484
bcbio/bcbio-nextgen
bcbio/cwl/cwlutils.py
to_rec_single
def to_rec_single(samples, default_keys=None): """Convert output into a list of single CWL records. """ out = [] for data in samples: recs = samples_to_records([normalize_missing(utils.to_single_data(data))], default_keys) assert len(recs) == 1 out.append(recs[0]) return out
python
def to_rec_single(samples, default_keys=None): """Convert output into a list of single CWL records. """ out = [] for data in samples: recs = samples_to_records([normalize_missing(utils.to_single_data(data))], default_keys) assert len(recs) == 1 out.append(recs[0]) return out
[ "def", "to_rec_single", "(", "samples", ",", "default_keys", "=", "None", ")", ":", "out", "=", "[", "]", "for", "data", "in", "samples", ":", "recs", "=", "samples_to_records", "(", "[", "normalize_missing", "(", "utils", ".", "to_single_data", "(", "data...
Convert output into a list of single CWL records.
[ "Convert", "output", "into", "a", "list", "of", "single", "CWL", "records", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/cwlutils.py#L26-L34
223,485
bcbio/bcbio-nextgen
bcbio/cwl/cwlutils.py
handle_combined_input
def handle_combined_input(args): """Check for cases where we have a combined input nested list. In these cases the CWL will be double nested: [[[rec_a], [rec_b]]] and we remove the outer nesting. """ cur_args = args[:] while len(cur_args) == 1 and isinstance(cur_args[0], (list, tuple)): cur_args = cur_args[0] return cur_args
python
def handle_combined_input(args): """Check for cases where we have a combined input nested list. In these cases the CWL will be double nested: [[[rec_a], [rec_b]]] and we remove the outer nesting. """ cur_args = args[:] while len(cur_args) == 1 and isinstance(cur_args[0], (list, tuple)): cur_args = cur_args[0] return cur_args
[ "def", "handle_combined_input", "(", "args", ")", ":", "cur_args", "=", "args", "[", ":", "]", "while", "len", "(", "cur_args", ")", "==", "1", "and", "isinstance", "(", "cur_args", "[", "0", "]", ",", "(", "list", ",", "tuple", ")", ")", ":", "cur...
Check for cases where we have a combined input nested list. In these cases the CWL will be double nested: [[[rec_a], [rec_b]]] and we remove the outer nesting.
[ "Check", "for", "cases", "where", "we", "have", "a", "combined", "input", "nested", "list", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/cwlutils.py#L39-L51
223,486
bcbio/bcbio-nextgen
bcbio/cwl/cwlutils.py
normalize_missing
def normalize_missing(xs): """Normalize missing values to avoid string 'None' inputs. """ if isinstance(xs, dict): for k, v in xs.items(): xs[k] = normalize_missing(v) elif isinstance(xs, (list, tuple)): xs = [normalize_missing(x) for x in xs] elif isinstance(xs, six.string_types): if xs.lower() in ["none", "null"]: xs = None elif xs.lower() == "true": xs = True elif xs.lower() == "false": xs = False return xs
python
def normalize_missing(xs): """Normalize missing values to avoid string 'None' inputs. """ if isinstance(xs, dict): for k, v in xs.items(): xs[k] = normalize_missing(v) elif isinstance(xs, (list, tuple)): xs = [normalize_missing(x) for x in xs] elif isinstance(xs, six.string_types): if xs.lower() in ["none", "null"]: xs = None elif xs.lower() == "true": xs = True elif xs.lower() == "false": xs = False return xs
[ "def", "normalize_missing", "(", "xs", ")", ":", "if", "isinstance", "(", "xs", ",", "dict", ")", ":", "for", "k", ",", "v", "in", "xs", ".", "items", "(", ")", ":", "xs", "[", "k", "]", "=", "normalize_missing", "(", "v", ")", "elif", "isinstanc...
Normalize missing values to avoid string 'None' inputs.
[ "Normalize", "missing", "values", "to", "avoid", "string", "None", "inputs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/cwlutils.py#L53-L68
223,487
bcbio/bcbio-nextgen
bcbio/cwl/cwlutils.py
unpack_tarballs
def unpack_tarballs(xs, data, use_subdir=True): """Unpack workflow tarballs into ready to use directories. """ if isinstance(xs, dict): for k, v in xs.items(): xs[k] = unpack_tarballs(v, data, use_subdir) elif isinstance(xs, (list, tuple)): xs = [unpack_tarballs(x, data, use_subdir) for x in xs] elif isinstance(xs, six.string_types): if os.path.isfile(xs.encode("utf-8", "ignore")) and xs.endswith("-wf.tar.gz"): if use_subdir: tarball_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "wf-inputs")) else: tarball_dir = dd.get_work_dir(data) out_dir = os.path.join(tarball_dir, os.path.basename(xs).replace("-wf.tar.gz", "").replace("--", os.path.sep)) if not os.path.exists(out_dir): with utils.chdir(tarball_dir): with tarfile.open(xs, "r:gz") as tar: tar.extractall() assert os.path.exists(out_dir), out_dir # Default to representing output directory xs = out_dir # Look for aligner indices for fname in os.listdir(out_dir): if fname.endswith(DIR_TARGETS): xs = os.path.join(out_dir, fname) break elif fname.endswith(BASENAME_TARGETS): base = os.path.join(out_dir, utils.splitext_plus(os.path.basename(fname))[0]) xs = glob.glob("%s*" % base) break return xs
python
def unpack_tarballs(xs, data, use_subdir=True): """Unpack workflow tarballs into ready to use directories. """ if isinstance(xs, dict): for k, v in xs.items(): xs[k] = unpack_tarballs(v, data, use_subdir) elif isinstance(xs, (list, tuple)): xs = [unpack_tarballs(x, data, use_subdir) for x in xs] elif isinstance(xs, six.string_types): if os.path.isfile(xs.encode("utf-8", "ignore")) and xs.endswith("-wf.tar.gz"): if use_subdir: tarball_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "wf-inputs")) else: tarball_dir = dd.get_work_dir(data) out_dir = os.path.join(tarball_dir, os.path.basename(xs).replace("-wf.tar.gz", "").replace("--", os.path.sep)) if not os.path.exists(out_dir): with utils.chdir(tarball_dir): with tarfile.open(xs, "r:gz") as tar: tar.extractall() assert os.path.exists(out_dir), out_dir # Default to representing output directory xs = out_dir # Look for aligner indices for fname in os.listdir(out_dir): if fname.endswith(DIR_TARGETS): xs = os.path.join(out_dir, fname) break elif fname.endswith(BASENAME_TARGETS): base = os.path.join(out_dir, utils.splitext_plus(os.path.basename(fname))[0]) xs = glob.glob("%s*" % base) break return xs
[ "def", "unpack_tarballs", "(", "xs", ",", "data", ",", "use_subdir", "=", "True", ")", ":", "if", "isinstance", "(", "xs", ",", "dict", ")", ":", "for", "k", ",", "v", "in", "xs", ".", "items", "(", ")", ":", "xs", "[", "k", "]", "=", "unpack_t...
Unpack workflow tarballs into ready to use directories.
[ "Unpack", "workflow", "tarballs", "into", "ready", "to", "use", "directories", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/cwlutils.py#L76-L108
223,488
bcbio/bcbio-nextgen
bcbio/cwl/cwlutils.py
_get_all_cwlkeys
def _get_all_cwlkeys(items, default_keys=None): """Retrieve cwlkeys from inputs, handling defaults which can be null. When inputs are null in some and present in others, this creates unequal keys in each sample, confusing decision making about which are primary and extras. """ if default_keys: default_keys = set(default_keys) else: default_keys = set(["metadata__batch", "config__algorithm__validate", "config__algorithm__validate_regions", "config__algorithm__validate_regions_merged", "config__algorithm__variant_regions", "validate__summary", "validate__tp", "validate__fp", "validate__fn", "config__algorithm__coverage", "config__algorithm__coverage_merged", "genome_resources__variation__cosmic", "genome_resources__variation__dbsnp", "genome_resources__variation__clinvar" ]) all_keys = set([]) for data in items: all_keys.update(set(data["cwl_keys"])) all_keys.update(default_keys) return all_keys
python
def _get_all_cwlkeys(items, default_keys=None): """Retrieve cwlkeys from inputs, handling defaults which can be null. When inputs are null in some and present in others, this creates unequal keys in each sample, confusing decision making about which are primary and extras. """ if default_keys: default_keys = set(default_keys) else: default_keys = set(["metadata__batch", "config__algorithm__validate", "config__algorithm__validate_regions", "config__algorithm__validate_regions_merged", "config__algorithm__variant_regions", "validate__summary", "validate__tp", "validate__fp", "validate__fn", "config__algorithm__coverage", "config__algorithm__coverage_merged", "genome_resources__variation__cosmic", "genome_resources__variation__dbsnp", "genome_resources__variation__clinvar" ]) all_keys = set([]) for data in items: all_keys.update(set(data["cwl_keys"])) all_keys.update(default_keys) return all_keys
[ "def", "_get_all_cwlkeys", "(", "items", ",", "default_keys", "=", "None", ")", ":", "if", "default_keys", ":", "default_keys", "=", "set", "(", "default_keys", ")", "else", ":", "default_keys", "=", "set", "(", "[", "\"metadata__batch\"", ",", "\"config__algo...
Retrieve cwlkeys from inputs, handling defaults which can be null. When inputs are null in some and present in others, this creates unequal keys in each sample, confusing decision making about which are primary and extras.
[ "Retrieve", "cwlkeys", "from", "inputs", "handling", "defaults", "which", "can", "be", "null", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/cwlutils.py#L110-L133
223,489
bcbio/bcbio-nextgen
bcbio/cwl/cwlutils.py
split_data_cwl_items
def split_data_cwl_items(items, default_keys=None): """Split a set of CWL output dictionaries into data samples and CWL items. Handles cases where we're arrayed on multiple things, like a set of regional VCF calls and data objects. """ key_lens = set([]) for data in items: key_lens.add(len(_get_all_cwlkeys([data], default_keys))) extra_key_len = min(list(key_lens)) if len(key_lens) > 1 else None data_out = [] extra_out = [] for data in items: if extra_key_len and len(_get_all_cwlkeys([data], default_keys)) == extra_key_len: extra_out.append(data) else: data_out.append(data) if len(extra_out) == 0: return data_out, {} else: cwl_keys = extra_out[0]["cwl_keys"] for extra in extra_out[1:]: cur_cwl_keys = extra["cwl_keys"] assert cur_cwl_keys == cwl_keys, pprint.pformat(extra_out) cwl_extras = collections.defaultdict(list) for data in items: for key in cwl_keys: cwl_extras[key].append(data[key]) data_final = [] for data in data_out: for key in cwl_keys: data.pop(key) data_final.append(data) return data_final, dict(cwl_extras)
python
def split_data_cwl_items(items, default_keys=None): """Split a set of CWL output dictionaries into data samples and CWL items. Handles cases where we're arrayed on multiple things, like a set of regional VCF calls and data objects. """ key_lens = set([]) for data in items: key_lens.add(len(_get_all_cwlkeys([data], default_keys))) extra_key_len = min(list(key_lens)) if len(key_lens) > 1 else None data_out = [] extra_out = [] for data in items: if extra_key_len and len(_get_all_cwlkeys([data], default_keys)) == extra_key_len: extra_out.append(data) else: data_out.append(data) if len(extra_out) == 0: return data_out, {} else: cwl_keys = extra_out[0]["cwl_keys"] for extra in extra_out[1:]: cur_cwl_keys = extra["cwl_keys"] assert cur_cwl_keys == cwl_keys, pprint.pformat(extra_out) cwl_extras = collections.defaultdict(list) for data in items: for key in cwl_keys: cwl_extras[key].append(data[key]) data_final = [] for data in data_out: for key in cwl_keys: data.pop(key) data_final.append(data) return data_final, dict(cwl_extras)
[ "def", "split_data_cwl_items", "(", "items", ",", "default_keys", "=", "None", ")", ":", "key_lens", "=", "set", "(", "[", "]", ")", "for", "data", "in", "items", ":", "key_lens", ".", "add", "(", "len", "(", "_get_all_cwlkeys", "(", "[", "data", "]", ...
Split a set of CWL output dictionaries into data samples and CWL items. Handles cases where we're arrayed on multiple things, like a set of regional VCF calls and data objects.
[ "Split", "a", "set", "of", "CWL", "output", "dictionaries", "into", "data", "samples", "and", "CWL", "items", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/cwlutils.py#L135-L168
223,490
bcbio/bcbio-nextgen
bcbio/cwl/cwlutils.py
samples_to_records
def samples_to_records(samples, default_keys=None): """Convert samples into output CWL records. """ from bcbio.pipeline import run_info RECORD_CONVERT_TO_LIST = set(["config__algorithm__tools_on", "config__algorithm__tools_off", "reference__genome_context"]) all_keys = _get_all_cwlkeys(samples, default_keys) out = [] for data in samples: for raw_key in sorted(list(all_keys)): key = raw_key.split("__") if tz.get_in(key, data) is None: data = tz.update_in(data, key, lambda x: None) if raw_key not in data["cwl_keys"]: data["cwl_keys"].append(raw_key) if raw_key in RECORD_CONVERT_TO_LIST: val = tz.get_in(key, data) if not val: val = [] elif not isinstance(val, (list, tuple)): val = [val] data = tz.update_in(data, key, lambda x: val) # Booleans are problematic for CWL serialization, convert into string representation if isinstance(tz.get_in(key, data), bool): data = tz.update_in(data, key, lambda x: str(tz.get_in(key, data))) data["metadata"] = run_info.add_metadata_defaults(data.get("metadata", {})) out.append(data) return out
python
def samples_to_records(samples, default_keys=None): """Convert samples into output CWL records. """ from bcbio.pipeline import run_info RECORD_CONVERT_TO_LIST = set(["config__algorithm__tools_on", "config__algorithm__tools_off", "reference__genome_context"]) all_keys = _get_all_cwlkeys(samples, default_keys) out = [] for data in samples: for raw_key in sorted(list(all_keys)): key = raw_key.split("__") if tz.get_in(key, data) is None: data = tz.update_in(data, key, lambda x: None) if raw_key not in data["cwl_keys"]: data["cwl_keys"].append(raw_key) if raw_key in RECORD_CONVERT_TO_LIST: val = tz.get_in(key, data) if not val: val = [] elif not isinstance(val, (list, tuple)): val = [val] data = tz.update_in(data, key, lambda x: val) # Booleans are problematic for CWL serialization, convert into string representation if isinstance(tz.get_in(key, data), bool): data = tz.update_in(data, key, lambda x: str(tz.get_in(key, data))) data["metadata"] = run_info.add_metadata_defaults(data.get("metadata", {})) out.append(data) return out
[ "def", "samples_to_records", "(", "samples", ",", "default_keys", "=", "None", ")", ":", "from", "bcbio", ".", "pipeline", "import", "run_info", "RECORD_CONVERT_TO_LIST", "=", "set", "(", "[", "\"config__algorithm__tools_on\"", ",", "\"config__algorithm__tools_off\"", ...
Convert samples into output CWL records.
[ "Convert", "samples", "into", "output", "CWL", "records", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/cwlutils.py#L170-L195
223,491
bcbio/bcbio-nextgen
bcbio/cwl/cwlutils.py
assign_complex_to_samples
def assign_complex_to_samples(items): """Assign complex inputs like variants and align outputs to samples. Handles list inputs to record conversion where we have inputs from multiple locations and need to ensure they are properly assigned to samples in many environments. The unpleasant approach here is to use standard file naming to match with samples so this can work in environments where we don't download/stream the input files (for space/time savings). """ extract_fns = {("variants", "samples"): _get_vcf_samples, ("align_bam",): _get_bam_samples} complex = {k: {} for k in extract_fns.keys()} for data in items: for k in complex: v = tz.get_in(k, data) if v is not None: for s in extract_fns[k](v, items): if s: complex[k][s] = v out = [] for data in items: for k in complex: newv = tz.get_in([k, dd.get_sample_name(data)], complex) if newv: data = tz.update_in(data, k, lambda x: newv) out.append(data) return out
python
def assign_complex_to_samples(items): """Assign complex inputs like variants and align outputs to samples. Handles list inputs to record conversion where we have inputs from multiple locations and need to ensure they are properly assigned to samples in many environments. The unpleasant approach here is to use standard file naming to match with samples so this can work in environments where we don't download/stream the input files (for space/time savings). """ extract_fns = {("variants", "samples"): _get_vcf_samples, ("align_bam",): _get_bam_samples} complex = {k: {} for k in extract_fns.keys()} for data in items: for k in complex: v = tz.get_in(k, data) if v is not None: for s in extract_fns[k](v, items): if s: complex[k][s] = v out = [] for data in items: for k in complex: newv = tz.get_in([k, dd.get_sample_name(data)], complex) if newv: data = tz.update_in(data, k, lambda x: newv) out.append(data) return out
[ "def", "assign_complex_to_samples", "(", "items", ")", ":", "extract_fns", "=", "{", "(", "\"variants\"", ",", "\"samples\"", ")", ":", "_get_vcf_samples", ",", "(", "\"align_bam\"", ",", ")", ":", "_get_bam_samples", "}", "complex", "=", "{", "k", ":", "{",...
Assign complex inputs like variants and align outputs to samples. Handles list inputs to record conversion where we have inputs from multiple locations and need to ensure they are properly assigned to samples in many environments. The unpleasant approach here is to use standard file naming to match with samples so this can work in environments where we don't download/stream the input files (for space/time savings).
[ "Assign", "complex", "inputs", "like", "variants", "and", "align", "outputs", "to", "samples", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/cwlutils.py#L197-L225
223,492
bcbio/bcbio-nextgen
bcbio/pipeline/variation.py
_normalize_vc_input
def _normalize_vc_input(data): """Normalize different types of variant calling inputs. Handles standard and ensemble inputs. """ if data.get("ensemble"): for k in ["batch_samples", "validate", "vrn_file"]: data[k] = data["ensemble"][k] data["config"]["algorithm"]["variantcaller"] = "ensemble" data["metadata"] = {"batch": data["ensemble"]["batch_id"]} return data
python
def _normalize_vc_input(data): """Normalize different types of variant calling inputs. Handles standard and ensemble inputs. """ if data.get("ensemble"): for k in ["batch_samples", "validate", "vrn_file"]: data[k] = data["ensemble"][k] data["config"]["algorithm"]["variantcaller"] = "ensemble" data["metadata"] = {"batch": data["ensemble"]["batch_id"]} return data
[ "def", "_normalize_vc_input", "(", "data", ")", ":", "if", "data", ".", "get", "(", "\"ensemble\"", ")", ":", "for", "k", "in", "[", "\"batch_samples\"", ",", "\"validate\"", ",", "\"vrn_file\"", "]", ":", "data", "[", "k", "]", "=", "data", "[", "\"en...
Normalize different types of variant calling inputs. Handles standard and ensemble inputs.
[ "Normalize", "different", "types", "of", "variant", "calling", "inputs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/variation.py#L65-L75
223,493
bcbio/bcbio-nextgen
bcbio/pipeline/variation.py
_get_orig_items
def _get_orig_items(data): """Retrieve original items in a batch, handling CWL and standard cases. """ if isinstance(data, dict): if dd.get_align_bam(data) and tz.get_in(["metadata", "batch"], data) and "group_orig" in data: return vmulti.get_orig_items(data) else: return [data] else: return data
python
def _get_orig_items(data): """Retrieve original items in a batch, handling CWL and standard cases. """ if isinstance(data, dict): if dd.get_align_bam(data) and tz.get_in(["metadata", "batch"], data) and "group_orig" in data: return vmulti.get_orig_items(data) else: return [data] else: return data
[ "def", "_get_orig_items", "(", "data", ")", ":", "if", "isinstance", "(", "data", ",", "dict", ")", ":", "if", "dd", ".", "get_align_bam", "(", "data", ")", "and", "tz", ".", "get_in", "(", "[", "\"metadata\"", ",", "\"batch\"", "]", ",", "data", ")"...
Retrieve original items in a batch, handling CWL and standard cases.
[ "Retrieve", "original", "items", "in", "a", "batch", "handling", "CWL", "and", "standard", "cases", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/variation.py#L129-L138
223,494
bcbio/bcbio-nextgen
bcbio/pipeline/variation.py
_symlink_to_workdir
def _symlink_to_workdir(data, key): """For CWL support, symlink files into a working directory if in read-only imports. """ orig_file = tz.get_in(key, data) if orig_file and not orig_file.startswith(dd.get_work_dir(data)): variantcaller = genotype.get_variantcaller(data, require_bam=False) if not variantcaller: variantcaller = "precalled" out_file = os.path.join(dd.get_work_dir(data), variantcaller, os.path.basename(orig_file)) utils.safe_makedir(os.path.dirname(out_file)) utils.symlink_plus(orig_file, out_file) data = tz.update_in(data, key, lambda x: out_file) return data
python
def _symlink_to_workdir(data, key): """For CWL support, symlink files into a working directory if in read-only imports. """ orig_file = tz.get_in(key, data) if orig_file and not orig_file.startswith(dd.get_work_dir(data)): variantcaller = genotype.get_variantcaller(data, require_bam=False) if not variantcaller: variantcaller = "precalled" out_file = os.path.join(dd.get_work_dir(data), variantcaller, os.path.basename(orig_file)) utils.safe_makedir(os.path.dirname(out_file)) utils.symlink_plus(orig_file, out_file) data = tz.update_in(data, key, lambda x: out_file) return data
[ "def", "_symlink_to_workdir", "(", "data", ",", "key", ")", ":", "orig_file", "=", "tz", ".", "get_in", "(", "key", ",", "data", ")", "if", "orig_file", "and", "not", "orig_file", ".", "startswith", "(", "dd", ".", "get_work_dir", "(", "data", ")", ")"...
For CWL support, symlink files into a working directory if in read-only imports.
[ "For", "CWL", "support", "symlink", "files", "into", "a", "working", "directory", "if", "in", "read", "-", "only", "imports", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/variation.py#L140-L152
223,495
bcbio/bcbio-nextgen
bcbio/pipeline/variation.py
_get_batch_representative
def _get_batch_representative(items, key): """Retrieve a representative data item from a batch. Handles standard bcbio cases (a single data item) and CWL cases with batches that have a consistent variant file. """ if isinstance(items, dict): return items, items else: vals = set([]) out = [] for data in items: if key in data: vals.add(data[key]) out.append(data) if len(vals) != 1: raise ValueError("Incorrect values for %s: %s" % (key, list(vals))) return out[0], items
python
def _get_batch_representative(items, key): """Retrieve a representative data item from a batch. Handles standard bcbio cases (a single data item) and CWL cases with batches that have a consistent variant file. """ if isinstance(items, dict): return items, items else: vals = set([]) out = [] for data in items: if key in data: vals.add(data[key]) out.append(data) if len(vals) != 1: raise ValueError("Incorrect values for %s: %s" % (key, list(vals))) return out[0], items
[ "def", "_get_batch_representative", "(", "items", ",", "key", ")", ":", "if", "isinstance", "(", "items", ",", "dict", ")", ":", "return", "items", ",", "items", "else", ":", "vals", "=", "set", "(", "[", "]", ")", "out", "=", "[", "]", "for", "dat...
Retrieve a representative data item from a batch. Handles standard bcbio cases (a single data item) and CWL cases with batches that have a consistent variant file.
[ "Retrieve", "a", "representative", "data", "item", "from", "a", "batch", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/variation.py#L154-L171
223,496
bcbio/bcbio-nextgen
bcbio/distributed/objectstore.py
_get_storage_manager
def _get_storage_manager(resource): """Return a storage manager which can process this resource.""" for manager in (AmazonS3, ArvadosKeep, SevenBridges, DNAnexus, AzureBlob, GoogleCloud, RegularServer): if manager.check_resource(resource): return manager() raise ValueError("Unexpected object store %(resource)s" % {"resource": resource})
python
def _get_storage_manager(resource): """Return a storage manager which can process this resource.""" for manager in (AmazonS3, ArvadosKeep, SevenBridges, DNAnexus, AzureBlob, GoogleCloud, RegularServer): if manager.check_resource(resource): return manager() raise ValueError("Unexpected object store %(resource)s" % {"resource": resource})
[ "def", "_get_storage_manager", "(", "resource", ")", ":", "for", "manager", "in", "(", "AmazonS3", ",", "ArvadosKeep", ",", "SevenBridges", ",", "DNAnexus", ",", "AzureBlob", ",", "GoogleCloud", ",", "RegularServer", ")", ":", "if", "manager", ".", "check_reso...
Return a storage manager which can process this resource.
[ "Return", "a", "storage", "manager", "which", "can", "process", "this", "resource", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/objectstore.py#L624-L631
223,497
bcbio/bcbio-nextgen
bcbio/distributed/objectstore.py
default_region
def default_region(fname): """Return the default region for the received resource. Note: This feature is available only for AmazonS3 storage manager. """ manager = _get_storage_manager(fname) if hasattr(manager, "get_region"): return manager.get_region() raise NotImplementedError("Unexpected object store %s" % fname)
python
def default_region(fname): """Return the default region for the received resource. Note: This feature is available only for AmazonS3 storage manager. """ manager = _get_storage_manager(fname) if hasattr(manager, "get_region"): return manager.get_region() raise NotImplementedError("Unexpected object store %s" % fname)
[ "def", "default_region", "(", "fname", ")", ":", "manager", "=", "_get_storage_manager", "(", "fname", ")", "if", "hasattr", "(", "manager", ",", "\"get_region\"", ")", ":", "return", "manager", ".", "get_region", "(", ")", "raise", "NotImplementedError", "(",...
Return the default region for the received resource. Note: This feature is available only for AmazonS3 storage manager.
[ "Return", "the", "default", "region", "for", "the", "received", "resource", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/objectstore.py#L651-L661
223,498
bcbio/bcbio-nextgen
bcbio/distributed/objectstore.py
BlobHandle.blob_properties
def blob_properties(self): """Returns all user-defined metadata, standard HTTP properties, and system properties for the blob. """ if not self._blob_properties: self._blob_properties = self._blob_service.get_blob_properties( container_name=self._container_name, blob_name=self._blob_name) return self._blob_properties
python
def blob_properties(self): """Returns all user-defined metadata, standard HTTP properties, and system properties for the blob. """ if not self._blob_properties: self._blob_properties = self._blob_service.get_blob_properties( container_name=self._container_name, blob_name=self._blob_name) return self._blob_properties
[ "def", "blob_properties", "(", "self", ")", ":", "if", "not", "self", ".", "_blob_properties", ":", "self", ".", "_blob_properties", "=", "self", ".", "_blob_service", ".", "get_blob_properties", "(", "container_name", "=", "self", ".", "_container_name", ",", ...
Returns all user-defined metadata, standard HTTP properties, and system properties for the blob.
[ "Returns", "all", "user", "-", "defined", "metadata", "standard", "HTTP", "properties", "and", "system", "properties", "for", "the", "blob", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/objectstore.py#L154-L162
223,499
bcbio/bcbio-nextgen
bcbio/distributed/objectstore.py
BlobHandle._chunk_offsets
def _chunk_offsets(self): """Iterator over chunk offests.""" index = 0 blob_size = self.blob_properties.get('content-length') while index < blob_size: yield index index = index + self._chunk_size
python
def _chunk_offsets(self): """Iterator over chunk offests.""" index = 0 blob_size = self.blob_properties.get('content-length') while index < blob_size: yield index index = index + self._chunk_size
[ "def", "_chunk_offsets", "(", "self", ")", ":", "index", "=", "0", "blob_size", "=", "self", ".", "blob_properties", ".", "get", "(", "'content-length'", ")", "while", "index", "<", "blob_size", ":", "yield", "index", "index", "=", "index", "+", "self", ...
Iterator over chunk offests.
[ "Iterator", "over", "chunk", "offests", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/objectstore.py#L164-L170