id int32 0 252k | repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens list | docstring stringlengths 3 17.3k | docstring_tokens list | sha stringlengths 40 40 | url stringlengths 87 242 |
|---|---|---|---|---|---|---|---|---|---|---|---|
223,600 | bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _find_input_files | def _find_input_files(var, out):
"""Find input files within the given CWL object.
"""
if isinstance(var, (list, tuple)):
for x in var:
out = _find_input_files(x, out)
elif isinstance(var, dict):
if var.get("class") == "File":
out.append(var["path"])
out = _find_input_files(var.get("secondaryFiles", []), out)
for key, val in var.items():
out = _find_input_files(val, out)
return out | python | def _find_input_files(var, out):
"""Find input files within the given CWL object.
"""
if isinstance(var, (list, tuple)):
for x in var:
out = _find_input_files(x, out)
elif isinstance(var, dict):
if var.get("class") == "File":
out.append(var["path"])
out = _find_input_files(var.get("secondaryFiles", []), out)
for key, val in var.items():
out = _find_input_files(val, out)
return out | [
"def",
"_find_input_files",
"(",
"var",
",",
"out",
")",
":",
"if",
"isinstance",
"(",
"var",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"for",
"x",
"in",
"var",
":",
"out",
"=",
"_find_input_files",
"(",
"x",
",",
"out",
")",
"elif",
"isinstan... | Find input files within the given CWL object. | [
"Find",
"input",
"files",
"within",
"the",
"given",
"CWL",
"object",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L209-L221 |
223,601 | bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _read_from_cwlinput | def _read_from_cwlinput(in_file, work_dir, runtime, parallel, input_order, output_cwl_keys):
"""Read data records from a JSON dump of inputs. Avoids command line flattening of records.
"""
with open(in_file) as in_handle:
inputs = json.load(in_handle)
items_by_key = {}
input_files = []
passed_keys = set([])
for key, input_val in ((k, v) for (k, v) in inputs.items() if not k.startswith(("sentinel", "ignore"))):
if key.endswith("_toolinput"):
key = key.replace("_toolinput", "")
if input_order[key] == "record":
cur_keys, items = _read_cwl_record(input_val)
passed_keys |= cur_keys
items_by_key[key] = items
else:
items_by_key[tuple(key.split("__"))] = _cwlvar_to_wdl(input_val)
input_files = _find_input_files(input_val, input_files)
prepped = _merge_cwlinputs(items_by_key, input_order, parallel)
out = []
for data in prepped:
if isinstance(data, (list, tuple)):
out.append([_finalize_cwl_in(utils.to_single_data(x), work_dir, list(passed_keys),
output_cwl_keys, runtime) for x in data])
else:
out.append(_finalize_cwl_in(data, work_dir, list(passed_keys), output_cwl_keys, runtime))
return out, input_files | python | def _read_from_cwlinput(in_file, work_dir, runtime, parallel, input_order, output_cwl_keys):
"""Read data records from a JSON dump of inputs. Avoids command line flattening of records.
"""
with open(in_file) as in_handle:
inputs = json.load(in_handle)
items_by_key = {}
input_files = []
passed_keys = set([])
for key, input_val in ((k, v) for (k, v) in inputs.items() if not k.startswith(("sentinel", "ignore"))):
if key.endswith("_toolinput"):
key = key.replace("_toolinput", "")
if input_order[key] == "record":
cur_keys, items = _read_cwl_record(input_val)
passed_keys |= cur_keys
items_by_key[key] = items
else:
items_by_key[tuple(key.split("__"))] = _cwlvar_to_wdl(input_val)
input_files = _find_input_files(input_val, input_files)
prepped = _merge_cwlinputs(items_by_key, input_order, parallel)
out = []
for data in prepped:
if isinstance(data, (list, tuple)):
out.append([_finalize_cwl_in(utils.to_single_data(x), work_dir, list(passed_keys),
output_cwl_keys, runtime) for x in data])
else:
out.append(_finalize_cwl_in(data, work_dir, list(passed_keys), output_cwl_keys, runtime))
return out, input_files | [
"def",
"_read_from_cwlinput",
"(",
"in_file",
",",
"work_dir",
",",
"runtime",
",",
"parallel",
",",
"input_order",
",",
"output_cwl_keys",
")",
":",
"with",
"open",
"(",
"in_file",
")",
"as",
"in_handle",
":",
"inputs",
"=",
"json",
".",
"load",
"(",
"in_... | Read data records from a JSON dump of inputs. Avoids command line flattening of records. | [
"Read",
"data",
"records",
"from",
"a",
"JSON",
"dump",
"of",
"inputs",
".",
"Avoids",
"command",
"line",
"flattening",
"of",
"records",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L223-L249 |
223,602 | bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _maybe_nest_bare_single | def _maybe_nest_bare_single(items_by_key, parallel):
"""Nest single inputs to avoid confusing single items and lists like files.
"""
if (parallel == "multi-parallel" and
(sum([1 for x in items_by_key.values() if not _is_nested_item(x)]) >=
sum([1 for x in items_by_key.values() if _is_nested_item(x)]))):
out = {}
for k, v in items_by_key.items():
out[k] = [v]
return out
else:
return items_by_key | python | def _maybe_nest_bare_single(items_by_key, parallel):
"""Nest single inputs to avoid confusing single items and lists like files.
"""
if (parallel == "multi-parallel" and
(sum([1 for x in items_by_key.values() if not _is_nested_item(x)]) >=
sum([1 for x in items_by_key.values() if _is_nested_item(x)]))):
out = {}
for k, v in items_by_key.items():
out[k] = [v]
return out
else:
return items_by_key | [
"def",
"_maybe_nest_bare_single",
"(",
"items_by_key",
",",
"parallel",
")",
":",
"if",
"(",
"parallel",
"==",
"\"multi-parallel\"",
"and",
"(",
"sum",
"(",
"[",
"1",
"for",
"x",
"in",
"items_by_key",
".",
"values",
"(",
")",
"if",
"not",
"_is_nested_item",
... | Nest single inputs to avoid confusing single items and lists like files. | [
"Nest",
"single",
"inputs",
"to",
"avoid",
"confusing",
"single",
"items",
"and",
"lists",
"like",
"files",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L254-L265 |
223,603 | bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _check_for_single_nested | def _check_for_single_nested(target, items_by_key, input_order):
"""Check for single nested inputs that match our target count and unnest.
Handles complex var inputs where some have an extra layer of nesting.
"""
out = utils.deepish_copy(items_by_key)
for (k, t) in input_order.items():
if t == "var":
v = items_by_key[tuple(k.split("__"))]
if _is_nested_single(v, target):
out[tuple(k.split("__"))] = v[0]
return out | python | def _check_for_single_nested(target, items_by_key, input_order):
"""Check for single nested inputs that match our target count and unnest.
Handles complex var inputs where some have an extra layer of nesting.
"""
out = utils.deepish_copy(items_by_key)
for (k, t) in input_order.items():
if t == "var":
v = items_by_key[tuple(k.split("__"))]
if _is_nested_single(v, target):
out[tuple(k.split("__"))] = v[0]
return out | [
"def",
"_check_for_single_nested",
"(",
"target",
",",
"items_by_key",
",",
"input_order",
")",
":",
"out",
"=",
"utils",
".",
"deepish_copy",
"(",
"items_by_key",
")",
"for",
"(",
"k",
",",
"t",
")",
"in",
"input_order",
".",
"items",
"(",
")",
":",
"if... | Check for single nested inputs that match our target count and unnest.
Handles complex var inputs where some have an extra layer of nesting. | [
"Check",
"for",
"single",
"nested",
"inputs",
"that",
"match",
"our",
"target",
"count",
"and",
"unnest",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L273-L284 |
223,604 | bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _concat_records | def _concat_records(items_by_key, input_order):
"""Concatenate records into a single key to avoid merging.
Handles heterogeneous records that will then be sorted out in
the processing fuction.
"""
all_records = []
for (k, t) in input_order.items():
if t == "record":
all_records.append(k)
out_items_by_key = utils.deepish_copy(items_by_key)
out_input_order = utils.deepish_copy(input_order)
if len(all_records) > 1:
final_k = all_records[0]
final_v = items_by_key[final_k]
for k in all_records[1:]:
final_v += items_by_key[k]
del out_items_by_key[k]
del out_input_order[k]
out_items_by_key[final_k] = final_v
return out_items_by_key, out_input_order | python | def _concat_records(items_by_key, input_order):
"""Concatenate records into a single key to avoid merging.
Handles heterogeneous records that will then be sorted out in
the processing fuction.
"""
all_records = []
for (k, t) in input_order.items():
if t == "record":
all_records.append(k)
out_items_by_key = utils.deepish_copy(items_by_key)
out_input_order = utils.deepish_copy(input_order)
if len(all_records) > 1:
final_k = all_records[0]
final_v = items_by_key[final_k]
for k in all_records[1:]:
final_v += items_by_key[k]
del out_items_by_key[k]
del out_input_order[k]
out_items_by_key[final_k] = final_v
return out_items_by_key, out_input_order | [
"def",
"_concat_records",
"(",
"items_by_key",
",",
"input_order",
")",
":",
"all_records",
"=",
"[",
"]",
"for",
"(",
"k",
",",
"t",
")",
"in",
"input_order",
".",
"items",
"(",
")",
":",
"if",
"t",
"==",
"\"record\"",
":",
"all_records",
".",
"append... | Concatenate records into a single key to avoid merging.
Handles heterogeneous records that will then be sorted out in
the processing fuction. | [
"Concatenate",
"records",
"into",
"a",
"single",
"key",
"to",
"avoid",
"merging",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L286-L306 |
223,605 | bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _nest_vars_in_rec | def _nest_vars_in_rec(var_items, rec_items, input_order, items_by_key, parallel):
"""Nest multiple variable inputs into a single record or list of batch records.
Custom CWL implementations extract and merge these.
"""
num_items = var_items
var_items = list(var_items)[0]
if rec_items:
rec_items = list(rec_items)[0]
if ((rec_items == 1 and var_items > 1) or parallel.startswith("batch")):
num_items = set([rec_items])
for var_key in (k for (k, t) in input_order.items() if t != "record"):
var_key = tuple(var_key.split("__"))
items_by_key[var_key] = [items_by_key[var_key]] * rec_items
else:
assert var_items == rec_items, (var_items, rec_items)
return items_by_key, num_items | python | def _nest_vars_in_rec(var_items, rec_items, input_order, items_by_key, parallel):
"""Nest multiple variable inputs into a single record or list of batch records.
Custom CWL implementations extract and merge these.
"""
num_items = var_items
var_items = list(var_items)[0]
if rec_items:
rec_items = list(rec_items)[0]
if ((rec_items == 1 and var_items > 1) or parallel.startswith("batch")):
num_items = set([rec_items])
for var_key in (k for (k, t) in input_order.items() if t != "record"):
var_key = tuple(var_key.split("__"))
items_by_key[var_key] = [items_by_key[var_key]] * rec_items
else:
assert var_items == rec_items, (var_items, rec_items)
return items_by_key, num_items | [
"def",
"_nest_vars_in_rec",
"(",
"var_items",
",",
"rec_items",
",",
"input_order",
",",
"items_by_key",
",",
"parallel",
")",
":",
"num_items",
"=",
"var_items",
"var_items",
"=",
"list",
"(",
"var_items",
")",
"[",
"0",
"]",
"if",
"rec_items",
":",
"rec_it... | Nest multiple variable inputs into a single record or list of batch records.
Custom CWL implementations extract and merge these. | [
"Nest",
"multiple",
"variable",
"inputs",
"into",
"a",
"single",
"record",
"or",
"list",
"of",
"batch",
"records",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L364-L380 |
223,606 | bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _expand_rec_to_vars | def _expand_rec_to_vars(var_items, rec_items, input_order, items_by_key, parallel):
"""Expand record to apply to number of variants.
Alternative approach to _nest_vars_in_rec
to combining a single record with multiple variants.
"""
num_items = var_items
var_items = list(var_items)[0]
if rec_items:
for rec_key in (k for (k, t) in input_order.items() if t == "record"):
rec_vals = items_by_key[rec_key]
if len(rec_vals) == 1 and var_items > 1:
items_by_key[rec_key] = rec_vals * var_items
else:
assert var_items == len(rec_vals), (var_items, rec_vals)
return items_by_key, num_items | python | def _expand_rec_to_vars(var_items, rec_items, input_order, items_by_key, parallel):
"""Expand record to apply to number of variants.
Alternative approach to _nest_vars_in_rec
to combining a single record with multiple variants.
"""
num_items = var_items
var_items = list(var_items)[0]
if rec_items:
for rec_key in (k for (k, t) in input_order.items() if t == "record"):
rec_vals = items_by_key[rec_key]
if len(rec_vals) == 1 and var_items > 1:
items_by_key[rec_key] = rec_vals * var_items
else:
assert var_items == len(rec_vals), (var_items, rec_vals)
return items_by_key, num_items | [
"def",
"_expand_rec_to_vars",
"(",
"var_items",
",",
"rec_items",
",",
"input_order",
",",
"items_by_key",
",",
"parallel",
")",
":",
"num_items",
"=",
"var_items",
"var_items",
"=",
"list",
"(",
"var_items",
")",
"[",
"0",
"]",
"if",
"rec_items",
":",
"for"... | Expand record to apply to number of variants.
Alternative approach to _nest_vars_in_rec
to combining a single record with multiple variants. | [
"Expand",
"record",
"to",
"apply",
"to",
"number",
"of",
"variants",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L382-L397 |
223,607 | bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _read_cwl_record | def _read_cwl_record(rec):
"""Read CWL records, handling multiple nesting and batching cases.
"""
keys = set([])
out = []
if isinstance(rec, dict):
is_batched = all([isinstance(v, (list, tuple)) for v in rec.values()])
cur = [{} for _ in range(len(rec.values()[0]) if is_batched else 1)]
for k in rec.keys():
keys.add(k)
val = rec[k]
val = val if is_batched else [val]
for i, v in enumerate(val):
v = _cwlvar_to_wdl(v)
cur[i] = _update_nested(k.split("__"), v, cur[i])
if is_batched:
out.append(cur)
else:
assert len(cur) == 1
out.append(cur[0])
else:
assert isinstance(rec, (list, tuple))
for sub_rec in rec:
sub_keys, sub_out = _read_cwl_record(sub_rec)
keys |= sub_keys
out.append(sub_out)
return keys, out | python | def _read_cwl_record(rec):
"""Read CWL records, handling multiple nesting and batching cases.
"""
keys = set([])
out = []
if isinstance(rec, dict):
is_batched = all([isinstance(v, (list, tuple)) for v in rec.values()])
cur = [{} for _ in range(len(rec.values()[0]) if is_batched else 1)]
for k in rec.keys():
keys.add(k)
val = rec[k]
val = val if is_batched else [val]
for i, v in enumerate(val):
v = _cwlvar_to_wdl(v)
cur[i] = _update_nested(k.split("__"), v, cur[i])
if is_batched:
out.append(cur)
else:
assert len(cur) == 1
out.append(cur[0])
else:
assert isinstance(rec, (list, tuple))
for sub_rec in rec:
sub_keys, sub_out = _read_cwl_record(sub_rec)
keys |= sub_keys
out.append(sub_out)
return keys, out | [
"def",
"_read_cwl_record",
"(",
"rec",
")",
":",
"keys",
"=",
"set",
"(",
"[",
"]",
")",
"out",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"rec",
",",
"dict",
")",
":",
"is_batched",
"=",
"all",
"(",
"[",
"isinstance",
"(",
"v",
",",
"(",
"list",
... | Read CWL records, handling multiple nesting and batching cases. | [
"Read",
"CWL",
"records",
"handling",
"multiple",
"nesting",
"and",
"batching",
"cases",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L399-L425 |
223,608 | bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _finalize_cwl_in | def _finalize_cwl_in(data, work_dir, passed_keys, output_cwl_keys, runtime):
"""Finalize data object with inputs from CWL.
"""
data["dirs"] = {"work": work_dir}
if not tz.get_in(["config", "algorithm"], data):
if "config" not in data:
data["config"] = {}
data["config"]["algorithm"] = {}
if "rgnames" not in data and "description" in data:
data["rgnames"] = {"sample": data["description"]}
data["cwl_keys"] = passed_keys
data["output_cwl_keys"] = output_cwl_keys
data = _add_resources(data, runtime)
data = cwlutils.normalize_missing(data)
data = run_info.normalize_world(data)
return data | python | def _finalize_cwl_in(data, work_dir, passed_keys, output_cwl_keys, runtime):
"""Finalize data object with inputs from CWL.
"""
data["dirs"] = {"work": work_dir}
if not tz.get_in(["config", "algorithm"], data):
if "config" not in data:
data["config"] = {}
data["config"]["algorithm"] = {}
if "rgnames" not in data and "description" in data:
data["rgnames"] = {"sample": data["description"]}
data["cwl_keys"] = passed_keys
data["output_cwl_keys"] = output_cwl_keys
data = _add_resources(data, runtime)
data = cwlutils.normalize_missing(data)
data = run_info.normalize_world(data)
return data | [
"def",
"_finalize_cwl_in",
"(",
"data",
",",
"work_dir",
",",
"passed_keys",
",",
"output_cwl_keys",
",",
"runtime",
")",
":",
"data",
"[",
"\"dirs\"",
"]",
"=",
"{",
"\"work\"",
":",
"work_dir",
"}",
"if",
"not",
"tz",
".",
"get_in",
"(",
"[",
"\"config... | Finalize data object with inputs from CWL. | [
"Finalize",
"data",
"object",
"with",
"inputs",
"from",
"CWL",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L427-L442 |
223,609 | bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _convert_value | def _convert_value(val):
"""Handle multiple input type values.
"""
def _is_number(x, op):
try:
op(x)
return True
except ValueError:
return False
if isinstance(val, (list, tuple)):
return [_convert_value(x) for x in val]
elif val is None:
return val
elif _is_number(val, int):
return int(val)
elif _is_number(val, float):
return float(val)
elif val.find(";;") >= 0:
return [_convert_value(v) for v in val.split(";;")]
elif val.startswith(("{", "[")):
# Can get ugly JSON output from CWL with unicode and ' instead of "
# This tries to fix it so parsed correctly by json loader
return json.loads(val.replace("u'", "'").replace("'", '"'))
elif val.lower() == "true":
return True
elif val.lower() == "false":
return False
else:
return val | python | def _convert_value(val):
"""Handle multiple input type values.
"""
def _is_number(x, op):
try:
op(x)
return True
except ValueError:
return False
if isinstance(val, (list, tuple)):
return [_convert_value(x) for x in val]
elif val is None:
return val
elif _is_number(val, int):
return int(val)
elif _is_number(val, float):
return float(val)
elif val.find(";;") >= 0:
return [_convert_value(v) for v in val.split(";;")]
elif val.startswith(("{", "[")):
# Can get ugly JSON output from CWL with unicode and ' instead of "
# This tries to fix it so parsed correctly by json loader
return json.loads(val.replace("u'", "'").replace("'", '"'))
elif val.lower() == "true":
return True
elif val.lower() == "false":
return False
else:
return val | [
"def",
"_convert_value",
"(",
"val",
")",
":",
"def",
"_is_number",
"(",
"x",
",",
"op",
")",
":",
"try",
":",
"op",
"(",
"x",
")",
"return",
"True",
"except",
"ValueError",
":",
"return",
"False",
"if",
"isinstance",
"(",
"val",
",",
"(",
"list",
... | Handle multiple input type values. | [
"Handle",
"multiple",
"input",
"type",
"values",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L444-L472 |
223,610 | bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _get_output_cwl_keys | def _get_output_cwl_keys(fnargs):
"""Retrieve output_cwl_keys from potentially nested input arguments.
"""
for d in utils.flatten(fnargs):
if isinstance(d, dict) and d.get("output_cwl_keys"):
return d["output_cwl_keys"]
raise ValueError("Did not find output_cwl_keys in %s" % (pprint.pformat(fnargs))) | python | def _get_output_cwl_keys(fnargs):
"""Retrieve output_cwl_keys from potentially nested input arguments.
"""
for d in utils.flatten(fnargs):
if isinstance(d, dict) and d.get("output_cwl_keys"):
return d["output_cwl_keys"]
raise ValueError("Did not find output_cwl_keys in %s" % (pprint.pformat(fnargs))) | [
"def",
"_get_output_cwl_keys",
"(",
"fnargs",
")",
":",
"for",
"d",
"in",
"utils",
".",
"flatten",
"(",
"fnargs",
")",
":",
"if",
"isinstance",
"(",
"d",
",",
"dict",
")",
"and",
"d",
".",
"get",
"(",
"\"output_cwl_keys\"",
")",
":",
"return",
"d",
"... | Retrieve output_cwl_keys from potentially nested input arguments. | [
"Retrieve",
"output_cwl_keys",
"from",
"potentially",
"nested",
"input",
"arguments",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L492-L498 |
223,611 | bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _combine_cwl_records | def _combine_cwl_records(recs, record_name, parallel):
"""Provide a list of nexted CWL records keyed by output key.
Handles batches, where we return a list of records, and single items
where we return one record.
"""
if parallel not in ["multi-batch", "single-split", "multi-combined", "batch-single"]:
assert len(recs) == 1, pprint.pformat(recs)
return {record_name: recs[0]}
else:
return {record_name: recs} | python | def _combine_cwl_records(recs, record_name, parallel):
"""Provide a list of nexted CWL records keyed by output key.
Handles batches, where we return a list of records, and single items
where we return one record.
"""
if parallel not in ["multi-batch", "single-split", "multi-combined", "batch-single"]:
assert len(recs) == 1, pprint.pformat(recs)
return {record_name: recs[0]}
else:
return {record_name: recs} | [
"def",
"_combine_cwl_records",
"(",
"recs",
",",
"record_name",
",",
"parallel",
")",
":",
"if",
"parallel",
"not",
"in",
"[",
"\"multi-batch\"",
",",
"\"single-split\"",
",",
"\"multi-combined\"",
",",
"\"batch-single\"",
"]",
":",
"assert",
"len",
"(",
"recs",... | Provide a list of nexted CWL records keyed by output key.
Handles batches, where we return a list of records, and single items
where we return one record. | [
"Provide",
"a",
"list",
"of",
"nexted",
"CWL",
"records",
"keyed",
"by",
"output",
"key",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L500-L510 |
223,612 | bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _collapse_to_cwl_record_single | def _collapse_to_cwl_record_single(data, want_attrs, input_files):
"""Convert a single sample into a CWL record.
"""
out = {}
for key in want_attrs:
key_parts = key.split("__")
out[key] = _to_cwl(tz.get_in(key_parts, data), input_files)
return out | python | def _collapse_to_cwl_record_single(data, want_attrs, input_files):
"""Convert a single sample into a CWL record.
"""
out = {}
for key in want_attrs:
key_parts = key.split("__")
out[key] = _to_cwl(tz.get_in(key_parts, data), input_files)
return out | [
"def",
"_collapse_to_cwl_record_single",
"(",
"data",
",",
"want_attrs",
",",
"input_files",
")",
":",
"out",
"=",
"{",
"}",
"for",
"key",
"in",
"want_attrs",
":",
"key_parts",
"=",
"key",
".",
"split",
"(",
"\"__\"",
")",
"out",
"[",
"key",
"]",
"=",
... | Convert a single sample into a CWL record. | [
"Convert",
"a",
"single",
"sample",
"into",
"a",
"CWL",
"record",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L512-L519 |
223,613 | bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _nested_cwl_record | def _nested_cwl_record(xs, want_attrs, input_files):
"""Convert arbitrarily nested samples into a nested list of dictionaries.
nests only at the record level, rather than within records. For batching
a top level list is all of the batches and sub-lists are samples within the
batch.
"""
if isinstance(xs, (list, tuple)):
return [_nested_cwl_record(x, want_attrs, input_files) for x in xs]
else:
assert isinstance(xs, dict), pprint.pformat(xs)
return _collapse_to_cwl_record_single(xs, want_attrs, input_files) | python | def _nested_cwl_record(xs, want_attrs, input_files):
"""Convert arbitrarily nested samples into a nested list of dictionaries.
nests only at the record level, rather than within records. For batching
a top level list is all of the batches and sub-lists are samples within the
batch.
"""
if isinstance(xs, (list, tuple)):
return [_nested_cwl_record(x, want_attrs, input_files) for x in xs]
else:
assert isinstance(xs, dict), pprint.pformat(xs)
return _collapse_to_cwl_record_single(xs, want_attrs, input_files) | [
"def",
"_nested_cwl_record",
"(",
"xs",
",",
"want_attrs",
",",
"input_files",
")",
":",
"if",
"isinstance",
"(",
"xs",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"return",
"[",
"_nested_cwl_record",
"(",
"x",
",",
"want_attrs",
",",
"input_files",
"... | Convert arbitrarily nested samples into a nested list of dictionaries.
nests only at the record level, rather than within records. For batching
a top level list is all of the batches and sub-lists are samples within the
batch. | [
"Convert",
"arbitrarily",
"nested",
"samples",
"into",
"a",
"nested",
"list",
"of",
"dictionaries",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L521-L532 |
223,614 | bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _collapse_to_cwl_record | def _collapse_to_cwl_record(samples, want_attrs, input_files):
"""Convert nested samples from batches into a CWL record, based on input keys.
"""
input_keys = sorted(list(set().union(*[d["cwl_keys"] for d in samples])), key=lambda x: (-len(x), tuple(x)))
out = {}
for key in input_keys:
if key in want_attrs:
key_parts = key.split("__")
vals = []
cur = []
for d in samples:
vals.append(_to_cwl(tz.get_in(key_parts, d), input_files))
# Remove nested keys to avoid specifying multiple times
cur.append(_dissoc_in(d, key_parts) if len(key_parts) > 1 else d)
samples = cur
out[key] = vals
return out | python | def _collapse_to_cwl_record(samples, want_attrs, input_files):
"""Convert nested samples from batches into a CWL record, based on input keys.
"""
input_keys = sorted(list(set().union(*[d["cwl_keys"] for d in samples])), key=lambda x: (-len(x), tuple(x)))
out = {}
for key in input_keys:
if key in want_attrs:
key_parts = key.split("__")
vals = []
cur = []
for d in samples:
vals.append(_to_cwl(tz.get_in(key_parts, d), input_files))
# Remove nested keys to avoid specifying multiple times
cur.append(_dissoc_in(d, key_parts) if len(key_parts) > 1 else d)
samples = cur
out[key] = vals
return out | [
"def",
"_collapse_to_cwl_record",
"(",
"samples",
",",
"want_attrs",
",",
"input_files",
")",
":",
"input_keys",
"=",
"sorted",
"(",
"list",
"(",
"set",
"(",
")",
".",
"union",
"(",
"*",
"[",
"d",
"[",
"\"cwl_keys\"",
"]",
"for",
"d",
"in",
"samples",
... | Convert nested samples from batches into a CWL record, based on input keys. | [
"Convert",
"nested",
"samples",
"from",
"batches",
"into",
"a",
"CWL",
"record",
"based",
"on",
"input",
"keys",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L534-L550 |
223,615 | bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _file_and_exists | def _file_and_exists(val, input_files):
"""Check if an input is a file and exists.
Checks both locally (staged) and from input files (re-passed but never localized).
"""
return ((os.path.exists(val) and os.path.isfile(val)) or
val in input_files) | python | def _file_and_exists(val, input_files):
"""Check if an input is a file and exists.
Checks both locally (staged) and from input files (re-passed but never localized).
"""
return ((os.path.exists(val) and os.path.isfile(val)) or
val in input_files) | [
"def",
"_file_and_exists",
"(",
"val",
",",
"input_files",
")",
":",
"return",
"(",
"(",
"os",
".",
"path",
".",
"exists",
"(",
"val",
")",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"val",
")",
")",
"or",
"val",
"in",
"input_files",
")"
] | Check if an input is a file and exists.
Checks both locally (staged) and from input files (re-passed but never localized). | [
"Check",
"if",
"an",
"input",
"is",
"a",
"file",
"and",
"exists",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L552-L558 |
223,616 | bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _to_cwl | def _to_cwl(val, input_files):
"""Convert a value into CWL formatted JSON, handling files and complex things.
"""
if isinstance(val, six.string_types):
if _file_and_exists(val, input_files):
val = {"class": "File", "path": val}
secondary = []
for idx in [".bai", ".tbi", ".gbi", ".fai", ".crai", ".db"]:
idx_file = val["path"] + idx
if _file_and_exists(idx_file, input_files):
secondary.append({"class": "File", "path": idx_file})
for idx in [".dict"]:
idx_file = os.path.splitext(val["path"])[0] + idx
if _file_and_exists(idx_file, input_files):
secondary.append({"class": "File", "path": idx_file})
cur_dir, cur_file = os.path.split(val["path"])
# Handle relative paths
if not cur_dir:
cur_dir = os.getcwd()
if cur_file.endswith(cwlutils.DIR_TARGETS):
if os.path.exists(cur_dir):
for fname in os.listdir(cur_dir):
if fname != cur_file and not os.path.isdir(os.path.join(cur_dir, fname))\
and fname != 'sbg.worker.log':
secondary.append({"class": "File", "path": os.path.join(cur_dir, fname)})
else:
for f in input_files:
if f.startswith(cur_dir) and f != cur_file and not os.path.isdir(f):
secondary.append({"class": "File", "path": f})
if secondary:
val["secondaryFiles"] = _remove_duplicate_files(secondary)
elif isinstance(val, (list, tuple)):
val = [_to_cwl(x, input_files) for x in val]
elif isinstance(val, dict):
# File representation with secondary files
if "base" in val and "secondary" in val:
out = {"class": "File", "path": val["base"]}
secondary = [{"class": "File", "path": x} for x in val["secondary"] if not os.path.isdir(x)]
if secondary:
out["secondaryFiles"] = _remove_duplicate_files(secondary)
val = out
else:
val = json.dumps(val, sort_keys=True, separators=(',', ':'))
return val | python | def _to_cwl(val, input_files):
"""Convert a value into CWL formatted JSON, handling files and complex things.
"""
if isinstance(val, six.string_types):
if _file_and_exists(val, input_files):
val = {"class": "File", "path": val}
secondary = []
for idx in [".bai", ".tbi", ".gbi", ".fai", ".crai", ".db"]:
idx_file = val["path"] + idx
if _file_and_exists(idx_file, input_files):
secondary.append({"class": "File", "path": idx_file})
for idx in [".dict"]:
idx_file = os.path.splitext(val["path"])[0] + idx
if _file_and_exists(idx_file, input_files):
secondary.append({"class": "File", "path": idx_file})
cur_dir, cur_file = os.path.split(val["path"])
# Handle relative paths
if not cur_dir:
cur_dir = os.getcwd()
if cur_file.endswith(cwlutils.DIR_TARGETS):
if os.path.exists(cur_dir):
for fname in os.listdir(cur_dir):
if fname != cur_file and not os.path.isdir(os.path.join(cur_dir, fname))\
and fname != 'sbg.worker.log':
secondary.append({"class": "File", "path": os.path.join(cur_dir, fname)})
else:
for f in input_files:
if f.startswith(cur_dir) and f != cur_file and not os.path.isdir(f):
secondary.append({"class": "File", "path": f})
if secondary:
val["secondaryFiles"] = _remove_duplicate_files(secondary)
elif isinstance(val, (list, tuple)):
val = [_to_cwl(x, input_files) for x in val]
elif isinstance(val, dict):
# File representation with secondary files
if "base" in val and "secondary" in val:
out = {"class": "File", "path": val["base"]}
secondary = [{"class": "File", "path": x} for x in val["secondary"] if not os.path.isdir(x)]
if secondary:
out["secondaryFiles"] = _remove_duplicate_files(secondary)
val = out
else:
val = json.dumps(val, sort_keys=True, separators=(',', ':'))
return val | [
"def",
"_to_cwl",
"(",
"val",
",",
"input_files",
")",
":",
"if",
"isinstance",
"(",
"val",
",",
"six",
".",
"string_types",
")",
":",
"if",
"_file_and_exists",
"(",
"val",
",",
"input_files",
")",
":",
"val",
"=",
"{",
"\"class\"",
":",
"\"File\"",
",... | Convert a value into CWL formatted JSON, handling files and complex things. | [
"Convert",
"a",
"value",
"into",
"CWL",
"formatted",
"JSON",
"handling",
"files",
"and",
"complex",
"things",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L560-L603 |
223,617 | bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _remove_duplicate_files | def _remove_duplicate_files(xs):
"""Remove files specified multiple times in a list.
"""
seen = set([])
out = []
for x in xs:
if x["path"] not in seen:
out.append(x)
seen.add(x["path"])
return out | python | def _remove_duplicate_files(xs):
"""Remove files specified multiple times in a list.
"""
seen = set([])
out = []
for x in xs:
if x["path"] not in seen:
out.append(x)
seen.add(x["path"])
return out | [
"def",
"_remove_duplicate_files",
"(",
"xs",
")",
":",
"seen",
"=",
"set",
"(",
"[",
"]",
")",
"out",
"=",
"[",
"]",
"for",
"x",
"in",
"xs",
":",
"if",
"x",
"[",
"\"path\"",
"]",
"not",
"in",
"seen",
":",
"out",
".",
"append",
"(",
"x",
")",
... | Remove files specified multiple times in a list. | [
"Remove",
"files",
"specified",
"multiple",
"times",
"in",
"a",
"list",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L605-L614 |
223,618 | bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _update_nested | def _update_nested(key, val, data, allow_overwriting=False):
"""Update the data object, avoiding over-writing with nested dictionaries.
"""
if isinstance(val, dict):
for sub_key, sub_val in val.items():
data = _update_nested(key + [sub_key], sub_val, data, allow_overwriting=allow_overwriting)
else:
already_there = tz.get_in(key, data) is not None
if already_there and val:
if not allow_overwriting:
raise ValueError("Duplicated key %s: %s and %s" % (key, val, tz.get_in(key, data)))
else:
already_there = False
if val or not already_there:
data = tz.update_in(data, key, lambda x: val)
return data | python | def _update_nested(key, val, data, allow_overwriting=False):
"""Update the data object, avoiding over-writing with nested dictionaries.
"""
if isinstance(val, dict):
for sub_key, sub_val in val.items():
data = _update_nested(key + [sub_key], sub_val, data, allow_overwriting=allow_overwriting)
else:
already_there = tz.get_in(key, data) is not None
if already_there and val:
if not allow_overwriting:
raise ValueError("Duplicated key %s: %s and %s" % (key, val, tz.get_in(key, data)))
else:
already_there = False
if val or not already_there:
data = tz.update_in(data, key, lambda x: val)
return data | [
"def",
"_update_nested",
"(",
"key",
",",
"val",
",",
"data",
",",
"allow_overwriting",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"val",
",",
"dict",
")",
":",
"for",
"sub_key",
",",
"sub_val",
"in",
"val",
".",
"items",
"(",
")",
":",
"data",... | Update the data object, avoiding over-writing with nested dictionaries. | [
"Update",
"the",
"data",
"object",
"avoiding",
"over",
"-",
"writing",
"with",
"nested",
"dictionaries",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L623-L638 |
223,619 | bcbio/bcbio-nextgen | bcbio/server/main.py | start | def start(args):
"""Run server with provided command line arguments.
"""
application = tornado.web.Application([(r"/run", run.get_handler(args)),
(r"/status", run.StatusHandler)])
application.runmonitor = RunMonitor()
application.listen(args.port)
tornado.ioloop.IOLoop.instance().start() | python | def start(args):
"""Run server with provided command line arguments.
"""
application = tornado.web.Application([(r"/run", run.get_handler(args)),
(r"/status", run.StatusHandler)])
application.runmonitor = RunMonitor()
application.listen(args.port)
tornado.ioloop.IOLoop.instance().start() | [
"def",
"start",
"(",
"args",
")",
":",
"application",
"=",
"tornado",
".",
"web",
".",
"Application",
"(",
"[",
"(",
"r\"/run\"",
",",
"run",
".",
"get_handler",
"(",
"args",
")",
")",
",",
"(",
"r\"/status\"",
",",
"run",
".",
"StatusHandler",
")",
... | Run server with provided command line arguments. | [
"Run",
"server",
"with",
"provided",
"command",
"line",
"arguments",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/server/main.py#L8-L15 |
223,620 | bcbio/bcbio-nextgen | bcbio/server/main.py | add_subparser | def add_subparser(subparsers):
"""Add command line arguments as server subparser.
"""
parser = subparsers.add_parser("server", help="Run a bcbio-nextgen server allowing remote job execution.")
parser.add_argument("-c", "--config", help=("Global YAML configuration file specifying system details."
"Defaults to installed bcbio_system.yaml"))
parser.add_argument("-p", "--port", help="Port to listen on (default 8080)",
default=8080, type=int)
parser.add_argument("-n", "--cores", help="Cores to use when processing locally when not requested (default 1)",
default=1, type=int)
parser.add_argument("-d", "--biodata_dir", help="Directory with biological data",
default="/mnt/biodata", type=str)
return parser | python | def add_subparser(subparsers):
"""Add command line arguments as server subparser.
"""
parser = subparsers.add_parser("server", help="Run a bcbio-nextgen server allowing remote job execution.")
parser.add_argument("-c", "--config", help=("Global YAML configuration file specifying system details."
"Defaults to installed bcbio_system.yaml"))
parser.add_argument("-p", "--port", help="Port to listen on (default 8080)",
default=8080, type=int)
parser.add_argument("-n", "--cores", help="Cores to use when processing locally when not requested (default 1)",
default=1, type=int)
parser.add_argument("-d", "--biodata_dir", help="Directory with biological data",
default="/mnt/biodata", type=str)
return parser | [
"def",
"add_subparser",
"(",
"subparsers",
")",
":",
"parser",
"=",
"subparsers",
".",
"add_parser",
"(",
"\"server\"",
",",
"help",
"=",
"\"Run a bcbio-nextgen server allowing remote job execution.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"-c\"",
",",
"\"--conf... | Add command line arguments as server subparser. | [
"Add",
"command",
"line",
"arguments",
"as",
"server",
"subparser",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/server/main.py#L29-L41 |
223,621 | bcbio/bcbio-nextgen | bcbio/provenance/data.py | write_versions | def write_versions(dirs, items):
"""Write data versioning for genomes present in the configuration.
"""
genomes = {}
for d in items:
genomes[d["genome_build"]] = d.get("reference", {}).get("versions")
out_file = _get_out_file(dirs)
found_versions = False
if genomes and out_file:
with open(out_file, "w") as out_handle:
writer = csv.writer(out_handle)
writer.writerow(["genome", "resource", "version"])
for genome, version_file in genomes.items():
if not version_file:
genome_dir = install.get_genome_dir(genome, dirs.get("galaxy"), items[0])
if genome_dir:
version_file = os.path.join(genome_dir, "versions.csv")
if version_file and os.path.exists(version_file):
found_versions = True
with open(version_file) as in_handle:
reader = csv.reader(in_handle)
for parts in reader:
if len(parts) >= 2:
resource, version = parts[:2]
writer.writerow([genome, resource, version])
if found_versions:
return out_file | python | def write_versions(dirs, items):
"""Write data versioning for genomes present in the configuration.
"""
genomes = {}
for d in items:
genomes[d["genome_build"]] = d.get("reference", {}).get("versions")
out_file = _get_out_file(dirs)
found_versions = False
if genomes and out_file:
with open(out_file, "w") as out_handle:
writer = csv.writer(out_handle)
writer.writerow(["genome", "resource", "version"])
for genome, version_file in genomes.items():
if not version_file:
genome_dir = install.get_genome_dir(genome, dirs.get("galaxy"), items[0])
if genome_dir:
version_file = os.path.join(genome_dir, "versions.csv")
if version_file and os.path.exists(version_file):
found_versions = True
with open(version_file) as in_handle:
reader = csv.reader(in_handle)
for parts in reader:
if len(parts) >= 2:
resource, version = parts[:2]
writer.writerow([genome, resource, version])
if found_versions:
return out_file | [
"def",
"write_versions",
"(",
"dirs",
",",
"items",
")",
":",
"genomes",
"=",
"{",
"}",
"for",
"d",
"in",
"items",
":",
"genomes",
"[",
"d",
"[",
"\"genome_build\"",
"]",
"]",
"=",
"d",
".",
"get",
"(",
"\"reference\"",
",",
"{",
"}",
")",
".",
"... | Write data versioning for genomes present in the configuration. | [
"Write",
"data",
"versioning",
"for",
"genomes",
"present",
"in",
"the",
"configuration",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/provenance/data.py#L8-L34 |
223,622 | bcbio/bcbio-nextgen | bcbio/variation/ensemble.py | combine_calls | def combine_calls(*args):
"""Combine multiple callsets into a final set of merged calls.
"""
if len(args) == 3:
is_cwl = False
batch_id, samples, data = args
caller_names, vrn_files = _organize_variants(samples, batch_id)
else:
is_cwl = True
samples = [utils.to_single_data(x) for x in args]
samples = [cwlutils.unpack_tarballs(x, x) for x in samples]
data = samples[0]
batch_id = data["batch_id"]
caller_names = data["variants"]["variantcallers"]
vrn_files = data["variants"]["calls"]
logger.info("Ensemble consensus calls for {0}: {1}".format(
batch_id, ",".join(caller_names)))
edata = copy.deepcopy(data)
base_dir = utils.safe_makedir(os.path.join(edata["dirs"]["work"], "ensemble", batch_id))
if any([vcfutils.vcf_has_variants(f) for f in vrn_files]):
# Decompose multiallelic variants and normalize
passonly = not tz.get_in(["config", "algorithm", "ensemble", "use_filtered"], edata, False)
vrn_files = [normalize.normalize(f, data, passonly=passonly, rerun_effects=False, remove_oldeffects=True,
nonrefonly=True,
work_dir=utils.safe_makedir(os.path.join(base_dir, c)))
for c, f in zip(caller_names, vrn_files)]
if "classifiers" not in (dd.get_ensemble(edata) or {}):
callinfo = _run_ensemble_intersection(batch_id, vrn_files, caller_names, base_dir, edata)
else:
config_file = _write_config_file(batch_id, caller_names, base_dir, edata)
callinfo = _run_ensemble(batch_id, vrn_files, config_file, base_dir,
dd.get_ref_file(edata), edata)
callinfo["vrn_file"] = vcfutils.bgzip_and_index(callinfo["vrn_file"], data["config"])
# After decomposing multiallelic variants and normalizing, re-evaluate effects
ann_ma_file, _ = effects.add_to_vcf(callinfo["vrn_file"], data)
if ann_ma_file:
callinfo["vrn_file"] = ann_ma_file
edata["config"]["algorithm"]["variantcaller"] = "ensemble"
edata["vrn_file"] = callinfo["vrn_file"]
edata["ensemble_bed"] = callinfo["bed_file"]
callinfo["validate"] = validate.compare_to_rm(edata)[0][0].get("validate")
else:
out_vcf_file = os.path.join(base_dir, "{0}-ensemble.vcf".format(batch_id))
vcfutils.write_empty_vcf(out_vcf_file, samples=[dd.get_sample_name(d) for d in samples])
callinfo = {"variantcaller": "ensemble",
"vrn_file": vcfutils.bgzip_and_index(out_vcf_file, data["config"]),
"bed_file": None}
if is_cwl:
callinfo["batch_samples"] = data["batch_samples"]
callinfo["batch_id"] = batch_id
return [{"ensemble": callinfo}]
else:
return [[batch_id, callinfo]] | python | def combine_calls(*args):
"""Combine multiple callsets into a final set of merged calls.
"""
if len(args) == 3:
is_cwl = False
batch_id, samples, data = args
caller_names, vrn_files = _organize_variants(samples, batch_id)
else:
is_cwl = True
samples = [utils.to_single_data(x) for x in args]
samples = [cwlutils.unpack_tarballs(x, x) for x in samples]
data = samples[0]
batch_id = data["batch_id"]
caller_names = data["variants"]["variantcallers"]
vrn_files = data["variants"]["calls"]
logger.info("Ensemble consensus calls for {0}: {1}".format(
batch_id, ",".join(caller_names)))
edata = copy.deepcopy(data)
base_dir = utils.safe_makedir(os.path.join(edata["dirs"]["work"], "ensemble", batch_id))
if any([vcfutils.vcf_has_variants(f) for f in vrn_files]):
# Decompose multiallelic variants and normalize
passonly = not tz.get_in(["config", "algorithm", "ensemble", "use_filtered"], edata, False)
vrn_files = [normalize.normalize(f, data, passonly=passonly, rerun_effects=False, remove_oldeffects=True,
nonrefonly=True,
work_dir=utils.safe_makedir(os.path.join(base_dir, c)))
for c, f in zip(caller_names, vrn_files)]
if "classifiers" not in (dd.get_ensemble(edata) or {}):
callinfo = _run_ensemble_intersection(batch_id, vrn_files, caller_names, base_dir, edata)
else:
config_file = _write_config_file(batch_id, caller_names, base_dir, edata)
callinfo = _run_ensemble(batch_id, vrn_files, config_file, base_dir,
dd.get_ref_file(edata), edata)
callinfo["vrn_file"] = vcfutils.bgzip_and_index(callinfo["vrn_file"], data["config"])
# After decomposing multiallelic variants and normalizing, re-evaluate effects
ann_ma_file, _ = effects.add_to_vcf(callinfo["vrn_file"], data)
if ann_ma_file:
callinfo["vrn_file"] = ann_ma_file
edata["config"]["algorithm"]["variantcaller"] = "ensemble"
edata["vrn_file"] = callinfo["vrn_file"]
edata["ensemble_bed"] = callinfo["bed_file"]
callinfo["validate"] = validate.compare_to_rm(edata)[0][0].get("validate")
else:
out_vcf_file = os.path.join(base_dir, "{0}-ensemble.vcf".format(batch_id))
vcfutils.write_empty_vcf(out_vcf_file, samples=[dd.get_sample_name(d) for d in samples])
callinfo = {"variantcaller": "ensemble",
"vrn_file": vcfutils.bgzip_and_index(out_vcf_file, data["config"]),
"bed_file": None}
if is_cwl:
callinfo["batch_samples"] = data["batch_samples"]
callinfo["batch_id"] = batch_id
return [{"ensemble": callinfo}]
else:
return [[batch_id, callinfo]] | [
"def",
"combine_calls",
"(",
"*",
"args",
")",
":",
"if",
"len",
"(",
"args",
")",
"==",
"3",
":",
"is_cwl",
"=",
"False",
"batch_id",
",",
"samples",
",",
"data",
"=",
"args",
"caller_names",
",",
"vrn_files",
"=",
"_organize_variants",
"(",
"samples",
... | Combine multiple callsets into a final set of merged calls. | [
"Combine",
"multiple",
"callsets",
"into",
"a",
"final",
"set",
"of",
"merged",
"calls",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/ensemble.py#L57-L110 |
223,623 | bcbio/bcbio-nextgen | bcbio/variation/ensemble.py | combine_calls_parallel | def combine_calls_parallel(samples, run_parallel):
"""Combine calls using batched Ensemble approach.
"""
batch_groups, extras = _group_by_batches(samples, _has_ensemble)
out = []
if batch_groups:
processed = run_parallel("combine_calls", ((b, xs, xs[0]) for b, xs in batch_groups.items()))
for batch_id, callinfo in processed:
for data in batch_groups[batch_id]:
data["variants"].insert(0, callinfo)
out.append([data])
return out + extras | python | def combine_calls_parallel(samples, run_parallel):
"""Combine calls using batched Ensemble approach.
"""
batch_groups, extras = _group_by_batches(samples, _has_ensemble)
out = []
if batch_groups:
processed = run_parallel("combine_calls", ((b, xs, xs[0]) for b, xs in batch_groups.items()))
for batch_id, callinfo in processed:
for data in batch_groups[batch_id]:
data["variants"].insert(0, callinfo)
out.append([data])
return out + extras | [
"def",
"combine_calls_parallel",
"(",
"samples",
",",
"run_parallel",
")",
":",
"batch_groups",
",",
"extras",
"=",
"_group_by_batches",
"(",
"samples",
",",
"_has_ensemble",
")",
"out",
"=",
"[",
"]",
"if",
"batch_groups",
":",
"processed",
"=",
"run_parallel",... | Combine calls using batched Ensemble approach. | [
"Combine",
"calls",
"using",
"batched",
"Ensemble",
"approach",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/ensemble.py#L112-L123 |
223,624 | bcbio/bcbio-nextgen | bcbio/variation/ensemble.py | _group_by_batches | def _group_by_batches(samples, check_fn):
"""Group calls by batches, processing families together during ensemble calling.
"""
batch_groups = collections.defaultdict(list)
extras = []
for data in [x[0] for x in samples]:
if check_fn(data):
batch_groups[multi.get_batch_for_key(data)].append(data)
else:
extras.append([data])
return batch_groups, extras | python | def _group_by_batches(samples, check_fn):
"""Group calls by batches, processing families together during ensemble calling.
"""
batch_groups = collections.defaultdict(list)
extras = []
for data in [x[0] for x in samples]:
if check_fn(data):
batch_groups[multi.get_batch_for_key(data)].append(data)
else:
extras.append([data])
return batch_groups, extras | [
"def",
"_group_by_batches",
"(",
"samples",
",",
"check_fn",
")",
":",
"batch_groups",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"extras",
"=",
"[",
"]",
"for",
"data",
"in",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"samples",
"]",
... | Group calls by batches, processing families together during ensemble calling. | [
"Group",
"calls",
"by",
"batches",
"processing",
"families",
"together",
"during",
"ensemble",
"calling",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/ensemble.py#L133-L143 |
223,625 | bcbio/bcbio-nextgen | bcbio/variation/ensemble.py | _organize_variants | def _organize_variants(samples, batch_id):
"""Retrieve variant calls for all samples, merging batched samples into single VCF.
"""
caller_names = [x["variantcaller"] for x in samples[0]["variants"]]
calls = collections.defaultdict(list)
for data in samples:
for vrn in data["variants"]:
calls[vrn["variantcaller"]].append(vrn["vrn_file"])
data = samples[0]
vrn_files = []
for caller in caller_names:
fnames = calls[caller]
if len(fnames) == 1:
vrn_files.append(fnames[0])
else:
vrn_files.append(population.get_multisample_vcf(fnames, batch_id, caller, data))
return caller_names, vrn_files | python | def _organize_variants(samples, batch_id):
"""Retrieve variant calls for all samples, merging batched samples into single VCF.
"""
caller_names = [x["variantcaller"] for x in samples[0]["variants"]]
calls = collections.defaultdict(list)
for data in samples:
for vrn in data["variants"]:
calls[vrn["variantcaller"]].append(vrn["vrn_file"])
data = samples[0]
vrn_files = []
for caller in caller_names:
fnames = calls[caller]
if len(fnames) == 1:
vrn_files.append(fnames[0])
else:
vrn_files.append(population.get_multisample_vcf(fnames, batch_id, caller, data))
return caller_names, vrn_files | [
"def",
"_organize_variants",
"(",
"samples",
",",
"batch_id",
")",
":",
"caller_names",
"=",
"[",
"x",
"[",
"\"variantcaller\"",
"]",
"for",
"x",
"in",
"samples",
"[",
"0",
"]",
"[",
"\"variants\"",
"]",
"]",
"calls",
"=",
"collections",
".",
"defaultdict"... | Retrieve variant calls for all samples, merging batched samples into single VCF. | [
"Retrieve",
"variant",
"calls",
"for",
"all",
"samples",
"merging",
"batched",
"samples",
"into",
"single",
"VCF",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/ensemble.py#L145-L161 |
223,626 | bcbio/bcbio-nextgen | bcbio/variation/ensemble.py | _handle_somatic_ensemble | def _handle_somatic_ensemble(vrn_file, data):
"""For somatic ensemble, discard normal samples and filtered variants from vcfs.
Only needed for bcbio.variation based ensemble calling.
"""
if tz.get_in(["metadata", "phenotype"], data, "").lower().startswith("tumor"):
vrn_file_temp = vrn_file.replace(".vcf", "_tumorOnly_noFilteredCalls.vcf")
# Select tumor sample and keep only PASS and . calls
vrn_file = vcfutils.select_sample(in_file=vrn_file, sample=data["name"][1],
out_file=vrn_file_temp,
config=data["config"], filters="PASS,.")
return vrn_file | python | def _handle_somatic_ensemble(vrn_file, data):
"""For somatic ensemble, discard normal samples and filtered variants from vcfs.
Only needed for bcbio.variation based ensemble calling.
"""
if tz.get_in(["metadata", "phenotype"], data, "").lower().startswith("tumor"):
vrn_file_temp = vrn_file.replace(".vcf", "_tumorOnly_noFilteredCalls.vcf")
# Select tumor sample and keep only PASS and . calls
vrn_file = vcfutils.select_sample(in_file=vrn_file, sample=data["name"][1],
out_file=vrn_file_temp,
config=data["config"], filters="PASS,.")
return vrn_file | [
"def",
"_handle_somatic_ensemble",
"(",
"vrn_file",
",",
"data",
")",
":",
"if",
"tz",
".",
"get_in",
"(",
"[",
"\"metadata\"",
",",
"\"phenotype\"",
"]",
",",
"data",
",",
"\"\"",
")",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"\"tumor\"",
")",
... | For somatic ensemble, discard normal samples and filtered variants from vcfs.
Only needed for bcbio.variation based ensemble calling. | [
"For",
"somatic",
"ensemble",
"discard",
"normal",
"samples",
"and",
"filtered",
"variants",
"from",
"vcfs",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/ensemble.py#L163-L174 |
223,627 | bcbio/bcbio-nextgen | bcbio/variation/ensemble.py | _run_ensemble | def _run_ensemble(batch_id, vrn_files, config_file, base_dir, ref_file, data):
"""Run an ensemble call using merging and SVM-based approach in bcbio.variation
"""
out_vcf_file = os.path.join(base_dir, "{0}-ensemble.vcf".format(batch_id))
out_bed_file = os.path.join(base_dir, "{0}-callregions.bed".format(batch_id))
work_dir = "%s-work" % os.path.splitext(out_vcf_file)[0]
if not utils.file_exists(out_vcf_file):
_bcbio_variation_ensemble(vrn_files, out_vcf_file, ref_file, config_file,
base_dir, data)
if not utils.file_exists(out_vcf_file):
base_vcf = glob.glob(os.path.join(work_dir, "prep", "*-cfilter.vcf"))[0]
utils.symlink_plus(base_vcf, out_vcf_file)
if not utils.file_exists(out_bed_file):
multi_beds = glob.glob(os.path.join(work_dir, "prep", "*-multicombine.bed"))
if len(multi_beds) > 0:
utils.symlink_plus(multi_beds[0], out_bed_file)
return {"variantcaller": "ensemble",
"vrn_file": out_vcf_file,
"bed_file": out_bed_file if os.path.exists(out_bed_file) else None} | python | def _run_ensemble(batch_id, vrn_files, config_file, base_dir, ref_file, data):
"""Run an ensemble call using merging and SVM-based approach in bcbio.variation
"""
out_vcf_file = os.path.join(base_dir, "{0}-ensemble.vcf".format(batch_id))
out_bed_file = os.path.join(base_dir, "{0}-callregions.bed".format(batch_id))
work_dir = "%s-work" % os.path.splitext(out_vcf_file)[0]
if not utils.file_exists(out_vcf_file):
_bcbio_variation_ensemble(vrn_files, out_vcf_file, ref_file, config_file,
base_dir, data)
if not utils.file_exists(out_vcf_file):
base_vcf = glob.glob(os.path.join(work_dir, "prep", "*-cfilter.vcf"))[0]
utils.symlink_plus(base_vcf, out_vcf_file)
if not utils.file_exists(out_bed_file):
multi_beds = glob.glob(os.path.join(work_dir, "prep", "*-multicombine.bed"))
if len(multi_beds) > 0:
utils.symlink_plus(multi_beds[0], out_bed_file)
return {"variantcaller": "ensemble",
"vrn_file": out_vcf_file,
"bed_file": out_bed_file if os.path.exists(out_bed_file) else None} | [
"def",
"_run_ensemble",
"(",
"batch_id",
",",
"vrn_files",
",",
"config_file",
",",
"base_dir",
",",
"ref_file",
",",
"data",
")",
":",
"out_vcf_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"base_dir",
",",
"\"{0}-ensemble.vcf\"",
".",
"format",
"(",
"... | Run an ensemble call using merging and SVM-based approach in bcbio.variation | [
"Run",
"an",
"ensemble",
"call",
"using",
"merging",
"and",
"SVM",
"-",
"based",
"approach",
"in",
"bcbio",
".",
"variation"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/ensemble.py#L190-L208 |
223,628 | bcbio/bcbio-nextgen | bcbio/variation/ensemble.py | _write_config_file | def _write_config_file(batch_id, caller_names, base_dir, data):
"""Write YAML configuration to generate an ensemble set of combined calls.
"""
config_dir = utils.safe_makedir(os.path.join(base_dir, "config"))
config_file = os.path.join(config_dir, "{0}-ensemble.yaml".format(batch_id))
algorithm = data["config"]["algorithm"]
econfig = {"ensemble": algorithm["ensemble"],
"names": caller_names,
"prep-inputs": False}
intervals = validate.get_analysis_intervals(data, None, base_dir)
if intervals:
econfig["intervals"] = os.path.abspath(intervals)
with open(config_file, "w") as out_handle:
yaml.safe_dump(econfig, out_handle, allow_unicode=False, default_flow_style=False)
return config_file | python | def _write_config_file(batch_id, caller_names, base_dir, data):
"""Write YAML configuration to generate an ensemble set of combined calls.
"""
config_dir = utils.safe_makedir(os.path.join(base_dir, "config"))
config_file = os.path.join(config_dir, "{0}-ensemble.yaml".format(batch_id))
algorithm = data["config"]["algorithm"]
econfig = {"ensemble": algorithm["ensemble"],
"names": caller_names,
"prep-inputs": False}
intervals = validate.get_analysis_intervals(data, None, base_dir)
if intervals:
econfig["intervals"] = os.path.abspath(intervals)
with open(config_file, "w") as out_handle:
yaml.safe_dump(econfig, out_handle, allow_unicode=False, default_flow_style=False)
return config_file | [
"def",
"_write_config_file",
"(",
"batch_id",
",",
"caller_names",
",",
"base_dir",
",",
"data",
")",
":",
"config_dir",
"=",
"utils",
".",
"safe_makedir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"base_dir",
",",
"\"config\"",
")",
")",
"config_file",
"=... | Write YAML configuration to generate an ensemble set of combined calls. | [
"Write",
"YAML",
"configuration",
"to",
"generate",
"an",
"ensemble",
"set",
"of",
"combined",
"calls",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/ensemble.py#L210-L224 |
223,629 | bcbio/bcbio-nextgen | bcbio/variation/ensemble.py | _get_num_pass | def _get_num_pass(data, n):
"""Calculate the number of samples needed to pass ensemble calling.
"""
numpass = tz.get_in(["config", "algorithm", "ensemble", "numpass"], data)
if numpass:
return int(numpass)
trusted_pct = tz.get_in(["config", "algorithm", "ensemble", "trusted_pct"], data)
if trusted_pct:
return int(math.ceil(float(trusted_pct) * n))
return 2 | python | def _get_num_pass(data, n):
"""Calculate the number of samples needed to pass ensemble calling.
"""
numpass = tz.get_in(["config", "algorithm", "ensemble", "numpass"], data)
if numpass:
return int(numpass)
trusted_pct = tz.get_in(["config", "algorithm", "ensemble", "trusted_pct"], data)
if trusted_pct:
return int(math.ceil(float(trusted_pct) * n))
return 2 | [
"def",
"_get_num_pass",
"(",
"data",
",",
"n",
")",
":",
"numpass",
"=",
"tz",
".",
"get_in",
"(",
"[",
"\"config\"",
",",
"\"algorithm\"",
",",
"\"ensemble\"",
",",
"\"numpass\"",
"]",
",",
"data",
")",
"if",
"numpass",
":",
"return",
"int",
"(",
"num... | Calculate the number of samples needed to pass ensemble calling. | [
"Calculate",
"the",
"number",
"of",
"samples",
"needed",
"to",
"pass",
"ensemble",
"calling",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/ensemble.py#L226-L235 |
223,630 | bcbio/bcbio-nextgen | bcbio/variation/ensemble.py | _run_ensemble_intersection | def _run_ensemble_intersection(batch_id, vrn_files, callers, base_dir, edata):
"""Run intersection n out of x based ensemble method using bcbio.variation.recall.
"""
out_vcf_file = os.path.join(base_dir, "{0}-ensemble.vcf.gz".format(batch_id))
if not utils.file_exists(out_vcf_file):
num_pass = _get_num_pass(edata, len(vrn_files))
cmd = [
config_utils.get_program(
"bcbio-variation-recall", edata["config"]),
"ensemble",
"--cores=%s" % edata["config"]["algorithm"].get("num_cores", 1),
"--numpass", str(num_pass),
"--names", ",".join(callers)
]
# Remove filtered calls, do not try to rescue, unless configured
if not tz.get_in(["config", "algorithm", "ensemble", "use_filtered"], edata):
cmd += ["--nofiltered"]
with file_transaction(edata, out_vcf_file) as tx_out_file:
cmd += [tx_out_file, dd.get_ref_file(edata)] + vrn_files
cmd = "%s && %s" % (utils.get_java_clprep(), " ".join(str(x) for x in cmd))
do.run(cmd, "Ensemble intersection calling: %s" % (batch_id))
in_data = utils.deepish_copy(edata)
in_data["vrn_file"] = out_vcf_file
return {"variantcaller": "ensemble",
"vrn_file": out_vcf_file,
"bed_file": None} | python | def _run_ensemble_intersection(batch_id, vrn_files, callers, base_dir, edata):
"""Run intersection n out of x based ensemble method using bcbio.variation.recall.
"""
out_vcf_file = os.path.join(base_dir, "{0}-ensemble.vcf.gz".format(batch_id))
if not utils.file_exists(out_vcf_file):
num_pass = _get_num_pass(edata, len(vrn_files))
cmd = [
config_utils.get_program(
"bcbio-variation-recall", edata["config"]),
"ensemble",
"--cores=%s" % edata["config"]["algorithm"].get("num_cores", 1),
"--numpass", str(num_pass),
"--names", ",".join(callers)
]
# Remove filtered calls, do not try to rescue, unless configured
if not tz.get_in(["config", "algorithm", "ensemble", "use_filtered"], edata):
cmd += ["--nofiltered"]
with file_transaction(edata, out_vcf_file) as tx_out_file:
cmd += [tx_out_file, dd.get_ref_file(edata)] + vrn_files
cmd = "%s && %s" % (utils.get_java_clprep(), " ".join(str(x) for x in cmd))
do.run(cmd, "Ensemble intersection calling: %s" % (batch_id))
in_data = utils.deepish_copy(edata)
in_data["vrn_file"] = out_vcf_file
return {"variantcaller": "ensemble",
"vrn_file": out_vcf_file,
"bed_file": None} | [
"def",
"_run_ensemble_intersection",
"(",
"batch_id",
",",
"vrn_files",
",",
"callers",
",",
"base_dir",
",",
"edata",
")",
":",
"out_vcf_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"base_dir",
",",
"\"{0}-ensemble.vcf.gz\"",
".",
"format",
"(",
"batch_id... | Run intersection n out of x based ensemble method using bcbio.variation.recall. | [
"Run",
"intersection",
"n",
"out",
"of",
"x",
"based",
"ensemble",
"method",
"using",
"bcbio",
".",
"variation",
".",
"recall",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/ensemble.py#L237-L263 |
223,631 | bcbio/bcbio-nextgen | bcbio/variation/joint.py | _get_callable_regions | def _get_callable_regions(data):
"""Retrieve regions to parallelize by from callable regions or chromosomes.
"""
import pybedtools
callable_files = data.get("callable_regions")
if callable_files:
assert len(callable_files) == 1
regions = [(r.chrom, int(r.start), int(r.stop)) for r in pybedtools.BedTool(callable_files[0])]
else:
work_bam = list(tz.take(1, filter(lambda x: x and x.endswith(".bam"), data["work_bams"])))
if work_bam:
with pysam.Samfile(work_bam[0], "rb") as pysam_bam:
regions = [(chrom, 0, length) for (chrom, length) in zip(pysam_bam.references,
pysam_bam.lengths)]
else:
regions = [(r.name, 0, r.size) for r in
ref.file_contigs(dd.get_ref_file(data), data["config"])]
return regions | python | def _get_callable_regions(data):
"""Retrieve regions to parallelize by from callable regions or chromosomes.
"""
import pybedtools
callable_files = data.get("callable_regions")
if callable_files:
assert len(callable_files) == 1
regions = [(r.chrom, int(r.start), int(r.stop)) for r in pybedtools.BedTool(callable_files[0])]
else:
work_bam = list(tz.take(1, filter(lambda x: x and x.endswith(".bam"), data["work_bams"])))
if work_bam:
with pysam.Samfile(work_bam[0], "rb") as pysam_bam:
regions = [(chrom, 0, length) for (chrom, length) in zip(pysam_bam.references,
pysam_bam.lengths)]
else:
regions = [(r.name, 0, r.size) for r in
ref.file_contigs(dd.get_ref_file(data), data["config"])]
return regions | [
"def",
"_get_callable_regions",
"(",
"data",
")",
":",
"import",
"pybedtools",
"callable_files",
"=",
"data",
".",
"get",
"(",
"\"callable_regions\"",
")",
"if",
"callable_files",
":",
"assert",
"len",
"(",
"callable_files",
")",
"==",
"1",
"regions",
"=",
"["... | Retrieve regions to parallelize by from callable regions or chromosomes. | [
"Retrieve",
"regions",
"to",
"parallelize",
"by",
"from",
"callable",
"regions",
"or",
"chromosomes",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/joint.py#L75-L92 |
223,632 | bcbio/bcbio-nextgen | bcbio/variation/joint.py | _split_by_callable_region | def _split_by_callable_region(data):
"""Split by callable or variant regions.
We expect joint calling to be deep in numbers of samples per region, so prefer
splitting aggressively by regions.
"""
batch = tz.get_in(("metadata", "batch"), data)
jointcaller = tz.get_in(("config", "algorithm", "jointcaller"), data)
name = batch if batch else tz.get_in(("rgnames", "sample"), data)
out_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "joint", jointcaller, name))
utils.safe_makedir(os.path.join(out_dir, "inprep"))
parts = []
for feat in _get_callable_regions(data):
region_dir = utils.safe_makedir(os.path.join(out_dir, feat[0]))
region_prep_dir = os.path.join(region_dir, "inprep")
if not os.path.exists(region_prep_dir):
os.symlink(os.path.join(os.pardir, "inprep"), region_prep_dir)
region_outfile = os.path.join(region_dir, "%s-%s.vcf.gz" % (batch, region.to_safestr(feat)))
parts.append((feat, data["work_bams"], data["vrn_files"], region_outfile))
out_file = os.path.join(out_dir, "%s-joint.vcf.gz" % name)
return out_file, parts | python | def _split_by_callable_region(data):
"""Split by callable or variant regions.
We expect joint calling to be deep in numbers of samples per region, so prefer
splitting aggressively by regions.
"""
batch = tz.get_in(("metadata", "batch"), data)
jointcaller = tz.get_in(("config", "algorithm", "jointcaller"), data)
name = batch if batch else tz.get_in(("rgnames", "sample"), data)
out_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "joint", jointcaller, name))
utils.safe_makedir(os.path.join(out_dir, "inprep"))
parts = []
for feat in _get_callable_regions(data):
region_dir = utils.safe_makedir(os.path.join(out_dir, feat[0]))
region_prep_dir = os.path.join(region_dir, "inprep")
if not os.path.exists(region_prep_dir):
os.symlink(os.path.join(os.pardir, "inprep"), region_prep_dir)
region_outfile = os.path.join(region_dir, "%s-%s.vcf.gz" % (batch, region.to_safestr(feat)))
parts.append((feat, data["work_bams"], data["vrn_files"], region_outfile))
out_file = os.path.join(out_dir, "%s-joint.vcf.gz" % name)
return out_file, parts | [
"def",
"_split_by_callable_region",
"(",
"data",
")",
":",
"batch",
"=",
"tz",
".",
"get_in",
"(",
"(",
"\"metadata\"",
",",
"\"batch\"",
")",
",",
"data",
")",
"jointcaller",
"=",
"tz",
".",
"get_in",
"(",
"(",
"\"config\"",
",",
"\"algorithm\"",
",",
"... | Split by callable or variant regions.
We expect joint calling to be deep in numbers of samples per region, so prefer
splitting aggressively by regions. | [
"Split",
"by",
"callable",
"or",
"variant",
"regions",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/joint.py#L94-L114 |
223,633 | bcbio/bcbio-nextgen | bcbio/variation/joint.py | _is_jointcaller_compatible | def _is_jointcaller_compatible(data):
"""Match variant caller inputs to compatible joint callers.
"""
jointcaller = tz.get_in(("config", "algorithm", "jointcaller"), data)
variantcaller = tz.get_in(("config", "algorithm", "variantcaller"), data)
if isinstance(variantcaller, (list, tuple)) and len(variantcaller) == 1:
variantcaller = variantcaller[0]
return jointcaller == "%s-joint" % variantcaller or not variantcaller | python | def _is_jointcaller_compatible(data):
"""Match variant caller inputs to compatible joint callers.
"""
jointcaller = tz.get_in(("config", "algorithm", "jointcaller"), data)
variantcaller = tz.get_in(("config", "algorithm", "variantcaller"), data)
if isinstance(variantcaller, (list, tuple)) and len(variantcaller) == 1:
variantcaller = variantcaller[0]
return jointcaller == "%s-joint" % variantcaller or not variantcaller | [
"def",
"_is_jointcaller_compatible",
"(",
"data",
")",
":",
"jointcaller",
"=",
"tz",
".",
"get_in",
"(",
"(",
"\"config\"",
",",
"\"algorithm\"",
",",
"\"jointcaller\"",
")",
",",
"data",
")",
"variantcaller",
"=",
"tz",
".",
"get_in",
"(",
"(",
"\"config\"... | Match variant caller inputs to compatible joint callers. | [
"Match",
"variant",
"caller",
"inputs",
"to",
"compatible",
"joint",
"callers",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/joint.py#L116-L123 |
223,634 | bcbio/bcbio-nextgen | bcbio/variation/joint.py | square_off | def square_off(samples, run_parallel):
"""Perform joint calling at all variants within a batch.
"""
to_process = []
extras = []
for data in [utils.to_single_data(x) for x in samples]:
added = False
if tz.get_in(("metadata", "batch"), data):
for add in genotype.handle_multiple_callers(data, "jointcaller", require_bam=False):
if _is_jointcaller_compatible(add):
added = True
to_process.append([add])
if not added:
extras.append([data])
processed = grouped_parallel_split_combine(to_process, _split_by_callable_region,
multi.group_batches_joint, run_parallel,
"square_batch_region", "concat_variant_files",
"vrn_file", ["region", "sam_ref", "config"])
return _combine_to_jointcaller(processed) + extras | python | def square_off(samples, run_parallel):
"""Perform joint calling at all variants within a batch.
"""
to_process = []
extras = []
for data in [utils.to_single_data(x) for x in samples]:
added = False
if tz.get_in(("metadata", "batch"), data):
for add in genotype.handle_multiple_callers(data, "jointcaller", require_bam=False):
if _is_jointcaller_compatible(add):
added = True
to_process.append([add])
if not added:
extras.append([data])
processed = grouped_parallel_split_combine(to_process, _split_by_callable_region,
multi.group_batches_joint, run_parallel,
"square_batch_region", "concat_variant_files",
"vrn_file", ["region", "sam_ref", "config"])
return _combine_to_jointcaller(processed) + extras | [
"def",
"square_off",
"(",
"samples",
",",
"run_parallel",
")",
":",
"to_process",
"=",
"[",
"]",
"extras",
"=",
"[",
"]",
"for",
"data",
"in",
"[",
"utils",
".",
"to_single_data",
"(",
"x",
")",
"for",
"x",
"in",
"samples",
"]",
":",
"added",
"=",
... | Perform joint calling at all variants within a batch. | [
"Perform",
"joint",
"calling",
"at",
"all",
"variants",
"within",
"a",
"batch",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/joint.py#L125-L143 |
223,635 | bcbio/bcbio-nextgen | bcbio/variation/joint.py | _combine_to_jointcaller | def _combine_to_jointcaller(processed):
"""Add joint calling information to variants, while collapsing independent regions.
"""
by_vrn_file = collections.OrderedDict()
for data in (x[0] for x in processed):
key = (tz.get_in(("config", "algorithm", "jointcaller"), data), data["vrn_file"])
if key not in by_vrn_file:
by_vrn_file[key] = []
by_vrn_file[key].append(data)
out = []
for grouped_data in by_vrn_file.values():
cur = grouped_data[0]
out.append([cur])
return out | python | def _combine_to_jointcaller(processed):
"""Add joint calling information to variants, while collapsing independent regions.
"""
by_vrn_file = collections.OrderedDict()
for data in (x[0] for x in processed):
key = (tz.get_in(("config", "algorithm", "jointcaller"), data), data["vrn_file"])
if key not in by_vrn_file:
by_vrn_file[key] = []
by_vrn_file[key].append(data)
out = []
for grouped_data in by_vrn_file.values():
cur = grouped_data[0]
out.append([cur])
return out | [
"def",
"_combine_to_jointcaller",
"(",
"processed",
")",
":",
"by_vrn_file",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"for",
"data",
"in",
"(",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"processed",
")",
":",
"key",
"=",
"(",
"tz",
".",
"get_in",
... | Add joint calling information to variants, while collapsing independent regions. | [
"Add",
"joint",
"calling",
"information",
"to",
"variants",
"while",
"collapsing",
"independent",
"regions",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/joint.py#L145-L158 |
223,636 | bcbio/bcbio-nextgen | bcbio/variation/joint.py | square_batch_region | def square_batch_region(data, region, bam_files, vrn_files, out_file):
"""Perform squaring of a batch in a supplied region, with input BAMs
"""
from bcbio.variation import sentieon, strelka2
if not utils.file_exists(out_file):
jointcaller = tz.get_in(("config", "algorithm", "jointcaller"), data)
if jointcaller in ["%s-joint" % x for x in SUPPORTED["general"]]:
_square_batch_bcbio_variation(data, region, bam_files, vrn_files, out_file, "square")
elif jointcaller in ["%s-merge" % x for x in SUPPORTED["general"]]:
_square_batch_bcbio_variation(data, region, bam_files, vrn_files, out_file, "merge")
elif jointcaller in ["%s-joint" % x for x in SUPPORTED["gatk"]]:
gatkjoint.run_region(data, region, vrn_files, out_file)
elif jointcaller in ["%s-joint" % x for x in SUPPORTED["gvcf"]]:
strelka2.run_gvcfgenotyper(data, region, vrn_files, out_file)
elif jointcaller in ["%s-joint" % x for x in SUPPORTED["sentieon"]]:
sentieon.run_gvcftyper(vrn_files, out_file, region, data)
else:
raise ValueError("Unexpected joint calling approach: %s." % jointcaller)
if region:
data["region"] = region
data = _fix_orig_vcf_refs(data)
data["vrn_file"] = out_file
return [data] | python | def square_batch_region(data, region, bam_files, vrn_files, out_file):
"""Perform squaring of a batch in a supplied region, with input BAMs
"""
from bcbio.variation import sentieon, strelka2
if not utils.file_exists(out_file):
jointcaller = tz.get_in(("config", "algorithm", "jointcaller"), data)
if jointcaller in ["%s-joint" % x for x in SUPPORTED["general"]]:
_square_batch_bcbio_variation(data, region, bam_files, vrn_files, out_file, "square")
elif jointcaller in ["%s-merge" % x for x in SUPPORTED["general"]]:
_square_batch_bcbio_variation(data, region, bam_files, vrn_files, out_file, "merge")
elif jointcaller in ["%s-joint" % x for x in SUPPORTED["gatk"]]:
gatkjoint.run_region(data, region, vrn_files, out_file)
elif jointcaller in ["%s-joint" % x for x in SUPPORTED["gvcf"]]:
strelka2.run_gvcfgenotyper(data, region, vrn_files, out_file)
elif jointcaller in ["%s-joint" % x for x in SUPPORTED["sentieon"]]:
sentieon.run_gvcftyper(vrn_files, out_file, region, data)
else:
raise ValueError("Unexpected joint calling approach: %s." % jointcaller)
if region:
data["region"] = region
data = _fix_orig_vcf_refs(data)
data["vrn_file"] = out_file
return [data] | [
"def",
"square_batch_region",
"(",
"data",
",",
"region",
",",
"bam_files",
",",
"vrn_files",
",",
"out_file",
")",
":",
"from",
"bcbio",
".",
"variation",
"import",
"sentieon",
",",
"strelka2",
"if",
"not",
"utils",
".",
"file_exists",
"(",
"out_file",
")",... | Perform squaring of a batch in a supplied region, with input BAMs | [
"Perform",
"squaring",
"of",
"a",
"batch",
"in",
"a",
"supplied",
"region",
"with",
"input",
"BAMs"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/joint.py#L172-L194 |
223,637 | bcbio/bcbio-nextgen | bcbio/variation/joint.py | _fix_orig_vcf_refs | def _fix_orig_vcf_refs(data):
"""Supply references to initial variantcalls if run in addition to batching.
"""
variantcaller = tz.get_in(("config", "algorithm", "variantcaller"), data)
if variantcaller:
data["vrn_file_orig"] = data["vrn_file"]
for i, sub in enumerate(data.get("group_orig", [])):
sub_vrn = sub.pop("vrn_file", None)
if sub_vrn:
sub["vrn_file_orig"] = sub_vrn
data["group_orig"][i] = sub
return data | python | def _fix_orig_vcf_refs(data):
"""Supply references to initial variantcalls if run in addition to batching.
"""
variantcaller = tz.get_in(("config", "algorithm", "variantcaller"), data)
if variantcaller:
data["vrn_file_orig"] = data["vrn_file"]
for i, sub in enumerate(data.get("group_orig", [])):
sub_vrn = sub.pop("vrn_file", None)
if sub_vrn:
sub["vrn_file_orig"] = sub_vrn
data["group_orig"][i] = sub
return data | [
"def",
"_fix_orig_vcf_refs",
"(",
"data",
")",
":",
"variantcaller",
"=",
"tz",
".",
"get_in",
"(",
"(",
"\"config\"",
",",
"\"algorithm\"",
",",
"\"variantcaller\"",
")",
",",
"data",
")",
"if",
"variantcaller",
":",
"data",
"[",
"\"vrn_file_orig\"",
"]",
"... | Supply references to initial variantcalls if run in addition to batching. | [
"Supply",
"references",
"to",
"initial",
"variantcalls",
"if",
"run",
"in",
"addition",
"to",
"batching",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/joint.py#L196-L207 |
223,638 | bcbio/bcbio-nextgen | bcbio/variation/joint.py | _square_batch_bcbio_variation | def _square_batch_bcbio_variation(data, region, bam_files, vrn_files, out_file,
todo="square"):
"""Run squaring or merging analysis using bcbio.variation.recall.
"""
ref_file = tz.get_in(("reference", "fasta", "base"), data)
cores = tz.get_in(("config", "algorithm", "num_cores"), data, 1)
resources = config_utils.get_resources("bcbio-variation-recall", data["config"])
# adjust memory by cores but leave room for run program memory
memcores = int(math.ceil(float(cores) / 5.0))
jvm_opts = config_utils.adjust_opts(resources.get("jvm_opts", ["-Xms250m", "-Xmx2g"]),
{"algorithm": {"memory_adjust": {"direction": "increase",
"magnitude": memcores}}})
# Write unique VCFs and BAMs to input file
input_file = "%s-inputs.txt" % os.path.splitext(out_file)[0]
with open(input_file, "w") as out_handle:
out_handle.write("\n".join(sorted(list(set(vrn_files)))) + "\n")
if todo == "square":
out_handle.write("\n".join(sorted(list(set(bam_files)))) + "\n")
variantcaller = tz.get_in(("config", "algorithm", "jointcaller"), data).replace("-joint", "")
cmd = ["bcbio-variation-recall", todo] + jvm_opts + broad.get_default_jvm_opts() + \
["-c", cores, "-r", bamprep.region_to_gatk(region)]
if todo == "square":
cmd += ["--caller", variantcaller]
cmd += [out_file, ref_file, input_file]
bcbio_env = utils.get_bcbio_env()
cmd = " ".join(str(x) for x in cmd)
do.run(cmd, "%s in region: %s" % (cmd, bamprep.region_to_gatk(region)), env=bcbio_env)
return out_file | python | def _square_batch_bcbio_variation(data, region, bam_files, vrn_files, out_file,
todo="square"):
"""Run squaring or merging analysis using bcbio.variation.recall.
"""
ref_file = tz.get_in(("reference", "fasta", "base"), data)
cores = tz.get_in(("config", "algorithm", "num_cores"), data, 1)
resources = config_utils.get_resources("bcbio-variation-recall", data["config"])
# adjust memory by cores but leave room for run program memory
memcores = int(math.ceil(float(cores) / 5.0))
jvm_opts = config_utils.adjust_opts(resources.get("jvm_opts", ["-Xms250m", "-Xmx2g"]),
{"algorithm": {"memory_adjust": {"direction": "increase",
"magnitude": memcores}}})
# Write unique VCFs and BAMs to input file
input_file = "%s-inputs.txt" % os.path.splitext(out_file)[0]
with open(input_file, "w") as out_handle:
out_handle.write("\n".join(sorted(list(set(vrn_files)))) + "\n")
if todo == "square":
out_handle.write("\n".join(sorted(list(set(bam_files)))) + "\n")
variantcaller = tz.get_in(("config", "algorithm", "jointcaller"), data).replace("-joint", "")
cmd = ["bcbio-variation-recall", todo] + jvm_opts + broad.get_default_jvm_opts() + \
["-c", cores, "-r", bamprep.region_to_gatk(region)]
if todo == "square":
cmd += ["--caller", variantcaller]
cmd += [out_file, ref_file, input_file]
bcbio_env = utils.get_bcbio_env()
cmd = " ".join(str(x) for x in cmd)
do.run(cmd, "%s in region: %s" % (cmd, bamprep.region_to_gatk(region)), env=bcbio_env)
return out_file | [
"def",
"_square_batch_bcbio_variation",
"(",
"data",
",",
"region",
",",
"bam_files",
",",
"vrn_files",
",",
"out_file",
",",
"todo",
"=",
"\"square\"",
")",
":",
"ref_file",
"=",
"tz",
".",
"get_in",
"(",
"(",
"\"reference\"",
",",
"\"fasta\"",
",",
"\"base... | Run squaring or merging analysis using bcbio.variation.recall. | [
"Run",
"squaring",
"or",
"merging",
"analysis",
"using",
"bcbio",
".",
"variation",
".",
"recall",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/joint.py#L209-L236 |
223,639 | bcbio/bcbio-nextgen | bcbio/structural/seq2c.py | run | def run(items):
"""Normalization and log2 ratio calculation plus CNV calling for full cohort.
- Combine coverage of each region for each sample
- Prepare read counts for each sample
- Normalize coverages in cohort by gene and sample, and calculate log2 ratios
- Call amplifications and deletions
"""
items = [utils.to_single_data(x) for x in items]
work_dir = _sv_workdir(items[0])
input_backs = list(set(filter(lambda x: x is not None,
[dd.get_background_cnv_reference(d, "seq2c") for d in items])))
coverage_file = _combine_coverages(items, work_dir, input_backs)
read_mapping_file = _calculate_mapping_reads(items, work_dir, input_backs)
normal_names = []
if input_backs:
with open(input_backs[0]) as in_handle:
for line in in_handle:
if len(line.split()) == 2:
normal_names.append(line.split()[0])
normal_names += [dd.get_sample_name(x) for x in items if population.get_affected_status(x) == 1]
seq2c_calls_file = _call_cnv(items, work_dir, read_mapping_file, coverage_file, normal_names)
items = _split_cnv(items, seq2c_calls_file, read_mapping_file, coverage_file)
return items | python | def run(items):
"""Normalization and log2 ratio calculation plus CNV calling for full cohort.
- Combine coverage of each region for each sample
- Prepare read counts for each sample
- Normalize coverages in cohort by gene and sample, and calculate log2 ratios
- Call amplifications and deletions
"""
items = [utils.to_single_data(x) for x in items]
work_dir = _sv_workdir(items[0])
input_backs = list(set(filter(lambda x: x is not None,
[dd.get_background_cnv_reference(d, "seq2c") for d in items])))
coverage_file = _combine_coverages(items, work_dir, input_backs)
read_mapping_file = _calculate_mapping_reads(items, work_dir, input_backs)
normal_names = []
if input_backs:
with open(input_backs[0]) as in_handle:
for line in in_handle:
if len(line.split()) == 2:
normal_names.append(line.split()[0])
normal_names += [dd.get_sample_name(x) for x in items if population.get_affected_status(x) == 1]
seq2c_calls_file = _call_cnv(items, work_dir, read_mapping_file, coverage_file, normal_names)
items = _split_cnv(items, seq2c_calls_file, read_mapping_file, coverage_file)
return items | [
"def",
"run",
"(",
"items",
")",
":",
"items",
"=",
"[",
"utils",
".",
"to_single_data",
"(",
"x",
")",
"for",
"x",
"in",
"items",
"]",
"work_dir",
"=",
"_sv_workdir",
"(",
"items",
"[",
"0",
"]",
")",
"input_backs",
"=",
"list",
"(",
"set",
"(",
... | Normalization and log2 ratio calculation plus CNV calling for full cohort.
- Combine coverage of each region for each sample
- Prepare read counts for each sample
- Normalize coverages in cohort by gene and sample, and calculate log2 ratios
- Call amplifications and deletions | [
"Normalization",
"and",
"log2",
"ratio",
"calculation",
"plus",
"CNV",
"calling",
"for",
"full",
"cohort",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/seq2c.py#L41-L65 |
223,640 | bcbio/bcbio-nextgen | bcbio/structural/seq2c.py | prep_seq2c_bed | def prep_seq2c_bed(data):
"""Selecting the bed file, cleaning, and properly annotating for Seq2C
"""
if dd.get_background_cnv_reference(data, "seq2c"):
bed_file = _background_to_bed(dd.get_background_cnv_reference(data, "seq2c"), data)
else:
bed_file = regions.get_sv_bed(data)
if bed_file:
bed_file = bedutils.clean_file(bed_file, data, prefix="svregions-")
else:
bed_file = bedutils.clean_file(dd.get_variant_regions(data), data)
if not bed_file:
return None
col_num = bt.BedTool(bed_file).field_count()
if col_num < 4:
annotated_file = annotate.add_genes(bed_file, data, max_distance=0)
if annotated_file == bed_file:
raise ValueError("BED file for Seq2C must be annotated with gene names, "
"however the input BED is 3-columns and we have no transcript "
"data to annotate with " + bed_file)
annotated_file = annotate.gene_one_per_line(annotated_file, data)
else:
annotated_file = bed_file
ready_file = "%s-seq2cclean.bed" % (utils.splitext_plus(annotated_file)[0])
if not utils.file_uptodate(ready_file, annotated_file):
bed = bt.BedTool(annotated_file)
if col_num > 4 and col_num != 8:
bed = bed.cut(range(4))
bed = bed.filter(lambda x: x.name not in ["", ".", "-"])
with file_transaction(data, ready_file) as tx_out_file:
bed.saveas(tx_out_file)
logger.debug("Saved Seq2C clean annotated ready input BED into " + ready_file)
return ready_file | python | def prep_seq2c_bed(data):
"""Selecting the bed file, cleaning, and properly annotating for Seq2C
"""
if dd.get_background_cnv_reference(data, "seq2c"):
bed_file = _background_to_bed(dd.get_background_cnv_reference(data, "seq2c"), data)
else:
bed_file = regions.get_sv_bed(data)
if bed_file:
bed_file = bedutils.clean_file(bed_file, data, prefix="svregions-")
else:
bed_file = bedutils.clean_file(dd.get_variant_regions(data), data)
if not bed_file:
return None
col_num = bt.BedTool(bed_file).field_count()
if col_num < 4:
annotated_file = annotate.add_genes(bed_file, data, max_distance=0)
if annotated_file == bed_file:
raise ValueError("BED file for Seq2C must be annotated with gene names, "
"however the input BED is 3-columns and we have no transcript "
"data to annotate with " + bed_file)
annotated_file = annotate.gene_one_per_line(annotated_file, data)
else:
annotated_file = bed_file
ready_file = "%s-seq2cclean.bed" % (utils.splitext_plus(annotated_file)[0])
if not utils.file_uptodate(ready_file, annotated_file):
bed = bt.BedTool(annotated_file)
if col_num > 4 and col_num != 8:
bed = bed.cut(range(4))
bed = bed.filter(lambda x: x.name not in ["", ".", "-"])
with file_transaction(data, ready_file) as tx_out_file:
bed.saveas(tx_out_file)
logger.debug("Saved Seq2C clean annotated ready input BED into " + ready_file)
return ready_file | [
"def",
"prep_seq2c_bed",
"(",
"data",
")",
":",
"if",
"dd",
".",
"get_background_cnv_reference",
"(",
"data",
",",
"\"seq2c\"",
")",
":",
"bed_file",
"=",
"_background_to_bed",
"(",
"dd",
".",
"get_background_cnv_reference",
"(",
"data",
",",
"\"seq2c\"",
")",
... | Selecting the bed file, cleaning, and properly annotating for Seq2C | [
"Selecting",
"the",
"bed",
"file",
"cleaning",
"and",
"properly",
"annotating",
"for",
"Seq2C"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/seq2c.py#L67-L102 |
223,641 | bcbio/bcbio-nextgen | bcbio/structural/seq2c.py | _background_to_bed | def _background_to_bed(back_file, data):
"""Convert a seq2c background file with calls into BED regions for coverage.
seq2c background files are a concatenation of mapping and sample_coverages from
potentially multiple samples. We use the coverage information from the first
sample to translate into BED.
"""
out_file = os.path.join(utils.safe_makedir(os.path.join(dd.get_work_dir(data), "bedprep")),
"%s-regions.bed" % utils.splitext_plus(os.path.basename(back_file))[0])
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
with open(back_file) as in_handle:
with open(tx_out_file, "w") as out_handle:
sample = in_handle.readline().split("\t")[0]
for line in in_handle:
if line.startswith(sample) and len(line.split()) >= 5:
_, gene, chrom, start, end = line.split()[:5]
out_handle.write("%s\n" % ("\t".join([chrom, str(int(start) - 1), end, gene])))
return out_file | python | def _background_to_bed(back_file, data):
"""Convert a seq2c background file with calls into BED regions for coverage.
seq2c background files are a concatenation of mapping and sample_coverages from
potentially multiple samples. We use the coverage information from the first
sample to translate into BED.
"""
out_file = os.path.join(utils.safe_makedir(os.path.join(dd.get_work_dir(data), "bedprep")),
"%s-regions.bed" % utils.splitext_plus(os.path.basename(back_file))[0])
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
with open(back_file) as in_handle:
with open(tx_out_file, "w") as out_handle:
sample = in_handle.readline().split("\t")[0]
for line in in_handle:
if line.startswith(sample) and len(line.split()) >= 5:
_, gene, chrom, start, end = line.split()[:5]
out_handle.write("%s\n" % ("\t".join([chrom, str(int(start) - 1), end, gene])))
return out_file | [
"def",
"_background_to_bed",
"(",
"back_file",
",",
"data",
")",
":",
"out_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"utils",
".",
"safe_makedir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dd",
".",
"get_work_dir",
"(",
"data",
")",
",",
"\"b... | Convert a seq2c background file with calls into BED regions for coverage.
seq2c background files are a concatenation of mapping and sample_coverages from
potentially multiple samples. We use the coverage information from the first
sample to translate into BED. | [
"Convert",
"a",
"seq2c",
"background",
"file",
"with",
"calls",
"into",
"BED",
"regions",
"for",
"coverage",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/seq2c.py#L104-L122 |
223,642 | bcbio/bcbio-nextgen | bcbio/structural/seq2c.py | _get_seq2c_options | def _get_seq2c_options(data):
"""Get adjustable, through resources, or default options for seq2c.
"""
cov2lr_possible_opts = ["-F"]
defaults = {}
ropts = config_utils.get_resources("seq2c", data["config"]).get("options", [])
assert len(ropts) % 2 == 0, "Expect even number of options for seq2c" % ropts
defaults.update(dict(tz.partition(2, ropts)))
cov2lr_out, lr2gene_out = [], []
for k, v in defaults.items():
if k in cov2lr_possible_opts:
cov2lr_out += [str(k), str(v)]
else:
lr2gene_out += [str(k), str(v)]
return cov2lr_out, lr2gene_out | python | def _get_seq2c_options(data):
"""Get adjustable, through resources, or default options for seq2c.
"""
cov2lr_possible_opts = ["-F"]
defaults = {}
ropts = config_utils.get_resources("seq2c", data["config"]).get("options", [])
assert len(ropts) % 2 == 0, "Expect even number of options for seq2c" % ropts
defaults.update(dict(tz.partition(2, ropts)))
cov2lr_out, lr2gene_out = [], []
for k, v in defaults.items():
if k in cov2lr_possible_opts:
cov2lr_out += [str(k), str(v)]
else:
lr2gene_out += [str(k), str(v)]
return cov2lr_out, lr2gene_out | [
"def",
"_get_seq2c_options",
"(",
"data",
")",
":",
"cov2lr_possible_opts",
"=",
"[",
"\"-F\"",
"]",
"defaults",
"=",
"{",
"}",
"ropts",
"=",
"config_utils",
".",
"get_resources",
"(",
"\"seq2c\"",
",",
"data",
"[",
"\"config\"",
"]",
")",
".",
"get",
"(",... | Get adjustable, through resources, or default options for seq2c. | [
"Get",
"adjustable",
"through",
"resources",
"or",
"default",
"options",
"for",
"seq2c",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/seq2c.py#L124-L138 |
223,643 | bcbio/bcbio-nextgen | bcbio/structural/seq2c.py | to_vcf | def to_vcf(in_tsv, data):
"""Convert seq2c output file into BED output.
"""
call_convert = {"Amp": "DUP", "Del": "DEL"}
out_file = "%s.vcf" % utils.splitext_plus(in_tsv)[0]
if not utils.file_uptodate(out_file, in_tsv):
with file_transaction(data, out_file) as tx_out_file:
with open(in_tsv) as in_handle:
with open(tx_out_file, "w") as out_handle:
out_handle.write(VCF_HEADER + "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t%s\n"
% (dd.get_sample_name(data)))
header = in_handle.readline().split("\t")
for cur in (dict(zip(header, l.split("\t"))) for l in in_handle):
if cur["Amp_Del"] in call_convert:
svtype = call_convert[cur["Amp_Del"]]
info = "SVTYPE=%s;END=%s;SVLEN=%s;FOLD_CHANGE_LOG=%s;PROBES=%s;GENE=%s" % (
svtype, cur["End"], int(cur["End"]) - int(cur["Start"]),
cur["Log2ratio"], cur["Ab_Seg"], cur["Gene"])
out_handle.write("\t".join([cur["Chr"], cur["Start"], ".", "N", "<%s>" % (svtype),
".", ".", info, "GT", "1/1"]) + "\n")
return vcfutils.sort_by_ref(out_file, data) | python | def to_vcf(in_tsv, data):
"""Convert seq2c output file into BED output.
"""
call_convert = {"Amp": "DUP", "Del": "DEL"}
out_file = "%s.vcf" % utils.splitext_plus(in_tsv)[0]
if not utils.file_uptodate(out_file, in_tsv):
with file_transaction(data, out_file) as tx_out_file:
with open(in_tsv) as in_handle:
with open(tx_out_file, "w") as out_handle:
out_handle.write(VCF_HEADER + "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t%s\n"
% (dd.get_sample_name(data)))
header = in_handle.readline().split("\t")
for cur in (dict(zip(header, l.split("\t"))) for l in in_handle):
if cur["Amp_Del"] in call_convert:
svtype = call_convert[cur["Amp_Del"]]
info = "SVTYPE=%s;END=%s;SVLEN=%s;FOLD_CHANGE_LOG=%s;PROBES=%s;GENE=%s" % (
svtype, cur["End"], int(cur["End"]) - int(cur["Start"]),
cur["Log2ratio"], cur["Ab_Seg"], cur["Gene"])
out_handle.write("\t".join([cur["Chr"], cur["Start"], ".", "N", "<%s>" % (svtype),
".", ".", info, "GT", "1/1"]) + "\n")
return vcfutils.sort_by_ref(out_file, data) | [
"def",
"to_vcf",
"(",
"in_tsv",
",",
"data",
")",
":",
"call_convert",
"=",
"{",
"\"Amp\"",
":",
"\"DUP\"",
",",
"\"Del\"",
":",
"\"DEL\"",
"}",
"out_file",
"=",
"\"%s.vcf\"",
"%",
"utils",
".",
"splitext_plus",
"(",
"in_tsv",
")",
"[",
"0",
"]",
"if",... | Convert seq2c output file into BED output. | [
"Convert",
"seq2c",
"output",
"file",
"into",
"BED",
"output",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/seq2c.py#L198-L218 |
223,644 | bcbio/bcbio-nextgen | bcbio/structural/seq2c.py | _combine_coverages | def _combine_coverages(items, work_dir, input_backs=None):
"""Combine coverage cnns calculated for individual inputs into single file.
Optionally moves over pre-calculated coverage samples from a background file.
"""
out_file = os.path.join(work_dir, "sample_coverages.txt")
if not utils.file_exists(out_file):
with file_transaction(items[0], out_file) as tx_out_file:
with open(tx_out_file, 'w') as out_f:
for data in items:
cov_file = tz.get_in(["depth", "bins", "seq2c"], data)
with open(cov_file) as cov_f:
out_f.write(cov_f.read())
if input_backs:
for input_back in input_backs:
with open(input_back) as in_handle:
for line in in_handle:
if len(line.split()) >= 4:
out_f.write(line)
return out_file | python | def _combine_coverages(items, work_dir, input_backs=None):
"""Combine coverage cnns calculated for individual inputs into single file.
Optionally moves over pre-calculated coverage samples from a background file.
"""
out_file = os.path.join(work_dir, "sample_coverages.txt")
if not utils.file_exists(out_file):
with file_transaction(items[0], out_file) as tx_out_file:
with open(tx_out_file, 'w') as out_f:
for data in items:
cov_file = tz.get_in(["depth", "bins", "seq2c"], data)
with open(cov_file) as cov_f:
out_f.write(cov_f.read())
if input_backs:
for input_back in input_backs:
with open(input_back) as in_handle:
for line in in_handle:
if len(line.split()) >= 4:
out_f.write(line)
return out_file | [
"def",
"_combine_coverages",
"(",
"items",
",",
"work_dir",
",",
"input_backs",
"=",
"None",
")",
":",
"out_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"\"sample_coverages.txt\"",
")",
"if",
"not",
"utils",
".",
"file_exists",
"(",
"o... | Combine coverage cnns calculated for individual inputs into single file.
Optionally moves over pre-calculated coverage samples from a background file. | [
"Combine",
"coverage",
"cnns",
"calculated",
"for",
"individual",
"inputs",
"into",
"single",
"file",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/seq2c.py#L290-L309 |
223,645 | bcbio/bcbio-nextgen | bcbio/structural/seq2c.py | _calculate_mapping_reads | def _calculate_mapping_reads(items, work_dir, input_backs=None):
"""Calculate read counts from samtools idxstats for each sample.
Optionally moves over pre-calculated mapping counts from a background file.
"""
out_file = os.path.join(work_dir, "mapping_reads.txt")
if not utils.file_exists(out_file):
lines = []
for data in items:
count = 0
for line in subprocess.check_output([
"samtools", "idxstats", dd.get_align_bam(data)]).decode().split("\n"):
if line.strip():
count += int(line.split("\t")[2])
lines.append("%s\t%s" % (dd.get_sample_name(data), count))
with file_transaction(items[0], out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
out_handle.write("\n".join(lines) + "\n")
if input_backs:
for input_back in input_backs:
with open(input_back) as in_handle:
for line in in_handle:
if len(line.split()) == 2:
out_handle.write(line)
return out_file | python | def _calculate_mapping_reads(items, work_dir, input_backs=None):
"""Calculate read counts from samtools idxstats for each sample.
Optionally moves over pre-calculated mapping counts from a background file.
"""
out_file = os.path.join(work_dir, "mapping_reads.txt")
if not utils.file_exists(out_file):
lines = []
for data in items:
count = 0
for line in subprocess.check_output([
"samtools", "idxstats", dd.get_align_bam(data)]).decode().split("\n"):
if line.strip():
count += int(line.split("\t")[2])
lines.append("%s\t%s" % (dd.get_sample_name(data), count))
with file_transaction(items[0], out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
out_handle.write("\n".join(lines) + "\n")
if input_backs:
for input_back in input_backs:
with open(input_back) as in_handle:
for line in in_handle:
if len(line.split()) == 2:
out_handle.write(line)
return out_file | [
"def",
"_calculate_mapping_reads",
"(",
"items",
",",
"work_dir",
",",
"input_backs",
"=",
"None",
")",
":",
"out_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"\"mapping_reads.txt\"",
")",
"if",
"not",
"utils",
".",
"file_exists",
"(",
... | Calculate read counts from samtools idxstats for each sample.
Optionally moves over pre-calculated mapping counts from a background file. | [
"Calculate",
"read",
"counts",
"from",
"samtools",
"idxstats",
"for",
"each",
"sample",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/seq2c.py#L311-L335 |
223,646 | bcbio/bcbio-nextgen | bcbio/pipeline/genome.py | get_resources | def get_resources(genome, ref_file, data):
"""Retrieve genome information from a genome-references.yaml file.
"""
base_dir = os.path.normpath(os.path.dirname(ref_file))
resource_file = os.path.join(base_dir, "%s-resources.yaml" % genome.replace("-test", ""))
if not os.path.exists(resource_file):
raise IOError("Did not find resource file for %s: %s\n"
"To update bcbio_nextgen.py with genome resources for standard builds, run:\n"
"bcbio_nextgen.py upgrade -u skip"
% (genome, resource_file))
with open(resource_file) as in_handle:
resources = yaml.safe_load(in_handle)
def resource_file_path(x):
if isinstance(x, six.string_types) and os.path.exists(os.path.join(base_dir, x)):
return os.path.normpath(os.path.join(base_dir, x))
return x
cleaned = utils.dictapply(resources, resource_file_path)
return ensure_annotations(cleaned, data) | python | def get_resources(genome, ref_file, data):
"""Retrieve genome information from a genome-references.yaml file.
"""
base_dir = os.path.normpath(os.path.dirname(ref_file))
resource_file = os.path.join(base_dir, "%s-resources.yaml" % genome.replace("-test", ""))
if not os.path.exists(resource_file):
raise IOError("Did not find resource file for %s: %s\n"
"To update bcbio_nextgen.py with genome resources for standard builds, run:\n"
"bcbio_nextgen.py upgrade -u skip"
% (genome, resource_file))
with open(resource_file) as in_handle:
resources = yaml.safe_load(in_handle)
def resource_file_path(x):
if isinstance(x, six.string_types) and os.path.exists(os.path.join(base_dir, x)):
return os.path.normpath(os.path.join(base_dir, x))
return x
cleaned = utils.dictapply(resources, resource_file_path)
return ensure_annotations(cleaned, data) | [
"def",
"get_resources",
"(",
"genome",
",",
"ref_file",
",",
"data",
")",
":",
"base_dir",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"ref_file",
")",
")",
"resource_file",
"=",
"os",
".",
"path",
".",
"join... | Retrieve genome information from a genome-references.yaml file. | [
"Retrieve",
"genome",
"information",
"from",
"a",
"genome",
"-",
"references",
".",
"yaml",
"file",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/genome.py#L24-L42 |
223,647 | bcbio/bcbio-nextgen | bcbio/pipeline/genome.py | add_required_resources | def add_required_resources(resources):
"""Add default or empty values for required resources referenced in CWL
"""
required = [["variation", "cosmic"], ["variation", "clinvar"], ["variation", "dbsnp"],
["variation", "lcr"], ["variation", "polyx"],
["variation", "encode_blacklist"], ["variation", "gc_profile"],
["variation", "germline_het_pon"],
["variation", "train_hapmap"], ["variation", "train_indels"],
["variation", "editing"], ["variation", "exac"], ["variation", "esp"],
["variation", "gnomad_exome"],
["variation", "1000g"], ["aliases", "human"]]
for key in required:
if not tz.get_in(key, resources):
resources = tz.update_in(resources, key, lambda x: None)
return resources | python | def add_required_resources(resources):
"""Add default or empty values for required resources referenced in CWL
"""
required = [["variation", "cosmic"], ["variation", "clinvar"], ["variation", "dbsnp"],
["variation", "lcr"], ["variation", "polyx"],
["variation", "encode_blacklist"], ["variation", "gc_profile"],
["variation", "germline_het_pon"],
["variation", "train_hapmap"], ["variation", "train_indels"],
["variation", "editing"], ["variation", "exac"], ["variation", "esp"],
["variation", "gnomad_exome"],
["variation", "1000g"], ["aliases", "human"]]
for key in required:
if not tz.get_in(key, resources):
resources = tz.update_in(resources, key, lambda x: None)
return resources | [
"def",
"add_required_resources",
"(",
"resources",
")",
":",
"required",
"=",
"[",
"[",
"\"variation\"",
",",
"\"cosmic\"",
"]",
",",
"[",
"\"variation\"",
",",
"\"clinvar\"",
"]",
",",
"[",
"\"variation\"",
",",
"\"dbsnp\"",
"]",
",",
"[",
"\"variation\"",
... | Add default or empty values for required resources referenced in CWL | [
"Add",
"default",
"or",
"empty",
"values",
"for",
"required",
"resources",
"referenced",
"in",
"CWL"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/genome.py#L44-L58 |
223,648 | bcbio/bcbio-nextgen | bcbio/pipeline/genome.py | ensure_annotations | def ensure_annotations(resources, data):
"""Prepare any potentially missing annotations for downstream processing in a local directory.
"""
transcript_gff = tz.get_in(["rnaseq", "transcripts"], resources)
if transcript_gff and utils.file_exists(transcript_gff):
out_dir = os.path.join(tz.get_in(["dirs", "work"], data),
"inputs", "data", "annotations")
resources["rnaseq"]["gene_bed"] = gtf.gtf_to_bed(transcript_gff, out_dir)
return resources | python | def ensure_annotations(resources, data):
"""Prepare any potentially missing annotations for downstream processing in a local directory.
"""
transcript_gff = tz.get_in(["rnaseq", "transcripts"], resources)
if transcript_gff and utils.file_exists(transcript_gff):
out_dir = os.path.join(tz.get_in(["dirs", "work"], data),
"inputs", "data", "annotations")
resources["rnaseq"]["gene_bed"] = gtf.gtf_to_bed(transcript_gff, out_dir)
return resources | [
"def",
"ensure_annotations",
"(",
"resources",
",",
"data",
")",
":",
"transcript_gff",
"=",
"tz",
".",
"get_in",
"(",
"[",
"\"rnaseq\"",
",",
"\"transcripts\"",
"]",
",",
"resources",
")",
"if",
"transcript_gff",
"and",
"utils",
".",
"file_exists",
"(",
"tr... | Prepare any potentially missing annotations for downstream processing in a local directory. | [
"Prepare",
"any",
"potentially",
"missing",
"annotations",
"for",
"downstream",
"processing",
"in",
"a",
"local",
"directory",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/genome.py#L60-L68 |
223,649 | bcbio/bcbio-nextgen | bcbio/pipeline/genome.py | abs_file_paths | def abs_file_paths(xs, base_dir=None, ignore_keys=None, fileonly_keys=None, cur_key=None,
do_download=True):
"""Normalize any file paths found in a subdirectory of configuration input.
base_dir -- directory to normalize relative paths to
ignore_keys -- algorithm key names to ignore normalize for (keywords, not files/directories)
fileonly_keys -- algorithm key names to only expand files (not directories)
cur_key -- current key when calling recursively
"""
ignore_keys = set([]) if ignore_keys is None else set(ignore_keys)
fileonly_keys = set([]) if fileonly_keys is None else set(fileonly_keys)
if base_dir is None:
base_dir = os.getcwd()
orig_dir = os.getcwd()
os.chdir(base_dir)
input_dir = os.path.join(base_dir, "inputs")
if isinstance(xs, dict):
out = {}
for k, v in xs.items():
if k not in ignore_keys and v and isinstance(v, six.string_types):
if v.lower() == "none":
out[k] = None
else:
out[k] = abs_file_paths(v, base_dir, ignore_keys, fileonly_keys, k, do_download=do_download)
elif isinstance(v, (list, tuple)):
out[k] = [abs_file_paths(x, base_dir, ignore_keys, fileonly_keys, k, do_download=do_download)
for x in v]
else:
out[k] = v
elif isinstance(xs, six.string_types):
if os.path.exists(xs) or (do_download and objectstore.is_remote(xs)):
dl = objectstore.download(xs, input_dir)
if dl and cur_key not in ignore_keys and not (cur_key in fileonly_keys and not os.path.isfile(dl)):
out = os.path.normpath(os.path.join(base_dir, dl))
else:
out = xs
else:
out = xs
else:
out = xs
os.chdir(orig_dir)
return out | python | def abs_file_paths(xs, base_dir=None, ignore_keys=None, fileonly_keys=None, cur_key=None,
do_download=True):
"""Normalize any file paths found in a subdirectory of configuration input.
base_dir -- directory to normalize relative paths to
ignore_keys -- algorithm key names to ignore normalize for (keywords, not files/directories)
fileonly_keys -- algorithm key names to only expand files (not directories)
cur_key -- current key when calling recursively
"""
ignore_keys = set([]) if ignore_keys is None else set(ignore_keys)
fileonly_keys = set([]) if fileonly_keys is None else set(fileonly_keys)
if base_dir is None:
base_dir = os.getcwd()
orig_dir = os.getcwd()
os.chdir(base_dir)
input_dir = os.path.join(base_dir, "inputs")
if isinstance(xs, dict):
out = {}
for k, v in xs.items():
if k not in ignore_keys and v and isinstance(v, six.string_types):
if v.lower() == "none":
out[k] = None
else:
out[k] = abs_file_paths(v, base_dir, ignore_keys, fileonly_keys, k, do_download=do_download)
elif isinstance(v, (list, tuple)):
out[k] = [abs_file_paths(x, base_dir, ignore_keys, fileonly_keys, k, do_download=do_download)
for x in v]
else:
out[k] = v
elif isinstance(xs, six.string_types):
if os.path.exists(xs) or (do_download and objectstore.is_remote(xs)):
dl = objectstore.download(xs, input_dir)
if dl and cur_key not in ignore_keys and not (cur_key in fileonly_keys and not os.path.isfile(dl)):
out = os.path.normpath(os.path.join(base_dir, dl))
else:
out = xs
else:
out = xs
else:
out = xs
os.chdir(orig_dir)
return out | [
"def",
"abs_file_paths",
"(",
"xs",
",",
"base_dir",
"=",
"None",
",",
"ignore_keys",
"=",
"None",
",",
"fileonly_keys",
"=",
"None",
",",
"cur_key",
"=",
"None",
",",
"do_download",
"=",
"True",
")",
":",
"ignore_keys",
"=",
"set",
"(",
"[",
"]",
")",... | Normalize any file paths found in a subdirectory of configuration input.
base_dir -- directory to normalize relative paths to
ignore_keys -- algorithm key names to ignore normalize for (keywords, not files/directories)
fileonly_keys -- algorithm key names to only expand files (not directories)
cur_key -- current key when calling recursively | [
"Normalize",
"any",
"file",
"paths",
"found",
"in",
"a",
"subdirectory",
"of",
"configuration",
"input",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/genome.py#L72-L113 |
223,650 | bcbio/bcbio-nextgen | bcbio/pipeline/genome.py | _get_galaxy_tool_info | def _get_galaxy_tool_info(galaxy_base):
"""Retrieve Galaxy tool-data information from defaults or galaxy config file.
"""
ini_file = os.path.join(galaxy_base, "universe_wsgi.ini")
info = {"tool_data_table_config_path": os.path.join(galaxy_base, "tool_data_table_conf.xml"),
"tool_data_path": os.path.join(galaxy_base, "tool-data")}
config = configparser.ConfigParser()
config.read(ini_file)
if "app:main" in config.sections():
for option in config.options("app:main"):
if option in info:
info[option] = os.path.join(galaxy_base, config.get("app:main", option))
return info | python | def _get_galaxy_tool_info(galaxy_base):
"""Retrieve Galaxy tool-data information from defaults or galaxy config file.
"""
ini_file = os.path.join(galaxy_base, "universe_wsgi.ini")
info = {"tool_data_table_config_path": os.path.join(galaxy_base, "tool_data_table_conf.xml"),
"tool_data_path": os.path.join(galaxy_base, "tool-data")}
config = configparser.ConfigParser()
config.read(ini_file)
if "app:main" in config.sections():
for option in config.options("app:main"):
if option in info:
info[option] = os.path.join(galaxy_base, config.get("app:main", option))
return info | [
"def",
"_get_galaxy_tool_info",
"(",
"galaxy_base",
")",
":",
"ini_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"galaxy_base",
",",
"\"universe_wsgi.ini\"",
")",
"info",
"=",
"{",
"\"tool_data_table_config_path\"",
":",
"os",
".",
"path",
".",
"join",
"(",... | Retrieve Galaxy tool-data information from defaults or galaxy config file. | [
"Retrieve",
"Galaxy",
"tool",
"-",
"data",
"information",
"from",
"defaults",
"or",
"galaxy",
"config",
"file",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/genome.py#L195-L207 |
223,651 | bcbio/bcbio-nextgen | bcbio/pipeline/genome.py | get_builds | def get_builds(galaxy_base):
"""Retrieve configured genome builds and reference files, using Galaxy configuration files.
Allows multiple dbkey specifications in the same file, using the most recently added.
"""
name = "samtools"
galaxy_config = _get_galaxy_tool_info(galaxy_base)
galaxy_dt = _get_galaxy_data_table(name, galaxy_config["tool_data_table_config_path"])
loc_file, need_remap = _get_galaxy_loc_file(name, galaxy_dt, galaxy_config["tool_data_path"],
galaxy_base)
assert not need_remap, "Should not need to remap reference files"
fnames = {}
for dbkey, fname in _galaxy_loc_iter(loc_file, galaxy_dt):
fnames[dbkey] = fname
out = []
for dbkey in sorted(fnames.keys()):
out.append((dbkey, fnames[dbkey]))
return out | python | def get_builds(galaxy_base):
"""Retrieve configured genome builds and reference files, using Galaxy configuration files.
Allows multiple dbkey specifications in the same file, using the most recently added.
"""
name = "samtools"
galaxy_config = _get_galaxy_tool_info(galaxy_base)
galaxy_dt = _get_galaxy_data_table(name, galaxy_config["tool_data_table_config_path"])
loc_file, need_remap = _get_galaxy_loc_file(name, galaxy_dt, galaxy_config["tool_data_path"],
galaxy_base)
assert not need_remap, "Should not need to remap reference files"
fnames = {}
for dbkey, fname in _galaxy_loc_iter(loc_file, galaxy_dt):
fnames[dbkey] = fname
out = []
for dbkey in sorted(fnames.keys()):
out.append((dbkey, fnames[dbkey]))
return out | [
"def",
"get_builds",
"(",
"galaxy_base",
")",
":",
"name",
"=",
"\"samtools\"",
"galaxy_config",
"=",
"_get_galaxy_tool_info",
"(",
"galaxy_base",
")",
"galaxy_dt",
"=",
"_get_galaxy_data_table",
"(",
"name",
",",
"galaxy_config",
"[",
"\"tool_data_table_config_path\"",... | Retrieve configured genome builds and reference files, using Galaxy configuration files.
Allows multiple dbkey specifications in the same file, using the most recently added. | [
"Retrieve",
"configured",
"genome",
"builds",
"and",
"reference",
"files",
"using",
"Galaxy",
"configuration",
"files",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/genome.py#L268-L285 |
223,652 | bcbio/bcbio-nextgen | bcbio/variation/varscan.py | _get_jvm_opts | def _get_jvm_opts(config, tmp_dir):
"""Retrieve common options for running VarScan.
Handles jvm_opts, setting user and country to English to avoid issues
with different locales producing non-compliant VCF.
"""
resources = config_utils.get_resources("varscan", config)
jvm_opts = resources.get("jvm_opts", ["-Xmx750m", "-Xmx2g"])
jvm_opts = config_utils.adjust_opts(jvm_opts,
{"algorithm": {"memory_adjust":
{"magnitude": 1.1, "direction": "decrease"}}})
jvm_opts += ["-Duser.language=en", "-Duser.country=US"]
jvm_opts += broad.get_default_jvm_opts(tmp_dir)
return " ".join(jvm_opts) | python | def _get_jvm_opts(config, tmp_dir):
"""Retrieve common options for running VarScan.
Handles jvm_opts, setting user and country to English to avoid issues
with different locales producing non-compliant VCF.
"""
resources = config_utils.get_resources("varscan", config)
jvm_opts = resources.get("jvm_opts", ["-Xmx750m", "-Xmx2g"])
jvm_opts = config_utils.adjust_opts(jvm_opts,
{"algorithm": {"memory_adjust":
{"magnitude": 1.1, "direction": "decrease"}}})
jvm_opts += ["-Duser.language=en", "-Duser.country=US"]
jvm_opts += broad.get_default_jvm_opts(tmp_dir)
return " ".join(jvm_opts) | [
"def",
"_get_jvm_opts",
"(",
"config",
",",
"tmp_dir",
")",
":",
"resources",
"=",
"config_utils",
".",
"get_resources",
"(",
"\"varscan\"",
",",
"config",
")",
"jvm_opts",
"=",
"resources",
".",
"get",
"(",
"\"jvm_opts\"",
",",
"[",
"\"-Xmx750m\"",
",",
"\"... | Retrieve common options for running VarScan.
Handles jvm_opts, setting user and country to English to avoid issues
with different locales producing non-compliant VCF. | [
"Retrieve",
"common",
"options",
"for",
"running",
"VarScan",
".",
"Handles",
"jvm_opts",
"setting",
"user",
"and",
"country",
"to",
"English",
"to",
"avoid",
"issues",
"with",
"different",
"locales",
"producing",
"non",
"-",
"compliant",
"VCF",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/varscan.py#L36-L48 |
223,653 | bcbio/bcbio-nextgen | bcbio/variation/varscan.py | _varscan_options_from_config | def _varscan_options_from_config(config):
"""Retrieve additional options for VarScan from the configuration.
"""
opts = ["--min-coverage 5", "--p-value 0.98", "--strand-filter 1"]
resources = config_utils.get_resources("varscan", config)
if resources.get("options"):
opts += [str(x) for x in resources["options"]]
return opts | python | def _varscan_options_from_config(config):
"""Retrieve additional options for VarScan from the configuration.
"""
opts = ["--min-coverage 5", "--p-value 0.98", "--strand-filter 1"]
resources = config_utils.get_resources("varscan", config)
if resources.get("options"):
opts += [str(x) for x in resources["options"]]
return opts | [
"def",
"_varscan_options_from_config",
"(",
"config",
")",
":",
"opts",
"=",
"[",
"\"--min-coverage 5\"",
",",
"\"--p-value 0.98\"",
",",
"\"--strand-filter 1\"",
"]",
"resources",
"=",
"config_utils",
".",
"get_resources",
"(",
"\"varscan\"",
",",
"config",
")",
"i... | Retrieve additional options for VarScan from the configuration. | [
"Retrieve",
"additional",
"options",
"for",
"VarScan",
"from",
"the",
"configuration",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/varscan.py#L51-L58 |
223,654 | bcbio/bcbio-nextgen | bcbio/variation/varscan.py | spv_freq_filter | def spv_freq_filter(line, tumor_index):
"""Filter VarScan calls based on the SPV value and frequency.
Removes calls with SPV < 0.05 and a tumor FREQ > 0.35.
False positives dominate these higher frequency, low SPV calls. They appear
to be primarily non-somatic/germline variants not removed by other filters.
"""
if line.startswith("#CHROM"):
headers = [('##FILTER=<ID=SpvFreq,Description="High frequency (tumor FREQ > 0.35) '
'and low p-value for somatic (SPV < 0.05)">')]
return "\n".join(headers) + "\n" + line
elif line.startswith("#"):
return line
else:
parts = line.split("\t")
sample_ft = {a: v for (a, v) in zip(parts[8].split(":"), parts[9 + tumor_index].split(":"))}
freq = utils.safe_to_float(sample_ft.get("FREQ"))
spvs = [x for x in parts[7].split(";") if x.startswith("SPV=")]
spv = utils.safe_to_float(spvs[0].split("=")[-1] if spvs else None)
fname = None
if spv is not None and freq is not None:
if spv < 0.05 and freq > 0.35:
fname = "SpvFreq"
if fname:
if parts[6] in set([".", "PASS"]):
parts[6] = fname
else:
parts[6] += ";%s" % fname
line = "\t".join(parts)
return line | python | def spv_freq_filter(line, tumor_index):
"""Filter VarScan calls based on the SPV value and frequency.
Removes calls with SPV < 0.05 and a tumor FREQ > 0.35.
False positives dominate these higher frequency, low SPV calls. They appear
to be primarily non-somatic/germline variants not removed by other filters.
"""
if line.startswith("#CHROM"):
headers = [('##FILTER=<ID=SpvFreq,Description="High frequency (tumor FREQ > 0.35) '
'and low p-value for somatic (SPV < 0.05)">')]
return "\n".join(headers) + "\n" + line
elif line.startswith("#"):
return line
else:
parts = line.split("\t")
sample_ft = {a: v for (a, v) in zip(parts[8].split(":"), parts[9 + tumor_index].split(":"))}
freq = utils.safe_to_float(sample_ft.get("FREQ"))
spvs = [x for x in parts[7].split(";") if x.startswith("SPV=")]
spv = utils.safe_to_float(spvs[0].split("=")[-1] if spvs else None)
fname = None
if spv is not None and freq is not None:
if spv < 0.05 and freq > 0.35:
fname = "SpvFreq"
if fname:
if parts[6] in set([".", "PASS"]):
parts[6] = fname
else:
parts[6] += ";%s" % fname
line = "\t".join(parts)
return line | [
"def",
"spv_freq_filter",
"(",
"line",
",",
"tumor_index",
")",
":",
"if",
"line",
".",
"startswith",
"(",
"\"#CHROM\"",
")",
":",
"headers",
"=",
"[",
"(",
"'##FILTER=<ID=SpvFreq,Description=\"High frequency (tumor FREQ > 0.35) '",
"'and low p-value for somatic (SPV < 0.05... | Filter VarScan calls based on the SPV value and frequency.
Removes calls with SPV < 0.05 and a tumor FREQ > 0.35.
False positives dominate these higher frequency, low SPV calls. They appear
to be primarily non-somatic/germline variants not removed by other filters. | [
"Filter",
"VarScan",
"calls",
"based",
"on",
"the",
"SPV",
"value",
"and",
"frequency",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/varscan.py#L61-L91 |
223,655 | bcbio/bcbio-nextgen | bcbio/variation/varscan.py | _create_sample_list | def _create_sample_list(in_bams, vcf_file):
"""Pull sample names from input BAMs and create input sample list.
"""
out_file = "%s-sample_list.txt" % os.path.splitext(vcf_file)[0]
with open(out_file, "w") as out_handle:
for in_bam in in_bams:
with pysam.Samfile(in_bam, "rb") as work_bam:
for rg in work_bam.header.get("RG", []):
out_handle.write("%s\n" % rg["SM"])
return out_file | python | def _create_sample_list(in_bams, vcf_file):
"""Pull sample names from input BAMs and create input sample list.
"""
out_file = "%s-sample_list.txt" % os.path.splitext(vcf_file)[0]
with open(out_file, "w") as out_handle:
for in_bam in in_bams:
with pysam.Samfile(in_bam, "rb") as work_bam:
for rg in work_bam.header.get("RG", []):
out_handle.write("%s\n" % rg["SM"])
return out_file | [
"def",
"_create_sample_list",
"(",
"in_bams",
",",
"vcf_file",
")",
":",
"out_file",
"=",
"\"%s-sample_list.txt\"",
"%",
"os",
".",
"path",
".",
"splitext",
"(",
"vcf_file",
")",
"[",
"0",
"]",
"with",
"open",
"(",
"out_file",
",",
"\"w\"",
")",
"as",
"o... | Pull sample names from input BAMs and create input sample list. | [
"Pull",
"sample",
"names",
"from",
"input",
"BAMs",
"and",
"create",
"input",
"sample",
"list",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/varscan.py#L274-L283 |
223,656 | bcbio/bcbio-nextgen | bcbio/variation/varscan.py | _varscan_work | def _varscan_work(align_bams, ref_file, items, target_regions, out_file):
"""Perform SNP and indel genotyping with VarScan.
"""
config = items[0]["config"]
orig_out_file = out_file
out_file = orig_out_file.replace(".vcf.gz", ".vcf")
max_read_depth = "1000"
sample_list = _create_sample_list(align_bams, out_file)
mpileup = samtools.prep_mpileup(align_bams, ref_file, config, max_read_depth,
target_regions=target_regions, want_bcf=False)
# VarScan fails to generate a header on files that start with
# zerocoverage calls; strip these with grep, we're not going to
# call on them
remove_zerocoverage = r"{ ifne grep -v -P '\t0\t\t$' || true; }"
# we use ifne from moreutils to ensure we process only on files with input, skipping otherwise
# http://manpages.ubuntu.com/manpages/natty/man1/ifne.1.html
with tx_tmpdir(items[0]) as tmp_dir:
jvm_opts = _get_jvm_opts(config, tmp_dir)
opts = " ".join(_varscan_options_from_config(config))
min_af = float(utils.get_in(config, ("algorithm", "min_allele_fraction"), 10)) / 100.0
fix_ambig_ref = vcfutils.fix_ambiguous_cl()
fix_ambig_alt = vcfutils.fix_ambiguous_cl(5)
py_cl = os.path.join(os.path.dirname(sys.executable), "py")
export = utils.local_path_export()
cmd = ("{export} {mpileup} | {remove_zerocoverage} | "
"ifne varscan {jvm_opts} mpileup2cns {opts} "
"--vcf-sample-list {sample_list} --min-var-freq {min_af} --output-vcf --variants | "
"""{py_cl} -x 'bcbio.variation.vcfutils.add_contig_to_header(x, "{ref_file}")' | """
"{py_cl} -x 'bcbio.variation.varscan.fix_varscan_output(x)' | "
"{fix_ambig_ref} | {fix_ambig_alt} | ifne vcfuniqalleles > {out_file}")
do.run(cmd.format(**locals()), "Varscan", None,
[do.file_exists(out_file)])
os.remove(sample_list)
# VarScan can create completely empty files in regions without
# variants, so we create a correctly formatted empty file
if os.path.getsize(out_file) == 0:
write_empty_vcf(out_file)
if orig_out_file.endswith(".gz"):
vcfutils.bgzip_and_index(out_file, config) | python | def _varscan_work(align_bams, ref_file, items, target_regions, out_file):
"""Perform SNP and indel genotyping with VarScan.
"""
config = items[0]["config"]
orig_out_file = out_file
out_file = orig_out_file.replace(".vcf.gz", ".vcf")
max_read_depth = "1000"
sample_list = _create_sample_list(align_bams, out_file)
mpileup = samtools.prep_mpileup(align_bams, ref_file, config, max_read_depth,
target_regions=target_regions, want_bcf=False)
# VarScan fails to generate a header on files that start with
# zerocoverage calls; strip these with grep, we're not going to
# call on them
remove_zerocoverage = r"{ ifne grep -v -P '\t0\t\t$' || true; }"
# we use ifne from moreutils to ensure we process only on files with input, skipping otherwise
# http://manpages.ubuntu.com/manpages/natty/man1/ifne.1.html
with tx_tmpdir(items[0]) as tmp_dir:
jvm_opts = _get_jvm_opts(config, tmp_dir)
opts = " ".join(_varscan_options_from_config(config))
min_af = float(utils.get_in(config, ("algorithm", "min_allele_fraction"), 10)) / 100.0
fix_ambig_ref = vcfutils.fix_ambiguous_cl()
fix_ambig_alt = vcfutils.fix_ambiguous_cl(5)
py_cl = os.path.join(os.path.dirname(sys.executable), "py")
export = utils.local_path_export()
cmd = ("{export} {mpileup} | {remove_zerocoverage} | "
"ifne varscan {jvm_opts} mpileup2cns {opts} "
"--vcf-sample-list {sample_list} --min-var-freq {min_af} --output-vcf --variants | "
"""{py_cl} -x 'bcbio.variation.vcfutils.add_contig_to_header(x, "{ref_file}")' | """
"{py_cl} -x 'bcbio.variation.varscan.fix_varscan_output(x)' | "
"{fix_ambig_ref} | {fix_ambig_alt} | ifne vcfuniqalleles > {out_file}")
do.run(cmd.format(**locals()), "Varscan", None,
[do.file_exists(out_file)])
os.remove(sample_list)
# VarScan can create completely empty files in regions without
# variants, so we create a correctly formatted empty file
if os.path.getsize(out_file) == 0:
write_empty_vcf(out_file)
if orig_out_file.endswith(".gz"):
vcfutils.bgzip_and_index(out_file, config) | [
"def",
"_varscan_work",
"(",
"align_bams",
",",
"ref_file",
",",
"items",
",",
"target_regions",
",",
"out_file",
")",
":",
"config",
"=",
"items",
"[",
"0",
"]",
"[",
"\"config\"",
"]",
"orig_out_file",
"=",
"out_file",
"out_file",
"=",
"orig_out_file",
"."... | Perform SNP and indel genotyping with VarScan. | [
"Perform",
"SNP",
"and",
"indel",
"genotyping",
"with",
"VarScan",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/varscan.py#L286-L327 |
223,657 | bcbio/bcbio-nextgen | bcbio/distributed/ipythontasks.py | apply | def apply(object, args=None, kwargs=None):
"""Python3 apply replacement for double unpacking of inputs during apply.
Thanks to: https://github.com/stefanholek/apply
"""
if args is None:
args = ()
if kwargs is None:
kwargs = {}
return object(*args, **kwargs) | python | def apply(object, args=None, kwargs=None):
"""Python3 apply replacement for double unpacking of inputs during apply.
Thanks to: https://github.com/stefanholek/apply
"""
if args is None:
args = ()
if kwargs is None:
kwargs = {}
return object(*args, **kwargs) | [
"def",
"apply",
"(",
"object",
",",
"args",
"=",
"None",
",",
"kwargs",
"=",
"None",
")",
":",
"if",
"args",
"is",
"None",
":",
"args",
"=",
"(",
")",
"if",
"kwargs",
"is",
"None",
":",
"kwargs",
"=",
"{",
"}",
"return",
"object",
"(",
"*",
"ar... | Python3 apply replacement for double unpacking of inputs during apply.
Thanks to: https://github.com/stefanholek/apply | [
"Python3",
"apply",
"replacement",
"for",
"double",
"unpacking",
"of",
"inputs",
"during",
"apply",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/ipythontasks.py#L71-L80 |
223,658 | bcbio/bcbio-nextgen | bcbio/structural/annotate.py | add_genes | def add_genes(in_file, data, max_distance=10000, work_dir=None):
"""Add gene annotations to a BED file from pre-prepared RNA-seq data.
max_distance -- only keep annotations within this distance of event
"""
gene_file = regions.get_sv_bed(data, "exons", out_dir=os.path.dirname(in_file))
if gene_file and utils.file_exists(in_file):
out_file = "%s-annotated.bed" % utils.splitext_plus(in_file)[0]
if work_dir:
out_file = os.path.join(work_dir, os.path.basename(out_file))
if not utils.file_uptodate(out_file, in_file):
fai_file = ref.fasta_idx(dd.get_ref_file(data))
with file_transaction(data, out_file) as tx_out_file:
_add_genes_to_bed(in_file, gene_file, fai_file, tx_out_file, data, max_distance)
return out_file
else:
return in_file | python | def add_genes(in_file, data, max_distance=10000, work_dir=None):
"""Add gene annotations to a BED file from pre-prepared RNA-seq data.
max_distance -- only keep annotations within this distance of event
"""
gene_file = regions.get_sv_bed(data, "exons", out_dir=os.path.dirname(in_file))
if gene_file and utils.file_exists(in_file):
out_file = "%s-annotated.bed" % utils.splitext_plus(in_file)[0]
if work_dir:
out_file = os.path.join(work_dir, os.path.basename(out_file))
if not utils.file_uptodate(out_file, in_file):
fai_file = ref.fasta_idx(dd.get_ref_file(data))
with file_transaction(data, out_file) as tx_out_file:
_add_genes_to_bed(in_file, gene_file, fai_file, tx_out_file, data, max_distance)
return out_file
else:
return in_file | [
"def",
"add_genes",
"(",
"in_file",
",",
"data",
",",
"max_distance",
"=",
"10000",
",",
"work_dir",
"=",
"None",
")",
":",
"gene_file",
"=",
"regions",
".",
"get_sv_bed",
"(",
"data",
",",
"\"exons\"",
",",
"out_dir",
"=",
"os",
".",
"path",
".",
"dir... | Add gene annotations to a BED file from pre-prepared RNA-seq data.
max_distance -- only keep annotations within this distance of event | [
"Add",
"gene",
"annotations",
"to",
"a",
"BED",
"file",
"from",
"pre",
"-",
"prepared",
"RNA",
"-",
"seq",
"data",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/annotate.py#L17-L33 |
223,659 | bcbio/bcbio-nextgen | bcbio/structural/annotate.py | _add_genes_to_bed | def _add_genes_to_bed(in_file, gene_file, fai_file, out_file, data, max_distance=10000):
"""Re-usable subcomponent that annotates BED file genes from another BED
"""
try:
input_rec = next(iter(pybedtools.BedTool(in_file)))
except StopIteration: # empty file
utils.copy_plus(in_file, out_file)
return
# keep everything after standard chrom/start/end, 1-based
extra_fields = list(range(4, len(input_rec.fields) + 1))
# keep the new gene annotation
gene_index = len(input_rec.fields) + 4
extra_fields.append(gene_index)
columns = ",".join([str(x) for x in extra_fields])
max_column = max(extra_fields) + 1
ops = ",".join(["distinct"] * len(extra_fields))
# swap over gene name to '.' if beyond maximum distance
# cut removes the last distance column which can cause issues
# with bedtools merge: 'ERROR: illegal character '.' found in integer conversion of string'
distance_filter = (r"""awk -F$'\t' -v OFS='\t' '{if ($NF > %s || $NF < -%s) $%s = "."} {print}'""" %
(max_distance, max_distance, gene_index))
sort_cmd = bedutils.get_sort_cmd(os.path.dirname(out_file))
cat_cmd = "zcat" if in_file.endswith(".gz") else "cat"
# Ensure gene transcripts match reference genome
ready_gene_file = os.path.join(os.path.dirname(out_file), "%s-genomeonly.bed" %
(utils.splitext_plus(os.path.basename(gene_file))[0]))
ready_gene_file = bedutils.subset_to_genome(gene_file, ready_gene_file, data)
exports = "export TMPDIR=%s && %s" % (os.path.dirname(out_file), utils.local_path_export())
bcbio_py = sys.executable
gsort = config_utils.get_program("gsort", data)
cmd = ("{exports}{cat_cmd} {in_file} | grep -v ^track | grep -v ^browser | grep -v ^# | "
"{bcbio_py} -c 'from bcbio.variation import bedutils; bedutils.remove_bad()' | "
"{gsort} - {fai_file} | "
"bedtools closest -g {fai_file} "
"-D ref -t first -a - -b <({gsort} {ready_gene_file} {fai_file}) | "
"{distance_filter} | cut -f 1-{max_column} | "
"bedtools merge -i - -c {columns} -o {ops} -delim ',' -d -10 > {out_file}")
do.run(cmd.format(**locals()), "Annotate BED file with gene info") | python | def _add_genes_to_bed(in_file, gene_file, fai_file, out_file, data, max_distance=10000):
"""Re-usable subcomponent that annotates BED file genes from another BED
"""
try:
input_rec = next(iter(pybedtools.BedTool(in_file)))
except StopIteration: # empty file
utils.copy_plus(in_file, out_file)
return
# keep everything after standard chrom/start/end, 1-based
extra_fields = list(range(4, len(input_rec.fields) + 1))
# keep the new gene annotation
gene_index = len(input_rec.fields) + 4
extra_fields.append(gene_index)
columns = ",".join([str(x) for x in extra_fields])
max_column = max(extra_fields) + 1
ops = ",".join(["distinct"] * len(extra_fields))
# swap over gene name to '.' if beyond maximum distance
# cut removes the last distance column which can cause issues
# with bedtools merge: 'ERROR: illegal character '.' found in integer conversion of string'
distance_filter = (r"""awk -F$'\t' -v OFS='\t' '{if ($NF > %s || $NF < -%s) $%s = "."} {print}'""" %
(max_distance, max_distance, gene_index))
sort_cmd = bedutils.get_sort_cmd(os.path.dirname(out_file))
cat_cmd = "zcat" if in_file.endswith(".gz") else "cat"
# Ensure gene transcripts match reference genome
ready_gene_file = os.path.join(os.path.dirname(out_file), "%s-genomeonly.bed" %
(utils.splitext_plus(os.path.basename(gene_file))[0]))
ready_gene_file = bedutils.subset_to_genome(gene_file, ready_gene_file, data)
exports = "export TMPDIR=%s && %s" % (os.path.dirname(out_file), utils.local_path_export())
bcbio_py = sys.executable
gsort = config_utils.get_program("gsort", data)
cmd = ("{exports}{cat_cmd} {in_file} | grep -v ^track | grep -v ^browser | grep -v ^# | "
"{bcbio_py} -c 'from bcbio.variation import bedutils; bedutils.remove_bad()' | "
"{gsort} - {fai_file} | "
"bedtools closest -g {fai_file} "
"-D ref -t first -a - -b <({gsort} {ready_gene_file} {fai_file}) | "
"{distance_filter} | cut -f 1-{max_column} | "
"bedtools merge -i - -c {columns} -o {ops} -delim ',' -d -10 > {out_file}")
do.run(cmd.format(**locals()), "Annotate BED file with gene info") | [
"def",
"_add_genes_to_bed",
"(",
"in_file",
",",
"gene_file",
",",
"fai_file",
",",
"out_file",
",",
"data",
",",
"max_distance",
"=",
"10000",
")",
":",
"try",
":",
"input_rec",
"=",
"next",
"(",
"iter",
"(",
"pybedtools",
".",
"BedTool",
"(",
"in_file",
... | Re-usable subcomponent that annotates BED file genes from another BED | [
"Re",
"-",
"usable",
"subcomponent",
"that",
"annotates",
"BED",
"file",
"genes",
"from",
"another",
"BED"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/annotate.py#L35-L72 |
223,660 | bcbio/bcbio-nextgen | bcbio/distributed/clusterk.py | create | def create(parallel):
"""Create a queue based on the provided parallel arguments.
TODO Startup/tear-down. Currently using default queue for testing
"""
queue = {k: v for k, v in parallel.items() if k in ["queue", "cores_per_job", "mem"]}
yield queue | python | def create(parallel):
"""Create a queue based on the provided parallel arguments.
TODO Startup/tear-down. Currently using default queue for testing
"""
queue = {k: v for k, v in parallel.items() if k in ["queue", "cores_per_job", "mem"]}
yield queue | [
"def",
"create",
"(",
"parallel",
")",
":",
"queue",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"parallel",
".",
"items",
"(",
")",
"if",
"k",
"in",
"[",
"\"queue\"",
",",
"\"cores_per_job\"",
",",
"\"mem\"",
"]",
"}",
"yield",
"queue"
] | Create a queue based on the provided parallel arguments.
TODO Startup/tear-down. Currently using default queue for testing | [
"Create",
"a",
"queue",
"based",
"on",
"the",
"provided",
"parallel",
"arguments",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/clusterk.py#L11-L17 |
223,661 | bcbio/bcbio-nextgen | bcbio/distributed/clusterk.py | runner | def runner(queue, parallel):
"""Run individual jobs on an existing queue.
"""
def run(fn_name, items):
logger.info("clusterk: %s" % fn_name)
assert "wrapper" in parallel, "Clusterk requires bcbio-nextgen-vm wrapper"
fn = getattr(__import__("{base}.clusterktasks".format(base=parallel["module"]),
fromlist=["clusterktasks"]),
parallel["wrapper"])
wrap_parallel = {k: v for k, v in parallel.items() if k in set(["fresources", "pack"])}
out = []
for data in [fn(fn_name, queue, parallel.get("wrapper_args"), wrap_parallel, x) for x in items]:
if data:
out.extend(data)
return out
return run | python | def runner(queue, parallel):
"""Run individual jobs on an existing queue.
"""
def run(fn_name, items):
logger.info("clusterk: %s" % fn_name)
assert "wrapper" in parallel, "Clusterk requires bcbio-nextgen-vm wrapper"
fn = getattr(__import__("{base}.clusterktasks".format(base=parallel["module"]),
fromlist=["clusterktasks"]),
parallel["wrapper"])
wrap_parallel = {k: v for k, v in parallel.items() if k in set(["fresources", "pack"])}
out = []
for data in [fn(fn_name, queue, parallel.get("wrapper_args"), wrap_parallel, x) for x in items]:
if data:
out.extend(data)
return out
return run | [
"def",
"runner",
"(",
"queue",
",",
"parallel",
")",
":",
"def",
"run",
"(",
"fn_name",
",",
"items",
")",
":",
"logger",
".",
"info",
"(",
"\"clusterk: %s\"",
"%",
"fn_name",
")",
"assert",
"\"wrapper\"",
"in",
"parallel",
",",
"\"Clusterk requires bcbio-ne... | Run individual jobs on an existing queue. | [
"Run",
"individual",
"jobs",
"on",
"an",
"existing",
"queue",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/clusterk.py#L19-L34 |
223,662 | bcbio/bcbio-nextgen | bcbio/structural/prioritize.py | is_gene_list | def is_gene_list(bed_file):
"""Check if the file is only a list of genes, not a BED
"""
with utils.open_gzipsafe(bed_file) as in_handle:
for line in in_handle:
if not line.startswith("#"):
if len(line.split()) == 1:
return True
else:
return False | python | def is_gene_list(bed_file):
"""Check if the file is only a list of genes, not a BED
"""
with utils.open_gzipsafe(bed_file) as in_handle:
for line in in_handle:
if not line.startswith("#"):
if len(line.split()) == 1:
return True
else:
return False | [
"def",
"is_gene_list",
"(",
"bed_file",
")",
":",
"with",
"utils",
".",
"open_gzipsafe",
"(",
"bed_file",
")",
"as",
"in_handle",
":",
"for",
"line",
"in",
"in_handle",
":",
"if",
"not",
"line",
".",
"startswith",
"(",
"\"#\"",
")",
":",
"if",
"len",
"... | Check if the file is only a list of genes, not a BED | [
"Check",
"if",
"the",
"file",
"is",
"only",
"a",
"list",
"of",
"genes",
"not",
"a",
"BED"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/prioritize.py#L51-L60 |
223,663 | bcbio/bcbio-nextgen | bcbio/structural/prioritize.py | _find_gene_list_from_bed | def _find_gene_list_from_bed(bed_file, base_file, data):
"""Retrieve list of gene names from input BED file.
"""
# Check for a gene list, we can just return that.
if is_gene_list(bed_file):
return bed_file
out_file = "%s-genes.txt" % utils.splitext_plus(base_file)[0]
if not os.path.exists(out_file):
genes = set([])
import pybedtools
with utils.open_gzipsafe(bed_file) as in_handle:
for r in pybedtools.BedTool(in_handle):
if r.name:
if not r.name.startswith("{"):
genes.add(r.name)
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
if len(genes) > 0:
out_handle.write("\n".join(sorted(list(genes))) + "\n")
if utils.file_exists(out_file):
return out_file | python | def _find_gene_list_from_bed(bed_file, base_file, data):
"""Retrieve list of gene names from input BED file.
"""
# Check for a gene list, we can just return that.
if is_gene_list(bed_file):
return bed_file
out_file = "%s-genes.txt" % utils.splitext_plus(base_file)[0]
if not os.path.exists(out_file):
genes = set([])
import pybedtools
with utils.open_gzipsafe(bed_file) as in_handle:
for r in pybedtools.BedTool(in_handle):
if r.name:
if not r.name.startswith("{"):
genes.add(r.name)
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
if len(genes) > 0:
out_handle.write("\n".join(sorted(list(genes))) + "\n")
if utils.file_exists(out_file):
return out_file | [
"def",
"_find_gene_list_from_bed",
"(",
"bed_file",
",",
"base_file",
",",
"data",
")",
":",
"# Check for a gene list, we can just return that.",
"if",
"is_gene_list",
"(",
"bed_file",
")",
":",
"return",
"bed_file",
"out_file",
"=",
"\"%s-genes.txt\"",
"%",
"utils",
... | Retrieve list of gene names from input BED file. | [
"Retrieve",
"list",
"of",
"gene",
"names",
"from",
"input",
"BED",
"file",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/prioritize.py#L62-L82 |
223,664 | bcbio/bcbio-nextgen | bcbio/structural/prioritize.py | _combine_files | def _combine_files(tsv_files, work_dir, data):
"""Combine multiple priority tsv files into a final sorted output.
"""
header = "\t".join(["caller", "sample", "chrom", "start", "end", "svtype",
"lof", "annotation", "split_read_support", "paired_support_PE", "paired_support_PR"])
sample = dd.get_sample_name(data)
out_file = os.path.join(work_dir, "%s-prioritize.tsv" % (sample))
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
tmpdir = os.path.dirname(tx_out_file)
input_files = " ".join(tsv_files)
sort_cmd = bedutils.get_sort_cmd(tmpdir)
cmd = "{{ echo '{header}'; cat {input_files} | {sort_cmd} -k3,3 -k4,4n; }} > {tx_out_file}"
do.run(cmd.format(**locals()), "Combine prioritized from multiple callers")
return out_file | python | def _combine_files(tsv_files, work_dir, data):
"""Combine multiple priority tsv files into a final sorted output.
"""
header = "\t".join(["caller", "sample", "chrom", "start", "end", "svtype",
"lof", "annotation", "split_read_support", "paired_support_PE", "paired_support_PR"])
sample = dd.get_sample_name(data)
out_file = os.path.join(work_dir, "%s-prioritize.tsv" % (sample))
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
tmpdir = os.path.dirname(tx_out_file)
input_files = " ".join(tsv_files)
sort_cmd = bedutils.get_sort_cmd(tmpdir)
cmd = "{{ echo '{header}'; cat {input_files} | {sort_cmd} -k3,3 -k4,4n; }} > {tx_out_file}"
do.run(cmd.format(**locals()), "Combine prioritized from multiple callers")
return out_file | [
"def",
"_combine_files",
"(",
"tsv_files",
",",
"work_dir",
",",
"data",
")",
":",
"header",
"=",
"\"\\t\"",
".",
"join",
"(",
"[",
"\"caller\"",
",",
"\"sample\"",
",",
"\"chrom\"",
",",
"\"start\"",
",",
"\"end\"",
",",
"\"svtype\"",
",",
"\"lof\"",
",",... | Combine multiple priority tsv files into a final sorted output. | [
"Combine",
"multiple",
"priority",
"tsv",
"files",
"into",
"a",
"final",
"sorted",
"output",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/prioritize.py#L145-L159 |
223,665 | bcbio/bcbio-nextgen | bcbio/structural/prioritize.py | _cnvkit_prioritize | def _cnvkit_prioritize(sample, genes, allele_file, metrics_file):
"""Summarize non-diploid calls with copy numbers and confidence intervals.
"""
mdf = pd.read_table(metrics_file)
mdf.columns = [x.lower() for x in mdf.columns]
if len(genes) > 0:
mdf = mdf[mdf["gene"].str.contains("|".join(genes))]
mdf = mdf[["chromosome", "start", "end", "gene", "log2", "ci_hi", "ci_lo"]]
adf = pd.read_table(allele_file)
if len(genes) > 0:
adf = adf[adf["gene"].str.contains("|".join(genes))]
if "cn1" in adf.columns and "cn2" in adf.columns:
adf = adf[["chromosome", "start", "end", "cn", "cn1", "cn2"]]
else:
adf = adf[["chromosome", "start", "end", "cn"]]
df = pd.merge(mdf, adf, on=["chromosome", "start", "end"])
df = df[df["cn"] != 2]
if len(df) > 0:
def passes(row):
spread = abs(row["ci_hi"] - row["ci_lo"])
return spread < 0.25
df["passes"] = df.apply(passes, axis=1)
df.insert(0, "sample", [sample] * len(df))
return df | python | def _cnvkit_prioritize(sample, genes, allele_file, metrics_file):
"""Summarize non-diploid calls with copy numbers and confidence intervals.
"""
mdf = pd.read_table(metrics_file)
mdf.columns = [x.lower() for x in mdf.columns]
if len(genes) > 0:
mdf = mdf[mdf["gene"].str.contains("|".join(genes))]
mdf = mdf[["chromosome", "start", "end", "gene", "log2", "ci_hi", "ci_lo"]]
adf = pd.read_table(allele_file)
if len(genes) > 0:
adf = adf[adf["gene"].str.contains("|".join(genes))]
if "cn1" in adf.columns and "cn2" in adf.columns:
adf = adf[["chromosome", "start", "end", "cn", "cn1", "cn2"]]
else:
adf = adf[["chromosome", "start", "end", "cn"]]
df = pd.merge(mdf, adf, on=["chromosome", "start", "end"])
df = df[df["cn"] != 2]
if len(df) > 0:
def passes(row):
spread = abs(row["ci_hi"] - row["ci_lo"])
return spread < 0.25
df["passes"] = df.apply(passes, axis=1)
df.insert(0, "sample", [sample] * len(df))
return df | [
"def",
"_cnvkit_prioritize",
"(",
"sample",
",",
"genes",
",",
"allele_file",
",",
"metrics_file",
")",
":",
"mdf",
"=",
"pd",
".",
"read_table",
"(",
"metrics_file",
")",
"mdf",
".",
"columns",
"=",
"[",
"x",
".",
"lower",
"(",
")",
"for",
"x",
"in",
... | Summarize non-diploid calls with copy numbers and confidence intervals. | [
"Summarize",
"non",
"-",
"diploid",
"calls",
"with",
"copy",
"numbers",
"and",
"confidence",
"intervals",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/prioritize.py#L167-L190 |
223,666 | bcbio/bcbio-nextgen | bcbio/structural/prioritize.py | _cnv_prioritize | def _cnv_prioritize(data):
"""Perform confidence interval based prioritization for CNVs.
"""
supported = {"cnvkit": {"inputs": ["call_file", "segmetrics"], "fn": _cnvkit_prioritize}}
pcall = None
priority_files = None
for call in data.get("sv", []):
if call["variantcaller"] in supported:
priority_files = [call.get(x) for x in supported[call["variantcaller"]]["inputs"]]
priority_files = [x for x in priority_files if x is not None and utils.file_exists(x)]
if len(priority_files) == len(supported[call["variantcaller"]]["inputs"]):
pcall = call
break
prioritize_by = tz.get_in(["config", "algorithm", "svprioritize"], data)
if pcall and prioritize_by:
out_file = "%s-prioritize.tsv" % utils.splitext_plus(priority_files[0])[0]
gene_list = _find_gene_list_from_bed(prioritize_by, out_file, data)
if gene_list:
with open(gene_list) as in_handle:
genes = [x.strip() for x in in_handle]
args = [dd.get_sample_name(data), genes] + priority_files
df = supported[pcall["variantcaller"]]["fn"](*args)
with file_transaction(data, out_file) as tx_out_file:
df.to_csv(tx_out_file, sep="\t", index=False)
pcall["priority"] = out_file
return data | python | def _cnv_prioritize(data):
"""Perform confidence interval based prioritization for CNVs.
"""
supported = {"cnvkit": {"inputs": ["call_file", "segmetrics"], "fn": _cnvkit_prioritize}}
pcall = None
priority_files = None
for call in data.get("sv", []):
if call["variantcaller"] in supported:
priority_files = [call.get(x) for x in supported[call["variantcaller"]]["inputs"]]
priority_files = [x for x in priority_files if x is not None and utils.file_exists(x)]
if len(priority_files) == len(supported[call["variantcaller"]]["inputs"]):
pcall = call
break
prioritize_by = tz.get_in(["config", "algorithm", "svprioritize"], data)
if pcall and prioritize_by:
out_file = "%s-prioritize.tsv" % utils.splitext_plus(priority_files[0])[0]
gene_list = _find_gene_list_from_bed(prioritize_by, out_file, data)
if gene_list:
with open(gene_list) as in_handle:
genes = [x.strip() for x in in_handle]
args = [dd.get_sample_name(data), genes] + priority_files
df = supported[pcall["variantcaller"]]["fn"](*args)
with file_transaction(data, out_file) as tx_out_file:
df.to_csv(tx_out_file, sep="\t", index=False)
pcall["priority"] = out_file
return data | [
"def",
"_cnv_prioritize",
"(",
"data",
")",
":",
"supported",
"=",
"{",
"\"cnvkit\"",
":",
"{",
"\"inputs\"",
":",
"[",
"\"call_file\"",
",",
"\"segmetrics\"",
"]",
",",
"\"fn\"",
":",
"_cnvkit_prioritize",
"}",
"}",
"pcall",
"=",
"None",
"priority_files",
"... | Perform confidence interval based prioritization for CNVs. | [
"Perform",
"confidence",
"interval",
"based",
"prioritization",
"for",
"CNVs",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/prioritize.py#L192-L217 |
223,667 | bcbio/bcbio-nextgen | bcbio/broad/__init__.py | get_default_jvm_opts | def get_default_jvm_opts(tmp_dir=None, parallel_gc=False):
"""Retrieve default JVM tuning options
Avoids issues with multiple spun up Java processes running into out of memory errors.
Parallel GC can use a lot of cores on big machines and primarily helps reduce task latency
and responsiveness which are not needed for batch jobs.
https://github.com/bcbio/bcbio-nextgen/issues/532#issuecomment-50989027
https://wiki.csiro.au/pages/viewpage.action?pageId=545034311
http://stackoverflow.com/questions/9738911/javas-serial-garbage-collector-performing-far-better-than-other-garbage-collect
However, serial GC causes issues with Spark local runs so we use parallel for those cases:
https://github.com/broadinstitute/gatk/issues/3605#issuecomment-332370070
"""
opts = ["-XX:+UseSerialGC"] if not parallel_gc else []
if tmp_dir:
opts.append("-Djava.io.tmpdir=%s" % tmp_dir)
return opts | python | def get_default_jvm_opts(tmp_dir=None, parallel_gc=False):
"""Retrieve default JVM tuning options
Avoids issues with multiple spun up Java processes running into out of memory errors.
Parallel GC can use a lot of cores on big machines and primarily helps reduce task latency
and responsiveness which are not needed for batch jobs.
https://github.com/bcbio/bcbio-nextgen/issues/532#issuecomment-50989027
https://wiki.csiro.au/pages/viewpage.action?pageId=545034311
http://stackoverflow.com/questions/9738911/javas-serial-garbage-collector-performing-far-better-than-other-garbage-collect
However, serial GC causes issues with Spark local runs so we use parallel for those cases:
https://github.com/broadinstitute/gatk/issues/3605#issuecomment-332370070
"""
opts = ["-XX:+UseSerialGC"] if not parallel_gc else []
if tmp_dir:
opts.append("-Djava.io.tmpdir=%s" % tmp_dir)
return opts | [
"def",
"get_default_jvm_opts",
"(",
"tmp_dir",
"=",
"None",
",",
"parallel_gc",
"=",
"False",
")",
":",
"opts",
"=",
"[",
"\"-XX:+UseSerialGC\"",
"]",
"if",
"not",
"parallel_gc",
"else",
"[",
"]",
"if",
"tmp_dir",
":",
"opts",
".",
"append",
"(",
"\"-Djava... | Retrieve default JVM tuning options
Avoids issues with multiple spun up Java processes running into out of memory errors.
Parallel GC can use a lot of cores on big machines and primarily helps reduce task latency
and responsiveness which are not needed for batch jobs.
https://github.com/bcbio/bcbio-nextgen/issues/532#issuecomment-50989027
https://wiki.csiro.au/pages/viewpage.action?pageId=545034311
http://stackoverflow.com/questions/9738911/javas-serial-garbage-collector-performing-far-better-than-other-garbage-collect
However, serial GC causes issues with Spark local runs so we use parallel for those cases:
https://github.com/broadinstitute/gatk/issues/3605#issuecomment-332370070 | [
"Retrieve",
"default",
"JVM",
"tuning",
"options"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/__init__.py#L23-L38 |
223,668 | bcbio/bcbio-nextgen | bcbio/broad/__init__.py | _get_gatk_opts | def _get_gatk_opts(config, names, tmp_dir=None, memscale=None, include_gatk=True, parallel_gc=False):
"""Retrieve GATK memory specifications, moving down a list of potential specifications.
"""
if include_gatk and "gatk4" in dd.get_tools_off({"config": config}):
opts = ["-U", "LENIENT_VCF_PROCESSING", "--read_filter",
"BadCigar", "--read_filter", "NotPrimaryAlignment"]
else:
opts = []
jvm_opts = ["-Xms750m", "-Xmx2g"]
for n in names:
resources = config_utils.get_resources(n, config)
if resources and resources.get("jvm_opts"):
jvm_opts = resources.get("jvm_opts")
break
if memscale:
jvm_opts = config_utils.adjust_opts(jvm_opts, {"algorithm": {"memory_adjust": memscale}})
jvm_opts += get_default_jvm_opts(tmp_dir, parallel_gc=parallel_gc)
return jvm_opts + opts | python | def _get_gatk_opts(config, names, tmp_dir=None, memscale=None, include_gatk=True, parallel_gc=False):
"""Retrieve GATK memory specifications, moving down a list of potential specifications.
"""
if include_gatk and "gatk4" in dd.get_tools_off({"config": config}):
opts = ["-U", "LENIENT_VCF_PROCESSING", "--read_filter",
"BadCigar", "--read_filter", "NotPrimaryAlignment"]
else:
opts = []
jvm_opts = ["-Xms750m", "-Xmx2g"]
for n in names:
resources = config_utils.get_resources(n, config)
if resources and resources.get("jvm_opts"):
jvm_opts = resources.get("jvm_opts")
break
if memscale:
jvm_opts = config_utils.adjust_opts(jvm_opts, {"algorithm": {"memory_adjust": memscale}})
jvm_opts += get_default_jvm_opts(tmp_dir, parallel_gc=parallel_gc)
return jvm_opts + opts | [
"def",
"_get_gatk_opts",
"(",
"config",
",",
"names",
",",
"tmp_dir",
"=",
"None",
",",
"memscale",
"=",
"None",
",",
"include_gatk",
"=",
"True",
",",
"parallel_gc",
"=",
"False",
")",
":",
"if",
"include_gatk",
"and",
"\"gatk4\"",
"in",
"dd",
".",
"get... | Retrieve GATK memory specifications, moving down a list of potential specifications. | [
"Retrieve",
"GATK",
"memory",
"specifications",
"moving",
"down",
"a",
"list",
"of",
"potential",
"specifications",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/__init__.py#L40-L57 |
223,669 | bcbio/bcbio-nextgen | bcbio/broad/__init__.py | _clean_java_out | def _clean_java_out(version_str):
"""Remove extra environmental information reported in java when querying for versions.
Java will report information like _JAVA_OPTIONS environmental variables in the output.
"""
out = []
for line in version_str.decode().split("\n"):
if line.startswith("Picked up"):
pass
if line.find("setlocale") > 0:
pass
else:
out.append(line)
return "\n".join(out) | python | def _clean_java_out(version_str):
"""Remove extra environmental information reported in java when querying for versions.
Java will report information like _JAVA_OPTIONS environmental variables in the output.
"""
out = []
for line in version_str.decode().split("\n"):
if line.startswith("Picked up"):
pass
if line.find("setlocale") > 0:
pass
else:
out.append(line)
return "\n".join(out) | [
"def",
"_clean_java_out",
"(",
"version_str",
")",
":",
"out",
"=",
"[",
"]",
"for",
"line",
"in",
"version_str",
".",
"decode",
"(",
")",
".",
"split",
"(",
"\"\\n\"",
")",
":",
"if",
"line",
".",
"startswith",
"(",
"\"Picked up\"",
")",
":",
"pass",
... | Remove extra environmental information reported in java when querying for versions.
Java will report information like _JAVA_OPTIONS environmental variables in the output. | [
"Remove",
"extra",
"environmental",
"information",
"reported",
"in",
"java",
"when",
"querying",
"for",
"versions",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/__init__.py#L69-L82 |
223,670 | bcbio/bcbio-nextgen | bcbio/broad/__init__.py | get_mutect_version | def get_mutect_version(mutect_jar):
"""Retrieves version from input jar name since there is not an easy way to get MuTect version.
Check mutect jar for SomaticIndelDetector, which is an Appistry feature
"""
cl = ["java", "-Xms128m", "-Xmx256m"] + get_default_jvm_opts() + ["-jar", mutect_jar, "-h"]
with closing(subprocess.Popen(cl, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout) as stdout:
if "SomaticIndelDetector" in stdout.read().strip():
mutect_type = "-appistry"
else:
mutect_type = ""
version = os.path.basename(mutect_jar).lower()
for to_remove in [".jar", "-standalone", "mutect"]:
version = version.replace(to_remove, "")
if version.startswith(("-", ".")):
version = version[1:]
if not version:
raise ValueError("Unable to determine MuTect version from jar file. "
"Need to have version contained in jar (ie. muTect-1.1.5.jar): %s" % mutect_jar)
_check_for_bad_version(version, "MuTect")
return version + mutect_type | python | def get_mutect_version(mutect_jar):
"""Retrieves version from input jar name since there is not an easy way to get MuTect version.
Check mutect jar for SomaticIndelDetector, which is an Appistry feature
"""
cl = ["java", "-Xms128m", "-Xmx256m"] + get_default_jvm_opts() + ["-jar", mutect_jar, "-h"]
with closing(subprocess.Popen(cl, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout) as stdout:
if "SomaticIndelDetector" in stdout.read().strip():
mutect_type = "-appistry"
else:
mutect_type = ""
version = os.path.basename(mutect_jar).lower()
for to_remove in [".jar", "-standalone", "mutect"]:
version = version.replace(to_remove, "")
if version.startswith(("-", ".")):
version = version[1:]
if not version:
raise ValueError("Unable to determine MuTect version from jar file. "
"Need to have version contained in jar (ie. muTect-1.1.5.jar): %s" % mutect_jar)
_check_for_bad_version(version, "MuTect")
return version + mutect_type | [
"def",
"get_mutect_version",
"(",
"mutect_jar",
")",
":",
"cl",
"=",
"[",
"\"java\"",
",",
"\"-Xms128m\"",
",",
"\"-Xmx256m\"",
"]",
"+",
"get_default_jvm_opts",
"(",
")",
"+",
"[",
"\"-jar\"",
",",
"mutect_jar",
",",
"\"-h\"",
"]",
"with",
"closing",
"(",
... | Retrieves version from input jar name since there is not an easy way to get MuTect version.
Check mutect jar for SomaticIndelDetector, which is an Appistry feature | [
"Retrieves",
"version",
"from",
"input",
"jar",
"name",
"since",
"there",
"is",
"not",
"an",
"easy",
"way",
"to",
"get",
"MuTect",
"version",
".",
"Check",
"mutect",
"jar",
"for",
"SomaticIndelDetector",
"which",
"is",
"an",
"Appistry",
"feature"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/__init__.py#L110-L129 |
223,671 | bcbio/bcbio-nextgen | bcbio/broad/__init__.py | gatk_cmd | def gatk_cmd(name, jvm_opts, params, config=None):
"""Retrieve PATH to gatk using locally installed java.
"""
if name == "gatk":
if isinstance(config, dict) and "config" not in config:
data = {"config": config}
else:
data = config
if not data or "gatk4" not in dd.get_tools_off(data):
return _gatk4_cmd(jvm_opts, params, data)
else:
name = "gatk3"
gatk_cmd = utils.which(os.path.join(os.path.dirname(os.path.realpath(sys.executable)), name))
# if we can't find via the local executable, fallback to being in the path
if not gatk_cmd:
gatk_cmd = utils.which(name)
if gatk_cmd:
return "%s && export PATH=%s:\"$PATH\" && %s %s %s" % \
(utils.clear_java_home(), utils.get_java_binpath(gatk_cmd), gatk_cmd,
" ".join(jvm_opts), " ".join([str(x) for x in params])) | python | def gatk_cmd(name, jvm_opts, params, config=None):
"""Retrieve PATH to gatk using locally installed java.
"""
if name == "gatk":
if isinstance(config, dict) and "config" not in config:
data = {"config": config}
else:
data = config
if not data or "gatk4" not in dd.get_tools_off(data):
return _gatk4_cmd(jvm_opts, params, data)
else:
name = "gatk3"
gatk_cmd = utils.which(os.path.join(os.path.dirname(os.path.realpath(sys.executable)), name))
# if we can't find via the local executable, fallback to being in the path
if not gatk_cmd:
gatk_cmd = utils.which(name)
if gatk_cmd:
return "%s && export PATH=%s:\"$PATH\" && %s %s %s" % \
(utils.clear_java_home(), utils.get_java_binpath(gatk_cmd), gatk_cmd,
" ".join(jvm_opts), " ".join([str(x) for x in params])) | [
"def",
"gatk_cmd",
"(",
"name",
",",
"jvm_opts",
",",
"params",
",",
"config",
"=",
"None",
")",
":",
"if",
"name",
"==",
"\"gatk\"",
":",
"if",
"isinstance",
"(",
"config",
",",
"dict",
")",
"and",
"\"config\"",
"not",
"in",
"config",
":",
"data",
"... | Retrieve PATH to gatk using locally installed java. | [
"Retrieve",
"PATH",
"to",
"gatk",
"using",
"locally",
"installed",
"java",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/__init__.py#L530-L549 |
223,672 | bcbio/bcbio-nextgen | bcbio/broad/__init__.py | _gatk4_cmd | def _gatk4_cmd(jvm_opts, params, data):
"""Retrieve unified command for GATK4, using 'gatk'. GATK3 is 'gatk3'.
"""
gatk_cmd = utils.which(os.path.join(os.path.dirname(os.path.realpath(sys.executable)), "gatk"))
return "%s && export PATH=%s:\"$PATH\" && gatk --java-options '%s' %s" % \
(utils.clear_java_home(), utils.get_java_binpath(gatk_cmd),
" ".join(jvm_opts), " ".join([str(x) for x in params])) | python | def _gatk4_cmd(jvm_opts, params, data):
"""Retrieve unified command for GATK4, using 'gatk'. GATK3 is 'gatk3'.
"""
gatk_cmd = utils.which(os.path.join(os.path.dirname(os.path.realpath(sys.executable)), "gatk"))
return "%s && export PATH=%s:\"$PATH\" && gatk --java-options '%s' %s" % \
(utils.clear_java_home(), utils.get_java_binpath(gatk_cmd),
" ".join(jvm_opts), " ".join([str(x) for x in params])) | [
"def",
"_gatk4_cmd",
"(",
"jvm_opts",
",",
"params",
",",
"data",
")",
":",
"gatk_cmd",
"=",
"utils",
".",
"which",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"sys",
... | Retrieve unified command for GATK4, using 'gatk'. GATK3 is 'gatk3'. | [
"Retrieve",
"unified",
"command",
"for",
"GATK4",
"using",
"gatk",
".",
"GATK3",
"is",
"gatk3",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/__init__.py#L551-L557 |
223,673 | bcbio/bcbio-nextgen | bcbio/broad/__init__.py | runner_from_path | def runner_from_path(cmd, config):
"""Simple command line runner that expects a bash cmd in the PATH.
This makes Picard tools back compatible with new approach of a single
jar + bash script.
"""
if cmd.endswith("picard"):
return PicardCmdRunner(cmd, config)
else:
raise ValueError("Do not support PATH running for %s" % cmd) | python | def runner_from_path(cmd, config):
"""Simple command line runner that expects a bash cmd in the PATH.
This makes Picard tools back compatible with new approach of a single
jar + bash script.
"""
if cmd.endswith("picard"):
return PicardCmdRunner(cmd, config)
else:
raise ValueError("Do not support PATH running for %s" % cmd) | [
"def",
"runner_from_path",
"(",
"cmd",
",",
"config",
")",
":",
"if",
"cmd",
".",
"endswith",
"(",
"\"picard\"",
")",
":",
"return",
"PicardCmdRunner",
"(",
"cmd",
",",
"config",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Do not support PATH running for ... | Simple command line runner that expects a bash cmd in the PATH.
This makes Picard tools back compatible with new approach of a single
jar + bash script. | [
"Simple",
"command",
"line",
"runner",
"that",
"expects",
"a",
"bash",
"cmd",
"in",
"the",
"PATH",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/__init__.py#L587-L596 |
223,674 | bcbio/bcbio-nextgen | bcbio/broad/__init__.py | BroadRunner._set_default_versions | def _set_default_versions(self, config):
"""Retrieve pre-computed version information for expensive to retrieve versions.
Starting up GATK takes a lot of resources so we do it once at start of analysis.
"""
out = []
for name in ["gatk", "gatk4", "picard", "mutect"]:
v = tz.get_in(["resources", name, "version"], config)
if not v:
try:
v = programs.get_version(name, config=config)
except KeyError:
v = None
out.append(v)
self._gatk_version, self._gatk4_version, self._picard_version, self._mutect_version = out | python | def _set_default_versions(self, config):
"""Retrieve pre-computed version information for expensive to retrieve versions.
Starting up GATK takes a lot of resources so we do it once at start of analysis.
"""
out = []
for name in ["gatk", "gatk4", "picard", "mutect"]:
v = tz.get_in(["resources", name, "version"], config)
if not v:
try:
v = programs.get_version(name, config=config)
except KeyError:
v = None
out.append(v)
self._gatk_version, self._gatk4_version, self._picard_version, self._mutect_version = out | [
"def",
"_set_default_versions",
"(",
"self",
",",
"config",
")",
":",
"out",
"=",
"[",
"]",
"for",
"name",
"in",
"[",
"\"gatk\"",
",",
"\"gatk4\"",
",",
"\"picard\"",
",",
"\"mutect\"",
"]",
":",
"v",
"=",
"tz",
".",
"get_in",
"(",
"[",
"\"resources\""... | Retrieve pre-computed version information for expensive to retrieve versions.
Starting up GATK takes a lot of resources so we do it once at start of analysis. | [
"Retrieve",
"pre",
"-",
"computed",
"version",
"information",
"for",
"expensive",
"to",
"retrieve",
"versions",
".",
"Starting",
"up",
"GATK",
"takes",
"a",
"lot",
"of",
"resources",
"so",
"we",
"do",
"it",
"once",
"at",
"start",
"of",
"analysis",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/__init__.py#L171-L184 |
223,675 | bcbio/bcbio-nextgen | bcbio/broad/__init__.py | BroadRunner.new_resources | def new_resources(self, program):
"""Set new resource usage for the given program.
This allows customization of memory usage for particular sub-programs
of GATK like HaplotypeCaller.
"""
resources = config_utils.get_resources(program, self._config)
if resources.get("jvm_opts"):
self._jvm_opts = resources.get("jvm_opts") | python | def new_resources(self, program):
"""Set new resource usage for the given program.
This allows customization of memory usage for particular sub-programs
of GATK like HaplotypeCaller.
"""
resources = config_utils.get_resources(program, self._config)
if resources.get("jvm_opts"):
self._jvm_opts = resources.get("jvm_opts") | [
"def",
"new_resources",
"(",
"self",
",",
"program",
")",
":",
"resources",
"=",
"config_utils",
".",
"get_resources",
"(",
"program",
",",
"self",
".",
"_config",
")",
"if",
"resources",
".",
"get",
"(",
"\"jvm_opts\"",
")",
":",
"self",
".",
"_jvm_opts",... | Set new resource usage for the given program.
This allows customization of memory usage for particular sub-programs
of GATK like HaplotypeCaller. | [
"Set",
"new",
"resource",
"usage",
"for",
"the",
"given",
"program",
".",
"This",
"allows",
"customization",
"of",
"memory",
"usage",
"for",
"particular",
"sub",
"-",
"programs",
"of",
"GATK",
"like",
"HaplotypeCaller",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/__init__.py#L186-L193 |
223,676 | bcbio/bcbio-nextgen | bcbio/broad/__init__.py | BroadRunner.run_fn | def run_fn(self, name, *args, **kwds):
"""Run pre-built functionality that used Broad tools by name.
See the gatkrun, picardrun module for available functions.
"""
fn = None
to_check = [picardrun]
for ns in to_check:
try:
fn = getattr(ns, name)
break
except AttributeError:
pass
assert fn is not None, "Could not find function %s in %s" % (name, to_check)
return fn(self, *args, **kwds) | python | def run_fn(self, name, *args, **kwds):
"""Run pre-built functionality that used Broad tools by name.
See the gatkrun, picardrun module for available functions.
"""
fn = None
to_check = [picardrun]
for ns in to_check:
try:
fn = getattr(ns, name)
break
except AttributeError:
pass
assert fn is not None, "Could not find function %s in %s" % (name, to_check)
return fn(self, *args, **kwds) | [
"def",
"run_fn",
"(",
"self",
",",
"name",
",",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
":",
"fn",
"=",
"None",
"to_check",
"=",
"[",
"picardrun",
"]",
"for",
"ns",
"in",
"to_check",
":",
"try",
":",
"fn",
"=",
"getattr",
"(",
"ns",
",",
"name... | Run pre-built functionality that used Broad tools by name.
See the gatkrun, picardrun module for available functions. | [
"Run",
"pre",
"-",
"built",
"functionality",
"that",
"used",
"Broad",
"tools",
"by",
"name",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/__init__.py#L195-L209 |
223,677 | bcbio/bcbio-nextgen | bcbio/broad/__init__.py | BroadRunner.cl_picard | def cl_picard(self, command, options, memscale=None):
"""Prepare a Picard commandline.
"""
options = ["%s=%s" % (x, y) for x, y in options]
options.append("VALIDATION_STRINGENCY=SILENT")
return self._get_picard_cmd(command, memscale=memscale) + options | python | def cl_picard(self, command, options, memscale=None):
"""Prepare a Picard commandline.
"""
options = ["%s=%s" % (x, y) for x, y in options]
options.append("VALIDATION_STRINGENCY=SILENT")
return self._get_picard_cmd(command, memscale=memscale) + options | [
"def",
"cl_picard",
"(",
"self",
",",
"command",
",",
"options",
",",
"memscale",
"=",
"None",
")",
":",
"options",
"=",
"[",
"\"%s=%s\"",
"%",
"(",
"x",
",",
"y",
")",
"for",
"x",
",",
"y",
"in",
"options",
"]",
"options",
".",
"append",
"(",
"\... | Prepare a Picard commandline. | [
"Prepare",
"a",
"Picard",
"commandline",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/__init__.py#L211-L216 |
223,678 | bcbio/bcbio-nextgen | bcbio/broad/__init__.py | BroadRunner.run | def run(self, command, options, pipe=False, get_stdout=False, memscale=None):
"""Run a Picard command with the provided option pairs.
"""
cl = self.cl_picard(command, options, memscale=memscale)
if pipe:
subprocess.Popen(cl)
elif get_stdout:
p = subprocess.Popen(cl, stdout=subprocess.PIPE)
stdout = p.stdout.read()
p.wait()
p.stdout.close()
return stdout
else:
do.run(cl, "Picard {0}".format(command), None) | python | def run(self, command, options, pipe=False, get_stdout=False, memscale=None):
"""Run a Picard command with the provided option pairs.
"""
cl = self.cl_picard(command, options, memscale=memscale)
if pipe:
subprocess.Popen(cl)
elif get_stdout:
p = subprocess.Popen(cl, stdout=subprocess.PIPE)
stdout = p.stdout.read()
p.wait()
p.stdout.close()
return stdout
else:
do.run(cl, "Picard {0}".format(command), None) | [
"def",
"run",
"(",
"self",
",",
"command",
",",
"options",
",",
"pipe",
"=",
"False",
",",
"get_stdout",
"=",
"False",
",",
"memscale",
"=",
"None",
")",
":",
"cl",
"=",
"self",
".",
"cl_picard",
"(",
"command",
",",
"options",
",",
"memscale",
"=",
... | Run a Picard command with the provided option pairs. | [
"Run",
"a",
"Picard",
"command",
"with",
"the",
"provided",
"option",
"pairs",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/__init__.py#L218-L231 |
223,679 | bcbio/bcbio-nextgen | bcbio/broad/__init__.py | BroadRunner.cl_mutect | def cl_mutect(self, params, tmp_dir):
"""Define parameters to run the mutect paired algorithm.
"""
gatk_jar = self._get_jar("muTect", ["mutect"])
# Decrease memory slightly from configuration to avoid memory allocation errors
jvm_opts = config_utils.adjust_opts(self._jvm_opts,
{"algorithm": {"memory_adjust":
{"magnitude": 1.1, "direction": "decrease"}}})
return ["java"] + jvm_opts + get_default_jvm_opts(tmp_dir) + \
["-jar", gatk_jar] + [str(x) for x in params] | python | def cl_mutect(self, params, tmp_dir):
"""Define parameters to run the mutect paired algorithm.
"""
gatk_jar = self._get_jar("muTect", ["mutect"])
# Decrease memory slightly from configuration to avoid memory allocation errors
jvm_opts = config_utils.adjust_opts(self._jvm_opts,
{"algorithm": {"memory_adjust":
{"magnitude": 1.1, "direction": "decrease"}}})
return ["java"] + jvm_opts + get_default_jvm_opts(tmp_dir) + \
["-jar", gatk_jar] + [str(x) for x in params] | [
"def",
"cl_mutect",
"(",
"self",
",",
"params",
",",
"tmp_dir",
")",
":",
"gatk_jar",
"=",
"self",
".",
"_get_jar",
"(",
"\"muTect\"",
",",
"[",
"\"mutect\"",
"]",
")",
"# Decrease memory slightly from configuration to avoid memory allocation errors",
"jvm_opts",
"=",... | Define parameters to run the mutect paired algorithm. | [
"Define",
"parameters",
"to",
"run",
"the",
"mutect",
"paired",
"algorithm",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/__init__.py#L330-L339 |
223,680 | bcbio/bcbio-nextgen | bcbio/broad/__init__.py | BroadRunner.run_gatk | def run_gatk(self, params, tmp_dir=None, log_error=True,
data=None, region=None, memscale=None, parallel_gc=False, ld_preload=False):
"""Top level interface to running a GATK command.
ld_preload injects required libraries for Java JNI calls:
https://gatkforums.broadinstitute.org/gatk/discussion/8810/something-about-create-pon-workflow
"""
needs_java7 = LooseVersion(self.get_gatk_version()) < LooseVersion("3.6")
# For old Java requirements use global java 7
if needs_java7:
setpath.remove_bcbiopath()
with tx_tmpdir(self._config) as local_tmp_dir:
if tmp_dir is None:
tmp_dir = local_tmp_dir
cl = self.cl_gatk(params, tmp_dir, memscale=memscale, parallel_gc=parallel_gc)
atype_index = params.index("-T") if params.count("-T") > 0 \
else params.index("--analysis_type")
prog = params[atype_index + 1]
cl = fix_missing_spark_user(cl, prog, params)
if ld_preload:
cl = "export LD_PRELOAD=%s/lib/libopenblas.so && %s" % (os.path.dirname(utils.get_bcbio_bin()), cl)
do.run(cl, "GATK: {0}".format(prog), data, region=region,
log_error=log_error)
if needs_java7:
setpath.prepend_bcbiopath() | python | def run_gatk(self, params, tmp_dir=None, log_error=True,
data=None, region=None, memscale=None, parallel_gc=False, ld_preload=False):
"""Top level interface to running a GATK command.
ld_preload injects required libraries for Java JNI calls:
https://gatkforums.broadinstitute.org/gatk/discussion/8810/something-about-create-pon-workflow
"""
needs_java7 = LooseVersion(self.get_gatk_version()) < LooseVersion("3.6")
# For old Java requirements use global java 7
if needs_java7:
setpath.remove_bcbiopath()
with tx_tmpdir(self._config) as local_tmp_dir:
if tmp_dir is None:
tmp_dir = local_tmp_dir
cl = self.cl_gatk(params, tmp_dir, memscale=memscale, parallel_gc=parallel_gc)
atype_index = params.index("-T") if params.count("-T") > 0 \
else params.index("--analysis_type")
prog = params[atype_index + 1]
cl = fix_missing_spark_user(cl, prog, params)
if ld_preload:
cl = "export LD_PRELOAD=%s/lib/libopenblas.so && %s" % (os.path.dirname(utils.get_bcbio_bin()), cl)
do.run(cl, "GATK: {0}".format(prog), data, region=region,
log_error=log_error)
if needs_java7:
setpath.prepend_bcbiopath() | [
"def",
"run_gatk",
"(",
"self",
",",
"params",
",",
"tmp_dir",
"=",
"None",
",",
"log_error",
"=",
"True",
",",
"data",
"=",
"None",
",",
"region",
"=",
"None",
",",
"memscale",
"=",
"None",
",",
"parallel_gc",
"=",
"False",
",",
"ld_preload",
"=",
"... | Top level interface to running a GATK command.
ld_preload injects required libraries for Java JNI calls:
https://gatkforums.broadinstitute.org/gatk/discussion/8810/something-about-create-pon-workflow | [
"Top",
"level",
"interface",
"to",
"running",
"a",
"GATK",
"command",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/__init__.py#L341-L365 |
223,681 | bcbio/bcbio-nextgen | bcbio/broad/__init__.py | BroadRunner.get_gatk_version | def get_gatk_version(self):
"""Retrieve GATK version, handling locally and config cached versions.
Calling version can be expensive due to all the startup and shutdown
of JVMs, so we prefer cached version information.
"""
if self._gatk_version is None:
self._set_default_versions(self._config)
if "gatk4" not in dd.get_tools_off({"config": self._config}):
# In cases whwere we don't have manifest versions. Not possible to get
# version from commandline with GATK4 alpha version
if self._gatk4_version is None:
self._gatk4_version = "4.0"
return self._gatk4_version
elif self._gatk_version is not None:
return self._gatk_version
else:
if self._has_gatk_conda_wrapper():
gatk_jar = None
else:
gatk_jar = self._get_jar("GenomeAnalysisTK", ["GenomeAnalysisTKLite"], allow_missing=True)
self._gatk_version = get_gatk_version(gatk_jar, config=self._config)
return self._gatk_version | python | def get_gatk_version(self):
"""Retrieve GATK version, handling locally and config cached versions.
Calling version can be expensive due to all the startup and shutdown
of JVMs, so we prefer cached version information.
"""
if self._gatk_version is None:
self._set_default_versions(self._config)
if "gatk4" not in dd.get_tools_off({"config": self._config}):
# In cases whwere we don't have manifest versions. Not possible to get
# version from commandline with GATK4 alpha version
if self._gatk4_version is None:
self._gatk4_version = "4.0"
return self._gatk4_version
elif self._gatk_version is not None:
return self._gatk_version
else:
if self._has_gatk_conda_wrapper():
gatk_jar = None
else:
gatk_jar = self._get_jar("GenomeAnalysisTK", ["GenomeAnalysisTKLite"], allow_missing=True)
self._gatk_version = get_gatk_version(gatk_jar, config=self._config)
return self._gatk_version | [
"def",
"get_gatk_version",
"(",
"self",
")",
":",
"if",
"self",
".",
"_gatk_version",
"is",
"None",
":",
"self",
".",
"_set_default_versions",
"(",
"self",
".",
"_config",
")",
"if",
"\"gatk4\"",
"not",
"in",
"dd",
".",
"get_tools_off",
"(",
"{",
"\"config... | Retrieve GATK version, handling locally and config cached versions.
Calling version can be expensive due to all the startup and shutdown
of JVMs, so we prefer cached version information. | [
"Retrieve",
"GATK",
"version",
"handling",
"locally",
"and",
"config",
"cached",
"versions",
".",
"Calling",
"version",
"can",
"be",
"expensive",
"due",
"to",
"all",
"the",
"startup",
"and",
"shutdown",
"of",
"JVMs",
"so",
"we",
"prefer",
"cached",
"version",
... | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/__init__.py#L376-L398 |
223,682 | bcbio/bcbio-nextgen | bcbio/broad/__init__.py | BroadRunner.get_mutect_version | def get_mutect_version(self):
"""Retrieve the Mutect version.
"""
if self._mutect_version is None:
mutect_jar = self._get_jar("muTect", ["mutect"])
self._mutect_version = get_mutect_version(mutect_jar)
return self._mutect_version | python | def get_mutect_version(self):
"""Retrieve the Mutect version.
"""
if self._mutect_version is None:
mutect_jar = self._get_jar("muTect", ["mutect"])
self._mutect_version = get_mutect_version(mutect_jar)
return self._mutect_version | [
"def",
"get_mutect_version",
"(",
"self",
")",
":",
"if",
"self",
".",
"_mutect_version",
"is",
"None",
":",
"mutect_jar",
"=",
"self",
".",
"_get_jar",
"(",
"\"muTect\"",
",",
"[",
"\"mutect\"",
"]",
")",
"self",
".",
"_mutect_version",
"=",
"get_mutect_ver... | Retrieve the Mutect version. | [
"Retrieve",
"the",
"Mutect",
"version",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/__init__.py#L400-L406 |
223,683 | bcbio/bcbio-nextgen | bcbio/broad/__init__.py | BroadRunner.gatk_major_version | def gatk_major_version(self):
"""Retrieve the GATK major version, handling multiple GATK distributions.
Has special cases for GATK nightly builds, Appistry releases and
GATK prior to 2.3.
"""
full_version = self.get_gatk_version()
# Working with a recent version if using nightlies
if full_version.startswith("nightly-"):
return "3.6"
parts = full_version.split("-")
if len(parts) == 4:
appistry_release, version, subversion, githash = parts
elif len(parts) == 3:
version, subversion, githash = parts
elif len(parts) == 2:
version, subversion = parts
elif len(parts) == 1:
version = parts[0]
# version was not properly implemented in earlier GATKs
else:
version = "2.3"
if version.startswith("v"):
version = version[1:]
return version | python | def gatk_major_version(self):
"""Retrieve the GATK major version, handling multiple GATK distributions.
Has special cases for GATK nightly builds, Appistry releases and
GATK prior to 2.3.
"""
full_version = self.get_gatk_version()
# Working with a recent version if using nightlies
if full_version.startswith("nightly-"):
return "3.6"
parts = full_version.split("-")
if len(parts) == 4:
appistry_release, version, subversion, githash = parts
elif len(parts) == 3:
version, subversion, githash = parts
elif len(parts) == 2:
version, subversion = parts
elif len(parts) == 1:
version = parts[0]
# version was not properly implemented in earlier GATKs
else:
version = "2.3"
if version.startswith("v"):
version = version[1:]
return version | [
"def",
"gatk_major_version",
"(",
"self",
")",
":",
"full_version",
"=",
"self",
".",
"get_gatk_version",
"(",
")",
"# Working with a recent version if using nightlies",
"if",
"full_version",
".",
"startswith",
"(",
"\"nightly-\"",
")",
":",
"return",
"\"3.6\"",
"part... | Retrieve the GATK major version, handling multiple GATK distributions.
Has special cases for GATK nightly builds, Appistry releases and
GATK prior to 2.3. | [
"Retrieve",
"the",
"GATK",
"major",
"version",
"handling",
"multiple",
"GATK",
"distributions",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/__init__.py#L420-L444 |
223,684 | bcbio/bcbio-nextgen | bcbio/broad/__init__.py | BroadRunner._get_picard_cmd | def _get_picard_cmd(self, command, memscale=None):
"""Retrieve the base Picard command, handling both shell scripts and directory of jars.
"""
resources = config_utils.get_resources("picard", self._config)
if memscale:
jvm_opts = get_picard_opts(self._config, memscale=memscale)
elif resources.get("jvm_opts"):
jvm_opts = resources.get("jvm_opts")
else:
jvm_opts = self._jvm_opts
if os.path.isdir(self._picard_ref):
dist_file = self._get_jar(command)
return ["java"] + jvm_opts + get_default_jvm_opts() + ["-jar", dist_file]
else:
# XXX Cannot currently set JVM opts with picard-tools script
return [self._picard_ref, command] | python | def _get_picard_cmd(self, command, memscale=None):
"""Retrieve the base Picard command, handling both shell scripts and directory of jars.
"""
resources = config_utils.get_resources("picard", self._config)
if memscale:
jvm_opts = get_picard_opts(self._config, memscale=memscale)
elif resources.get("jvm_opts"):
jvm_opts = resources.get("jvm_opts")
else:
jvm_opts = self._jvm_opts
if os.path.isdir(self._picard_ref):
dist_file = self._get_jar(command)
return ["java"] + jvm_opts + get_default_jvm_opts() + ["-jar", dist_file]
else:
# XXX Cannot currently set JVM opts with picard-tools script
return [self._picard_ref, command] | [
"def",
"_get_picard_cmd",
"(",
"self",
",",
"command",
",",
"memscale",
"=",
"None",
")",
":",
"resources",
"=",
"config_utils",
".",
"get_resources",
"(",
"\"picard\"",
",",
"self",
".",
"_config",
")",
"if",
"memscale",
":",
"jvm_opts",
"=",
"get_picard_op... | Retrieve the base Picard command, handling both shell scripts and directory of jars. | [
"Retrieve",
"the",
"base",
"Picard",
"command",
"handling",
"both",
"shell",
"scripts",
"and",
"directory",
"of",
"jars",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/__init__.py#L446-L461 |
223,685 | bcbio/bcbio-nextgen | bcbio/broad/__init__.py | BroadRunner._get_jar | def _get_jar(self, command, alts=None, allow_missing=False):
"""Retrieve the jar for running the specified command.
"""
dirs = []
for bdir in [self._gatk_dir, self._picard_ref]:
dirs.extend([bdir,
os.path.join(bdir, os.pardir, "gatk")])
if alts is None: alts = []
for check_cmd in [command] + alts:
for dir_check in dirs:
try:
check_file = config_utils.get_jar(check_cmd, dir_check)
return check_file
except ValueError as msg:
if str(msg).find("multiple") > 0:
raise
else:
pass
if allow_missing:
return None
else:
raise ValueError("Could not find jar %s in %s:%s" % (command, self._picard_ref, self._gatk_dir)) | python | def _get_jar(self, command, alts=None, allow_missing=False):
"""Retrieve the jar for running the specified command.
"""
dirs = []
for bdir in [self._gatk_dir, self._picard_ref]:
dirs.extend([bdir,
os.path.join(bdir, os.pardir, "gatk")])
if alts is None: alts = []
for check_cmd in [command] + alts:
for dir_check in dirs:
try:
check_file = config_utils.get_jar(check_cmd, dir_check)
return check_file
except ValueError as msg:
if str(msg).find("multiple") > 0:
raise
else:
pass
if allow_missing:
return None
else:
raise ValueError("Could not find jar %s in %s:%s" % (command, self._picard_ref, self._gatk_dir)) | [
"def",
"_get_jar",
"(",
"self",
",",
"command",
",",
"alts",
"=",
"None",
",",
"allow_missing",
"=",
"False",
")",
":",
"dirs",
"=",
"[",
"]",
"for",
"bdir",
"in",
"[",
"self",
".",
"_gatk_dir",
",",
"self",
".",
"_picard_ref",
"]",
":",
"dirs",
".... | Retrieve the jar for running the specified command. | [
"Retrieve",
"the",
"jar",
"for",
"running",
"the",
"specified",
"command",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/__init__.py#L463-L484 |
223,686 | bcbio/bcbio-nextgen | bcbio/utils.py | cpmap | def cpmap(cores=1):
"""Configurable parallel map context manager.
Returns appropriate map compatible function based on configuration:
- Local single core (the default)
- Multiple local cores
"""
if int(cores) == 1:
yield itertools.imap
else:
if futures is None:
raise ImportError("concurrent.futures not available")
pool = futures.ProcessPoolExecutor(cores)
yield pool.map
pool.shutdown() | python | def cpmap(cores=1):
"""Configurable parallel map context manager.
Returns appropriate map compatible function based on configuration:
- Local single core (the default)
- Multiple local cores
"""
if int(cores) == 1:
yield itertools.imap
else:
if futures is None:
raise ImportError("concurrent.futures not available")
pool = futures.ProcessPoolExecutor(cores)
yield pool.map
pool.shutdown() | [
"def",
"cpmap",
"(",
"cores",
"=",
"1",
")",
":",
"if",
"int",
"(",
"cores",
")",
"==",
"1",
":",
"yield",
"itertools",
".",
"imap",
"else",
":",
"if",
"futures",
"is",
"None",
":",
"raise",
"ImportError",
"(",
"\"concurrent.futures not available\"",
")"... | Configurable parallel map context manager.
Returns appropriate map compatible function based on configuration:
- Local single core (the default)
- Multiple local cores | [
"Configurable",
"parallel",
"map",
"context",
"manager",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/utils.py#L34-L48 |
def map_wrap(f):
    """Wrap a standard function so it can be passed directly into ``map``.

    The returned wrapper is a thin pass-through that preserves the wrapped
    function's metadata (name, docstring) via ``functools.wraps``.
    """
    @functools.wraps(f)
    def _passthrough(*args, **kwargs):
        return f(*args, **kwargs)
    return _passthrough
def unpack_worlds(items):
    """Handle all the ways multiple samples get passed, for back-compatibility.

    Supports three input shapes:
      - old IPython style: a list of single-item lists/tuples of sample dicts
      - CWL style: a single argument wrapping the whole sample list
      - already flat: returned unchanged
    """
    first = items[0]
    # Old IPython style: each sample arrives wrapped in its own 1-item list.
    if isinstance(first, (list, tuple)) and len(first) == 1:
        unpacked = []
        for wrapper in items:
            assert len(wrapper) == 1 and isinstance(wrapper[0], dict), len(wrapper)
            unpacked.append(wrapper[0])
        return unpacked
    # CWL style: one argument containing the full sample list.
    if isinstance(items, (list, tuple)) and len(items) == 1 and isinstance(first, (list, tuple)):
        return items[0]
    return items
def safe_makedir(dname):
    """Make a directory if it doesn't exist, handling concurrent race conditions.

    Multiple processes may try to create the same directory simultaneously,
    so creation failures are retried a handful of times with a short pause
    before the error is allowed to propagate.
    """
    if not dname:
        return dname
    max_tries = 5
    attempt = 0
    while not os.path.exists(dname):
        try:
            os.makedirs(dname)
        except OSError:
            # Likely another process racing us; retry unless we have
            # already exhausted our attempts.
            if attempt > max_tries:
                raise
            attempt += 1
            time.sleep(2)
    return dname
def tmpfile(*args, **kwargs):
    """Yield a temporary file path, cleaning up descriptor and file afterwards.

    All arguments are forwarded to ``tempfile.mkstemp``.
    """
    handle, path = tempfile.mkstemp(*args, **kwargs)
    try:
        yield path
    finally:
        # Close the descriptor mkstemp opened, then drop the file itself.
        os.close(handle)
        if os.path.exists(path):
            os.remove(path)
def file_exists(fname):
    """Return a truthy value only when ``fname`` names an existing, non-empty file.

    A falsy ``fname`` (None, empty string) is returned unchanged; any
    OSError during the checks is treated as "does not exist".
    """
    if not fname:
        return fname
    try:
        return os.path.exists(fname) and os.path.getsize(fname) > 0
    except OSError:
        return False
def get_size(path):
    """Return the size in bytes of ``path``.

    For a regular file this is its own size; for a directory it is the
    recursive total of all contained files. Analogous to ``du -s``.
    """
    if os.path.isfile(path):
        return os.path.getsize(path)
    total = 0
    for entry in os.listdir(path):
        total += get_size(os.path.join(path, entry))
    return total
def read_galaxy_amqp_config(galaxy_config, base_dir):
    """Read RabbitMQ server connection information from a Galaxy config file.

    Returns a dict of every option in the ``[galaxy_amqp]`` section.
    """
    config_path = add_full_path(galaxy_config, base_dir)
    parser = six.moves.configparser.ConfigParser()
    parser.read(config_path)
    # Collect the whole [galaxy_amqp] section into a plain dict.
    return {option: parser.get("galaxy_amqp", option)
            for option in parser.options("galaxy_amqp")}
def move_safe(origin, target):
    """Move ``origin`` to ``target``, skipping when the move is a no-op.

    Nothing happens when source and destination are the same path or when
    the destination already exists as a non-empty file.
    """
    if origin == target:
        return origin
    if not file_exists(target):
        shutil.move(origin, target)
    return target
def file_plus_index(fname):
    """Convert a file name into a list of the file plus its required index.

    Known biological file extensions map to their standard index suffix;
    anything else is returned alone.
    """
    index_exts = {".vcf": ".idx", ".bam": ".bai", ".vcf.gz": ".tbi",
                  ".bed.gz": ".tbi", ".fq.gz": ".gbi"}
    ext = splitext_plus(fname)[-1]
    index_ext = index_exts.get(ext)
    if index_ext is None:
        return [fname]
    return [fname, fname + index_ext]
def remove_plus(orig):
    """Remove a file along with any associated biological index files.
    """
    for ext in ("", ".idx", ".gbi", ".tbi", ".bai"):
        fname = orig + ext
        if os.path.exists(fname):
            remove_safe(fname)
def copy_plus(orig, new):
    """Copy a file along with any associated biological index files.
    """
    for ext in ("", ".idx", ".gbi", ".tbi", ".bai"):
        src = orig + ext
        dst = new + ext
        # Copy when the source exists and the destination is absent
        # (or present only as a dangling link).
        if os.path.exists(src) and (not os.path.lexists(dst) or not os.path.exists(dst)):
            shutil.copyfile(src, dst)
def merge_config_files(fnames):
    """Merge YAML configuration files, preferring definitions in latter files.

    Dict values merge one level deep (later keys update earlier ones);
    non-dict values are replaced wholesale.
    """
    def _read_config(fname):
        with open(fname) as in_handle:
            return yaml.safe_load(in_handle)
    merged = _read_config(fnames[0])
    for fname in fnames[1:]:
        for key, val in _read_config(fname).items():
            if key in merged and isinstance(merged[key], dict):
                merged[key].update(val)
            else:
                merged[key] = val
    return merged
def deepish_copy(org):
    """Fast deep-ish copy for dictionaries of simple python types.

    Nested dicts are copied recursively; other values fall back from
    ``.copy()`` to slicing to plain assignment. Thanks to Gregg Lind:
    http://writeonly.wordpress.com/2009/05/07/deepcopy-is-a-pig-for-simple-data/
    """
    out = dict.fromkeys(org)
    for key, val in org.items():
        if isinstance(val, dict):
            out[key] = deepish_copy(val)
            continue
        try:
            out[key] = val.copy()   # dicts, sets
        except AttributeError:
            try:
                out[key] = val[:]   # lists, tuples, strings, unicode
            except TypeError:
                out[key] = val      # ints and other scalars
    return out
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.