| code | signature | docstring | loss_without_docstring | loss_with_docstring | factor |
| string | string | string | float64 | float64 | float64 |
|---|---|---|---|---|---|
if not chunks:
raise ValueError(
"No command parts: {} ({})".format(chunks, type(chunks)))
if isinstance(chunks, str):
return chunks
parsed_pieces = []
for cmd_part in chunks:
if cmd_part is None:
continue
try:
# Trim just space, not all whitespace.
# This prevents damage to an option that specifies,
# say, tab as a delimiter.
parsed_pieces.append(cmd_part.strip(" "))
except AttributeError:
option, argument = cmd_part
if argument is None or argument == "":
continue
option, argument = option.strip(" "), str(argument).strip(" ")
parsed_pieces.append("{} {}".format(option, argument))
return " ".join(parsed_pieces)
|
def build_command(chunks)
|
Create a command from various parts.
The parts provided may include a base, flags, option-bound arguments, and
positional arguments. Each element must be either a string or a two-tuple.
Raw strings are interpreted as either the command base, a pre-joined
pair (or multiple pairs) of option and argument, a series of positional
arguments, or a combination of those elements. The only modification they
undergo is trimming of any space characters from each end.
:param Iterable[str | (str, str | NoneType)] chunks: the collection of the
command components to interpret, modify, and join to create a
single meaningful command
:return str: the single meaningful command built from the given components
:raise ValueError: if no command parts are provided
| 4.786461
| 4.237336
| 1.129592
|
for path_name, path in sample.paths.items():
print("{}: '{}'".format(path_name, path))
base, ext = os.path.splitext(path)
if ext:
print("Skipping file-like: '[}'".format(path))
elif not os.path.isdir(base):
os.makedirs(base)
|
def build_sample_paths(sample)
|
Ensure existence of folders for a Sample.
:param looper.models.Sample sample: Sample (or instance supporting get()
that stores folder paths in a 'paths' key, in which the value is a
mapping from path name to actual folder path)
| 5.201777
| 4.902782
| 1.060985
|
# Allow Stage as type for checkpoint parameter's argument without
# needing to import here the Stage type from stage.py module.
try:
base = checkpoint.checkpoint_name
except AttributeError:
base = translate_stage_name(checkpoint)
if pipeline_name:
base = "{}{}{}".format(
pipeline_name, PIPELINE_CHECKPOINT_DELIMITER, base)
return base + CHECKPOINT_EXTENSION
|
def checkpoint_filename(checkpoint, pipeline_name=None)
|
Translate a checkpoint to a filename.
This not only adds the checkpoint file extension but also standardizes the
way in which checkpoint names are mapped to filenames.
:param str | pypiper.Stage checkpoint: name of a pipeline phase/stage
:param str pipeline_name: name of pipeline to prepend to the checkpoint
filename; this differentiates checkpoint files, e.g. within the
same sample output folder but associated with different pipelines,
in case of the (somewhat probable) scenario of a stage name
collision between pipelines that processed the same sample and
wrote to the same output folder
:return str | NoneType: standardized checkpoint name for file, plus
extension; null if the input is a Stage that's designated as a
non-checkpoint
| 8.84571
| 8.361936
| 1.057854
|
# Handle case in which checkpoint is given not just as a string, but
# as a checkpoint-like filename. Don't worry about absolute path status
# of a potential filename input, or whether it's in the pipeline's
# output folder. That's handled upstream. While this isn't a protected
# function, there's no real reason to call this from outside the package.
if isinstance(checkpoint, str):
if os.path.isabs(checkpoint):
if is_in_file_tree(checkpoint, pm.outfolder):
return checkpoint
else:
raise ValueError(
"Absolute checkpoint path '{}' is not in pipeline output "
"folder '{}'".format(checkpoint, pm.outfolder))
_, ext = os.path.splitext(checkpoint)
if ext == CHECKPOINT_EXTENSION:
return pipeline_filepath(pm, filename=checkpoint)
# Allow Pipeline as pm type without importing Pipeline.
try:
pm = pm.manager
except AttributeError:
pass
# We want the checkpoint filename itself to become a suffix, with a
# delimiter intervening between the pipeline name and the checkpoint
# name + extension. This is to handle the case in which a single, e.g.,
# sample's output folder is the destination for output from multiple
# pipelines, and we thus want to be able to distinguish between
# checkpoint files from different pipelines for that sample that may
# well define one or more stages with the same name (e.g., trim_reads,
# align_reads, etc.)
chkpt_name = checkpoint_filename(checkpoint, pipeline_name=pm.name)
return pipeline_filepath(pm, filename=chkpt_name)
|
def checkpoint_filepath(checkpoint, pm)
|
Create filepath for indicated checkpoint.
:param str | pypiper.Stage checkpoint: Pipeline phase/stage or one's name
:param pypiper.PipelineManager | pypiper.Pipeline pm: manager of a pipeline
instance, relevant for output folder path.
:return str: standardized checkpoint name for file, plus extension
:raise ValueError: if the checkpoint is given as absolute path that does
not point within pipeline output folder
| 7.26551
| 6.805312
| 1.067623
|
if isinstance(shell, bool):
return shell
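# Otherwise, guess from the command text: pipes, output redirection, or glob characters suggest shell involvement.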
return "|" in cmd or ">" in cmd or r"*" in cmd
|
def check_shell(cmd, shell=None)
|
Determine whether a command appears to involve shell process(es).
The shell argument can be used to override the result of the check.
:param str cmd: Command to investigate.
:param bool shell: override the result of the check with this value.
:return bool: Whether the command appears to involve shell process(es).
| 9.312864
| 10.950527
| 0.850449
|
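# Strip out any {...} spans first so a ">" inside curly brackets is not counted as redirection.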
curly_brackets = True
while curly_brackets:
SRE_match_obj = re.search(r'\{(.*?)}',cmd)
if SRE_match_obj is not None:
cmd = cmd[:SRE_match_obj.start()] + cmd[(SRE_match_obj.end()+1):]
if re.search(r'\{(.*?)}',cmd) is None:
curly_brackets = False
else:
curly_brackets = False
return ">" in cmd
|
def check_shell_redirection(cmd)
|
Determine whether a command appears to contain a shell redirection symbol outside of curly brackets.
:param str cmd: Command to investigate.
:return bool: Whether the command appears to contain shell redirection.
| 2.721396
| 2.663526
| 1.021727
|
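# Join list commands into a single string, then take the first token and drop any subshell parentheses.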
if isinstance(cmd, Iterable) and not isinstance(cmd, str):
cmd = " ".join(cmd)
return cmd.split()[0].replace('(', '').replace(')', '')
|
def get_proc_name(cmd)
|
Get the representative process name from a complex command.
:param str | list[str] cmd: a command to be processed
:return str: the representative command name
| 3.564635
| 4.509593
| 0.790456
|
# Search for the requested parameter.
for pool in param_pools:
if param in pool:
return pool[param]
# Raise an error if the parameter is unmapped and no strategy or value is
# provided for handling unmapped parameter requests.
if error and on_missing is None:
raise KeyError("Unmapped parameter: '{}'".format(param))
# Use the value or strategy for handling unmapped parameter case.
try:
return on_missing(param)
except TypeError:
if hasattr(on_missing, "__call__"):
raise TypeError(
"Any callable passed as the action to take when a requested "
"parameter is missing should accept that parameter and return "
"a value.")
return on_missing
|
def get_first_value(param, param_pools, on_missing=None, error=True)
|
Get the value for a particular parameter from the first pool in the provided
priority list of parameter pools.
:param str param: Name of parameter for which to determine/fetch value.
:param Sequence[Mapping[str, object]] param_pools: Ordered (priority)
collection of mapping from parameter name to value; this should be
ordered according to descending priority.
:param object | function(str) -> object on_missing: default value or
action to take if the requested parameter is missing from all of the
pools. If a callable, it should return a value when passed the
requested parameter as the one and only argument.
:param bool error: Whether to raise an error if the requested parameter
is not mapped to a value AND there's no value or strategy provided
with 'on_missing' with which to handle the case of a request for an
unmapped parameter.
:return object: Value to which the requested parameter first mapped in
the (descending) priority collection of parameter 'pools,' or
a value explicitly defined or derived with 'on_missing.'
:raise KeyError: If the requested parameter is unmapped in all of the
provided pools, and the argument to the 'error' parameter evaluates
to True.
| 5.711907
| 4.169869
| 1.369805
|
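# Append a trailing separator to the queried folder before comparing it with the file's containing folder.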
file_folder, _ = os.path.split(fpath)
other_folder = os.path.join(folder, "")
return other_folder.startswith(file_folder)
|
def is_in_file_tree(fpath, folder)
|
Determine whether a file is in a folder.
:param str fpath: filepath to investigate
:param str folder: path to folder to query
:return bool: whether the path indicated is in the folder indicated
| 3.697238
| 4.786077
| 0.772499
|
_, ext = os.path.splitext(file_name)
return file_name.endswith(".fastq.gz") or file_name.endswith(".fq.gz")
|
def is_gzipped_fastq(file_name)
|
Determine whether indicated file appears to be a gzipped FASTQ.
:param str file_name: Name/path of file to check as gzipped FASTQ.
:return bool: Whether indicated file appears to be in gzipped FASTQ format.
| 2.544674
| 3.06534
| 0.830144
|
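# Strip the base folder prefix and flatten the remaining path separators into '__' to form a single name.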
def make_name(p):
return p.replace(path_base_folder, "").replace(os.sep, "__")
if isinstance(original_path, str):
return make_name(original_path)
elif isinstance(original_path, Sequence):
return [make_name(p) for p in original_path]
raise TypeError("Neither string nor other sequence type: {} ({})".
format(original_path, type(original_path)))
|
def make_lock_name(original_path, path_base_folder)
|
Create name for lock file from an absolute path.
The original path must be absolute, and it should point to a location
within the location indicated by the base folder path provided. This is
particularly useful for deleting a sample's output folder path from
within the path of a target file to generate a lock file corresponding
to the original target.
:param str original_path: Full original filepath.
:param str path_base_folder: Portion of the original path to remove
:return str: Name of the lock file, or perhaps a path to it relative to
the indicated base folder
| 2.87041
| 3.24217
| 0.885336
|
if target is None or isinstance(target, str):
return False
elif isinstance(target, Sequence):
return len(target) > 1
else:
raise TypeError("Could not interpret argument as a target: {} ({})".
format(target, type(target)))
|
def is_multi_target(target)
|
Determine if pipeline manager's run target is multiple.
:param None or str or Sequence of str target: 0, 1, or multiple targets
:return bool: Whether there are multiple targets
:raise TypeError: if the argument is neither None nor string nor Sequence
| 3.185887
| 3.096428
| 1.028891
|
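# Precedence: explicit argument, then the manager's 'cores' attribute, then the required default.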
cores = cores or getattr(pm, "cores", default)
return int(cores)
|
def parse_cores(cores, pm, default)
|
Framework to finalize number of cores for an operation.
Some calls to a function may directly provide a desired number of cores,
others may not. Similarly, some pipeline managers may define a cores count
while others will not. This utility provides a single mechanism via which the
count of cores to use for an operation may be determined. If a cores
count is given explicitly, use that. Then try pipeline manager for cores.
Finally, fall back to a default. Force default to be defined (this
function is intended to be partially applied, then reused within a
module, class, etc. to standardize the way in which this value is
determined within a scope.)
:param int | str cores: direct specification of cores count
:param pypiper.PipelineManager pm: pipeline manager perhaps defining cores
:param int | str default: default number of cores, used if a value isn't
directly given and the pipeline manager doesn't define core count.
:return int: number of cores
| 6.516126
| 13.892586
| 0.469036
|
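# Accept a plain name, a Stage-like object exposing .name, or a callable exposing __name__.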
if isinstance(stage, str):
return stage
try:
return stage.name
except AttributeError:
try:
return stage.__name__
except AttributeError:
raise TypeError("Unsupported stage type: {}".format(type(stage)))
|
def parse_stage_name(stage)
|
Determine the name of a stage.
The stage may be provided already as a name, as a Stage object, or as a
callable with __name__ (e.g., function).
:param str | pypiper.Stage | function stage: Object representing a stage,
from which to obtain name.
:return str: Name of putative pipeline Stage.
| 2.670384
| 3.016978
| 0.885119
|
if filename is None and suffix is None:
raise TypeError("Provide filename and/or suffix to create "
"path to a pipeline file.")
filename = (filename or pm.name) + (suffix or "")
# Note that Pipeline and PipelineManager define the same outfolder.
# In fact, a Pipeline just references its manager's outfolder.
# So we can handle argument of either type to pm parameter.
return filename if os.path.isabs(filename) \
else os.path.join(pm.outfolder, filename)
|
def pipeline_filepath(pm, filename=None, suffix=None)
|
Derive path to file for managed pipeline.
:param pypiper.PipelineManager | pypiper.Pipeline pm: Manager of a
particular pipeline instance.
:param str filename: Name of file for which to create full path based
on pipeline's output folder.
:param str suffix: Suffix for the file; this can be added to the filename
if provided or added to the pipeline name if there's no filename.
:raises TypeError: If neither filename nor suffix is provided, raise a
TypeError, as in that case there's no substance from which to create
a filepath.
:return str: Path to file within managed pipeline's output folder, with
filename as given or determined by the pipeline name, and suffix
appended if given.
| 7.431487
| 6.655632
| 1.116571
|
# First ensure that we have text.
name = parse_stage_name(stage)
# Cast to string to ensure that indexed stages (ints) are handled.
return str(name).lower().replace(" ", STAGE_NAME_SPACE_REPLACEMENT)
|
def translate_stage_name(stage)
|
Account for potential variability in stage/phase name definition.
Since a pipeline author is free to name his/her processing phases/stages
as desired, but these choices influence file names, enforce some
standardization. Specifically, prohibit potentially problematic spaces.
:param str | pypiper.Stage | function stage: Pipeline stage, its name, or a
representative function.
:return str: Standardized pipeline phase/stage name.
| 17.260447
| 17.286997
| 0.998464
|
if sys.version_info < (3, 3):
from collections import Iterable
else:
from collections.abc import Iterable
# Define the argument groups.
args_by_group = {
"pypiper": ["recover", "new-start", "dirty", "force-follow"],
"config": ["config"],
"checkpoint": ["stop-before", "stop-after"],
"resource": ["mem", "cores"],
"looper": ["config", "output-parent", "mem", "cores"],
"common": ["input", "sample-name"],
"ngs": ["sample-name", "input", "input2", "genome", "single-or-paired"]
}
# Handle various types of group specifications.
groups = None
if use_all_args:
groups = args_by_group.keys()
elif isinstance(argument_groups, str):
groups = [argument_groups]
elif isinstance(argument_groups, Iterable):
groups = argument_groups
elif argument_groups:
raise TypeError("arguments must be a str or a list.")
# Collect the groups of arguments.
final_args = list()
if groups:
for g in groups:
try:
this_group_args = args_by_group[g]
except KeyError:
print("Skipping undefined pypiper argument group '{}'".format(g))
else:
final_args.extend(this_group_args)
# final_args |= {this_group_args} if \
# isinstance(this_group_args, str) else set(this_group_args)
# Handle various types of specific, individual argument specifications.
if isinstance(arguments, str):
final_args.append(arguments)
elif isinstance(arguments, Iterable):
final_args.extend(arguments)
elif arguments:
raise TypeError("arguments must be a str or a list.")
return uniqify(final_args)
|
def _determine_args(argument_groups, arguments, use_all_args=False)
|
Determine the arguments to add to a parser (for a pipeline).
:param Iterable[str] | str argument_groups: Collection of names of groups
of arguments to add to an argument parser.
:param Iterable[str] | str arguments: Collection of specific arguments to
add to the parser.
:param bool use_all_args: Whether to use all arguments defined here.
:return set[str]: Collection of (unique) argument names to add to a parser.
| 3.750574
| 3.605298
| 1.040295
|
import copy
required = required or []
# Determine the default pipeline config file.
pipeline_script = os.path.basename(sys.argv[0])
default_config, _ = os.path.splitext(pipeline_script)
default_config += ".yaml"
# Define the arguments.
argument_data = {
"recover":
("-R", {"action": "store_true",
"help": "Overwrite locks to recover from previous failed run"}),
"new-start":
("-N", {"action": "store_true",
"help": "Overwrite all results to start a fresh run"}),
"dirty":
("-D", {"action": "store_true",
"help": "Don't auto-delete intermediate files"}),
"force-follow":
("-F", {"action": "store_true",
"help": "Always run 'follow' commands"}),
"start-point":
{"help": "Name of pipeline stage at which to begin"},
"stop-before":
{"help": "Name of pipeline stage at which to stop "
"(exclusive, i.e. not run)"},
"stop-after":
{"help": "Name of pipeline stage at which to stop "
"(inclusive, i.e. run)"},
"config":
("-C", {"dest": "config_file", "metavar": "CONFIG_FILE",
"default": default_config,
"help": "Pipeline configuration file (YAML). "
"Relative paths are with respect to the "
"pipeline script."}),
"sample-name":
("-S", {"metavar": "SAMPLE_NAME",
"help": "Name for sample to run"}),
"output-parent":
("-O", {"metavar": "PARENT_OUTPUT_FOLDER",
"help": "Parent output directory of project"}),
"cores":
("-P", {"type": int, "default": 1, "metavar": "NUMBER_OF_CORES",
"help": "Number of cores for parallelized processes"}),
"mem":
("-M", {"default": "4000", "metavar": "MEMORY_LIMIT",
"help": "Memory limit for processes accepting such. "
"Default units are megabytes unless specified "
"using the suffix [K|M|G|T]."}),
"input":
("-I", {"nargs": "+", "metavar": "INPUT_FILES",
"help": "One or more primary input files"}),
"input2":
("-I2", {"nargs": "*", "metavar": "INPUT_FILES2",
"help": "Secondary input files, such as read2"}),
"genome":
("-G", {"dest": "genome_assembly",
"help": "Identifier for genome assembly"}),
"single-or-paired":
("-Q", {"default": "single",
"help": "Single- or paired-end sequencing protocol"})
}
if len(required) > 0:
required_named = parser.add_argument_group('required named arguments')
# Configure the parser for each argument.
for arg in args:
try:
argdata = copy.deepcopy(argument_data[arg])
except KeyError:
print("Skipping undefined pypiper argument: '{}'".format(arg))
continue
if isinstance(argdata, dict):
short_opt = None
else:
try:
short_opt, argdata = argdata
except ValueError:
raise TypeError(
"Option name must map to dict or two-tuple (short "
"name and dict) of argument command-line argument "
"specification data.")
argdata["required"] = arg in required
long_opt = "--{}".format(arg)
opts = (short_opt, long_opt) if short_opt else (long_opt, )
if arg in required:
required_named.add_argument(*opts, **argdata)
else:
parser.add_argument(*opts, **argdata)
return parser
|
def _add_args(parser, args, required)
|
Add new arguments to an ArgumentParser.
:param argparse.ArgumentParser parser: instance to update with new arguments
:param Iterable[str] args: Collection of names of arguments to add.
:param Iterable[str] required: Collection of arguments to designate as required
:return argparse.ArgumentParser: Updated ArgumentParser
| 3.242457
| 3.233149
| 1.002879
|
for p in paths:
# Only provide assurance for absolute paths.
if not p or not os.path.isabs(p):
continue
# See if what we're assuring is file- or folder-like.
fpath, fname = os.path.split(p)
base, ext = os.path.splitext(fname)
# If there's no extension, ensure that we have the whole path.
# Otherwise, just ensure that we have path to file's folder.
self.make_dir(fpath if ext else p)
|
def _ensure_folders(self, *paths)
|
Ensure that paths to folder(s) exist.
Some command-line tools will not attempt to create folder(s) needed
for output path to exist. They instead assume that they already are
present and will fail if that assumption does not hold.
:param Iterable[str] paths: Collection of paths for which to ensure folders exist.
| 6.268327
| 7.095163
| 0.883465
|
# Use `command` to see if command is callable, store exit code
code = os.system("command -v {0} >/dev/null 2>&1 || {{ exit 1; }}".format(command))
# If exit code is not 0, report which command failed and return False, else return True
if code != 0:
print("Command is not callable: {0}".format(command))
return False
else:
return True
|
def check_command(self, command)
|
Check if command can be called.
| 4.437078
| 4.154944
| 1.067903
|
# use (1024 ** 3) for gigabytes
# equivalent to: stat -Lc '%s' filename
# If given a list, recurse through it.
if type(filenames) is list:
return sum([self.get_file_size(filename) for filename in filenames])
return round(sum([float(os.stat(f).st_size) for f in filenames.split(" ")]) / (1024 ** 2), 4)
|
def get_file_size(self, filenames)
|
Get size of all files in a space-separated string, in megabytes (MB).
:param str filenames: a space-separated string of filenames
| 5.868743
| 5.614342
| 1.045313
|
self._ensure_folders(output_fastq, output_fastq2, unpaired_fastq)
cmd = self.tools.java + " -Xmx" + self.pm.javamem
cmd += " -jar " + self.tools.picard + " SamToFastq"
cmd += " INPUT={0}".format(input_bam)
cmd += " FASTQ={0}".format(output_fastq)
if output_fastq2 is not None and unpaired_fastq is not None:
cmd += " SECOND_END_FASTQ={0}".format(output_fastq2)
cmd += " UNPAIRED_FASTQ={0}".format(unpaired_fastq)
return cmd
|
def bam2fastq(self, input_bam, output_fastq,
output_fastq2=None, unpaired_fastq=None)
|
Create command to convert BAM(s) to FASTQ(s).
:param str input_bam: Path to sequencing reads file to convert
:param str output_fastq: Path to FASTQ to write
:param str output_fastq2: Path to (R2) FASTQ to write
:param str unpaired_fastq: Path to unpaired FASTQ to write
:return str: Command to convert BAM(s) to FASTQ(s)
| 2.409022
| 2.432745
| 0.990249
|
self.make_sure_path_exists(os.path.dirname(out_fastq_pre))
cmd = self.tools.java + " -Xmx" + self.pm.javamem
cmd += " -jar " + self.tools.picard + " SamToFastq"
cmd += " I=" + bam_file
cmd += " F=" + out_fastq_pre + "_R1.fastq"
if paired_end:
cmd += " F2=" + out_fastq_pre + "_R2.fastq"
cmd += " INCLUDE_NON_PF_READS=true"
cmd += " QUIET=true"
cmd += " VERBOSITY=ERROR"
cmd += " VALIDATION_STRINGENCY=SILENT"
return cmd
|
def bam_to_fastq(self, bam_file, out_fastq_pre, paired_end)
|
Build command to convert BAM file to FASTQ file(s) (R1/R2).
:param str bam_file: path to BAM file with sequencing reads
:param str out_fastq_pre: path prefix for output FASTQ file(s)
:param bool paired_end: whether the given file contains paired-end
or single-end sequencing reads
:return str: file conversion command, ready to run
| 2.347243
| 2.475731
| 0.948101
|
self.make_sure_path_exists(os.path.dirname(out_fastq_pre))
fq1 = out_fastq_pre + "_R1.fastq"
if paired_end:
fq2 = out_fastq_pre + "_R2.fastq"
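# SAM fields: $1 = read name, $10 = sequence, $11 = base qualities; odd records go to R1, even records to R2.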
cmd = self.tools.samtools + " view " + bam_file + " | awk '"
cmd += r'{ if (NR%2==1) print "@"$1"/1\n"$10"\n+\n"$11 > "' + fq1 + '";'
cmd += r' else print "@"$1"/2\n"$10"\n+\n"$11 > "' + fq2 + '"; }'
cmd += "'" # end the awk command
else:
fq2 = None
cmd = self.tools.samtools + " view " + bam_file + " | awk '"
cmd += r'{ print "@"$1"\n"$10"\n+\n"$11 > "' + fq1 + '"; }'
cmd += "'"
return cmd, fq1, fq2
|
def bam_to_fastq_awk(self, bam_file, out_fastq_pre, paired_end)
|
Convert a BAM file to FASTQ file(s), using awk. As of 2016, this is much faster
than the standard approach using Picard, and also much faster than the bedtools
implementation; however, it does no sanity checks and assumes the reads
(for paired data) are all paired (no singletons) and in the correct order.
| 2.40183
| 2.365858
| 1.015205
|
self.make_sure_path_exists(os.path.dirname(out_fastq_pre))
fq1 = out_fastq_pre + "_R1.fastq"
fq2 = None
cmd = self.tools.bedtools + " bamtofastq -i " + bam_file + " -fq " + fq1
if paired_end:
fq2 = out_fastq_pre + "_R2.fastq"
cmd += " -fq2 " + fq2
return cmd, fq1, fq2
|
def bam_to_fastq_bedtools(self, bam_file, out_fastq_pre, paired_end)
|
Convert BAM to FASTQ; a version that uses bedtools.
| 2.215394
| 2.182336
| 1.015148
|
if input_file.endswith(".bam"):
input_ext = ".bam"
elif input_file.endswith(".fastq.gz") or input_file.endswith(".fq.gz"):
input_ext = ".fastq.gz"
elif input_file.endswith(".fastq") or input_file.endswith(".fq"):
input_ext = ".fastq"
else:
errmsg = "'{}'; this pipeline can only deal with .bam, .fastq, " \
"or .fastq.gz files".format(input_file)
raise UnsupportedFiletypeException(errmsg)
return input_ext
|
def get_input_ext(self, input_file)
|
Get the extension of the input_file. Assumes you're using either
.bam or .fastq/.fq or .fastq.gz/.fq.gz.
| 2.298618
| 2.196408
| 1.046535
|
fastq_prefix = os.path.join(fastq_folder, sample_name)
self.make_sure_path_exists(fastq_folder)
# this expects a list; if it gets a string, convert it to a list.
if type(input_file) != list:
input_file = [input_file]
if len(input_file) > 1:
cmd = []
output_file = []
for in_i, in_arg in enumerate(input_file):
output = fastq_prefix + "_R" + str(in_i + 1) + ".fastq"
result_cmd, uf, result_file = \
self.input_to_fastq(in_arg, sample_name, paired_end,
fastq_folder, output, multiclass=True)
cmd.append(result_cmd)
output_file.append(result_file)
else:
# There was only 1 input class.
# Convert back into a string
input_file = input_file[0]
if not output_file:
output_file = fastq_prefix + "_R1.fastq"
input_ext = self.get_input_ext(input_file)
if input_ext == ".bam":
print("Found .bam file")
#cmd = self.bam_to_fastq(input_file, fastq_prefix, paired_end)
cmd, fq1, fq2 = self.bam_to_fastq_awk(input_file, fastq_prefix, paired_end)
# pm.run(cmd, output_file, follow=check_fastq)
elif input_ext == ".fastq.gz":
print("Found .fastq.gz file")
if paired_end and not multiclass:
# For paired-end reads in one fastq file, we must split the file into 2.
script_path = os.path.join(
self.tools.scripts_dir, "fastq_split.py")
cmd = self.tools.python + " -u " + script_path
cmd += " -i " + input_file
cmd += " -o " + fastq_prefix
# Must also return the set of output files
output_file = [fastq_prefix + "_R1.fastq", fastq_prefix + "_R2.fastq"]
else:
# For single-end reads, we just unzip the fastq.gz file.
# or, paired-end reads that were already split.
cmd = self.ziptool + " -d -c " + input_file + " > " + output_file
# a non-shell version
# cmd1 = "gunzip --force " + input_file
# cmd2 = "mv " + os.path.splitext(input_file)[0] + " " + output_file
# cmd = [cmd1, cmd2]
elif input_ext == ".fastq":
cmd = "ln -sf " + input_file + " " + output_file
print("Found .fastq file; no conversion necessary")
return [cmd, fastq_prefix, output_file]
|
def input_to_fastq(
self, input_file, sample_name,
paired_end, fastq_folder, output_file=None, multiclass=False)
|
Builds a command to convert input file to fastq, for various inputs.
Takes either .bam, .fastq.gz, or .fastq input and returns
commands that will create the .fastq file, regardless of input type.
This is useful to make your pipeline easily accept any of these input
types seamlessly, standardizing on fastq, which is still the
most common format for adapter trimmers, etc.
It will place the output fastq file in given `fastq_folder`.
:param str input_file: filename of input you want to convert to fastq
:return str: A command (to be run with PipelineManager) that will ensure
your fastq file exists.
| 3.034855
| 3.00509
| 1.009905
|
# Define a temporary function which we will return, to be called by the
# pipeline.
# Must define default parameters here based on the parameters passed in. This locks
# these values in place, so that the variables will be defined when this function
# is called without parameters as a follow function by pm.run.
# This is AFTER merge, so if there are multiple files it means the
# files were split into read1/read2; therefore I must divide by number
# of files for final reads.
def temp_func(input_files=input_files, output_files=output_files,
paired_end=paired_end):
if type(input_files) != list:
input_files = [input_files]
if type(output_files) != list:
output_files = [output_files]
print(input_files)
print(output_files)
n_input_files = len(list(filter(bool, input_files)))  # list() for Python 3, where filter returns an iterator
total_reads = sum([int(self.count_reads(input_file, paired_end))
for input_file in input_files])
raw_reads = total_reads / n_input_files
self.pm.report_result("Raw_reads", str(raw_reads))
total_fastq_reads = sum(
[int(self.count_reads(output_file, paired_end))
for output_file in output_files])
fastq_reads = total_fastq_reads / n_input_files
self.pm.report_result("Fastq_reads", fastq_reads)
input_ext = self.get_input_ext(input_files[0])
# We can only assess pass filter reads in bam files with flags.
if input_ext == ".bam":
num_failed_filter = sum(
[int(self.count_fail_reads(f, paired_end))
for f in input_files])
pf_reads = int(raw_reads) - num_failed_filter
self.pm.report_result("PF_reads", str(pf_reads))
if fastq_reads != int(raw_reads):
raise Exception("Fastq conversion error? Number of reads "
"doesn't match unaligned bam")
return fastq_reads
return temp_func
|
def check_fastq(self, input_files, output_files, paired_end)
|
Returns a follow sanity-check function to be run after a fastq conversion.
Run following a command that will produce the fastq files.
This function will make sure any input files have the same number of reads as the
output files.
| 4.628858
| 4.435498
| 1.043594
|
def temp_func():
print("Evaluating read trimming")
if paired_end and not trimmed_fastq_R2:
print("WARNING: specified paired-end but no R2 file")
n_trim = float(self.count_reads(trimmed_fastq, paired_end))
self.pm.report_result("Trimmed_reads", int(n_trim))
try:
rr = float(self.pm.get_stat("Raw_reads"))
except:
print("Can't calculate trim loss rate without raw read result.")
else:
self.pm.report_result(
"Trim_loss_rate", round((rr - n_trim) * 100 / rr, 2))
# Also run a fastqc (if installed/requested)
if fastqc_folder:
if fastqc_folder and os.path.isabs(fastqc_folder):
self.make_sure_path_exists(fastqc_folder)
cmd = self.fastqc(trimmed_fastq, fastqc_folder)
self.pm.run(cmd, lock_name="trimmed_fastqc", nofail=True)
fname, ext = os.path.splitext(os.path.basename(trimmed_fastq))
fastqc_html = os.path.join(fastqc_folder, fname + "_fastqc.html")
self.pm.report_object("FastQC report r1", fastqc_html)
if paired_end and trimmed_fastq_R2:
cmd = self.fastqc(trimmed_fastq_R2, fastqc_folder)
self.pm.run(cmd, lock_name="trimmed_fastqc_R2", nofail=True)
fname, ext = os.path.splitext(os.path.basename(trimmed_fastq_R2))
fastqc_html = os.path.join(fastqc_folder, fname + "_fastqc.html")
self.pm.report_object("FastQC report r2", fastqc_html)
return temp_func
|
def check_trim(self, trimmed_fastq, paired_end, trimmed_fastq_R2=None, fastqc_folder=None)
|
Build function to evaluate read trimming, and optionally run fastqc.
This is useful to construct an argument for the 'follow' parameter of
a PipelineManager's 'run' method.
:param str trimmed_fastq: Path to trimmed reads file.
:param bool paired_end: Whether the processing is being done with
paired-end sequencing data.
:param str trimmed_fastq_R2: Path to read 2 file for the paired-end case.
:param str fastqc_folder: Path to folder within which to place fastqc
output files; if unspecified, fastqc will not be run.
:return callable: Function to evaluate read trimming and possibly run
fastqc.
| 2.803056
| 2.712471
| 1.033396
|
cmd = self.tools.java + " -Xmx" + self.pm.javamem
cmd += " -jar " + self.tools.picard + " ValidateSamFile"
cmd += " INPUT=" + input_bam
return cmd
|
def validate_bam(self, input_bam)
|
Wrapper for Picard's ValidateSamFile.
:param str input_bam: Path to file to validate.
:return str: Command to run for the validation.
| 4.446613
| 3.861595
| 1.151496
|
if not len(input_bams) > 1:
print("No merge required")
return 0
outdir, _ = os.path.split(merged_bam)
if outdir and not os.path.exists(outdir):
print("Creating path to merge file's folder: '{}'".format(outdir))
os.makedirs(outdir)
# Handle more intuitive boolean argument.
if in_sorted in [False, True]:
in_sorted = "TRUE" if in_sorted else "FALSE"
input_string = " INPUT=" + " INPUT=".join(input_bams)
cmd = self.tools.java + " -Xmx" + self.pm.javamem
cmd += " -jar " + self.tools.picard + " MergeSamFiles"
cmd += input_string
cmd += " OUTPUT=" + merged_bam
cmd += " ASSUME_SORTED=" + str(in_sorted)
cmd += " CREATE_INDEX=TRUE"
cmd += " VALIDATION_STRINGENCY=SILENT"
if tmp_dir:
cmd += " TMP_DIR=" + tmp_dir
return cmd
|
def merge_bams(self, input_bams, merged_bam, in_sorted="TRUE", tmp_dir=None)
|
Combine multiple files into one.
The tmp_dir parameter is important because on poorly configured
systems, the default can sometimes fill up.
:param Iterable[str] input_bams: Paths to files to combine
:param str merged_bam: Path to which to write combined result.
:param bool | str in_sorted: Whether the inputs are sorted
:param str tmp_dir: Path to temporary directory.
| 3.053795
| 3.138738
| 0.972937
|
if remove_inputs and not run:
raise ValueError("Can't delete files if command isn't run")
cmd = "cat {} > {}".format(" ".join(inputs), output)
if run:
subprocess.check_call(cmd, shell=True)
if remove_inputs:
cmd = "rm {}".format(" ".join(inputs))
subprocess.check_call(cmd, shell=True)
else:
return cmd
|
def merge_fastq(self, inputs, output, run=False, remove_inputs=False)
|
Merge FASTQ files (zipped or not) into one.
:param Iterable[str] inputs: Collection of paths to files to merge.
:param str output: Path to single output file.
:param bool run: Whether to run the command.
:param bool remove_inputs: Whether to remove the original input files after merging.
:return NoneType | str: Null if running the command, otherwise the
command itself
:raise ValueError: Raise ValueError if the call is such that
inputs are to be deleted but command is not run.
| 2.802959
| 2.300355
| 1.21849
|
x = subprocess.check_output("wc -l " + file_name + " | sed -E 's/^[[:space:]]+//' | cut -f1 -d' '", shell=True)
return x.strip()
|
def count_lines(self, file_name)
|
Use the command-line utility wc to count the number of lines in a file.
On macOS, wc pads the count with leading whitespace, which must be stripped.
:param str file_name: name of file whose lines are to be counted
| 3.580799
| 3.727656
| 0.960604
|
x = subprocess.check_output(self.tools.samtools + " view -H " + file_name + " | grep '^@SQ' | cut -f2| sed s'/SN://'", shell=True)
# Chromosomes will be separated by newlines; split into list to return
return x.split()
|
def get_chrs_from_bam(self, file_name)
|
Uses samtools to grab the chromosomes from the header that are contained
in this bam file.
| 6.397407
| 5.697834
| 1.122779
|
if file_name.endswith("sam"):
param = "-S"
if file_name.endswith("bam"):
param = ""
if paired_end:
r1 = self.samtools_view(file_name, param=param + " -f64", postpend=" | cut -f1 | sort -k1,1 -u | wc -l | sed -E 's/^[[:space:]]+//'")
r2 = self.samtools_view(file_name, param=param + " -f128", postpend=" | cut -f1 | sort -k1,1 -u | wc -l | sed -E 's/^[[:space:]]+//'")
else:
r1 = self.samtools_view(file_name, param=param + "", postpend=" | cut -f1 | sort -k1,1 -u | wc -l | sed -E 's/^[[:space:]]+//'")
r2 = 0
return int(r1) + int(r2)
|
def count_unique_reads(self, file_name, paired_end)
|
Sometimes alignment software puts multiple locations for a single read; if you just count
those reads, you will get an inaccurate count. This is _not_ the same as multimapping reads,
which may or may not be actually duplicated in the bam file (depending on the alignment
software).
This function counts each read only once.
Paired-end data is handled for free, because both mates of a pair share the same read name.
In this function, a paired-end read would count as 2 reads.
| 2.168379
| 2.148317
| 1.009339
|
_, ext = os.path.splitext(file_name)
ext = ext.lower()
if ext == ".sam":
param = "-S -F4"
elif ext == "bam":
param = "-F4"
else:
raise ValueError("Not a SAM or BAM: '{}'".format(file_name))
if paired_end:
r1 = self.samtools_view(file_name, param=param + " -f64", postpend=" | cut -f1 | sort -k1,1 -u | wc -l | sed -E 's/^[[:space:]]+//'")
r2 = self.samtools_view(file_name, param=param + " -f128", postpend=" | cut -f1 | sort -k1,1 -u | wc -l | sed -E 's/^[[:space:]]+//'")
else:
r1 = self.samtools_view(file_name, param=param + "", postpend=" | cut -f1 | sort -k1,1 -u | wc -l | sed -E 's/^[[:space:]]+//'")
r2 = 0
return int(r1) + int(r2)
|
def count_unique_mapped_reads(self, file_name, paired_end)
|
For a bam or sam file with paired or single-end reads, returns the
number of mapped reads, counting each read only once, even if it appears
mapped at multiple locations.
:param str file_name: name of reads file
:param bool paired_end: True/False paired end data
:return int: Number of uniquely mapped reads.
| 2.35479
| 2.351081
| 1.001578
|
param = " -c -f" + str(flag)
if file_name.endswith("sam"):
param += " -S"
return self.samtools_view(file_name, param=param)
|
def count_flag_reads(self, file_name, flag, paired_end)
|
Counts the number of reads with the specified flag.
:param str file_name: name of reads file
:param str flag: sam flag value to be read
:param bool paired_end: This parameter is ignored; samtools automatically responds correctly depending
on the data in the bamfile. We leave the option here just for consistency, since all the other
counting functions require the parameter. This makes it easier to swap counting functions during
pipeline development.
| 6.278889
| 7.352077
| 0.854029
|
param = " -c -F256"
if file_name.endswith("sam"):
param += " -S"
return self.samtools_view(file_name, param=param)
|
def count_uniquelymapping_reads(self, file_name, paired_end)
|
Counts the number of reads that mapped to a unique position.
:param str file_name: name of reads file
:param bool paired_end: This parameter is ignored.
| 7.908658
| 10.02166
| 0.789157
|
cmd = "{} view {} {} {}".format(
self.tools.samtools, param, file_name, postpend)
return subprocess.check_output(cmd, shell=True)
|
def samtools_view(self, file_name, param, postpend="")
|
Run samtools view, with flexible parameters and post-processing.
This is used internally to implement the various count_reads functions.
:param str file_name: Path to reads file (SAM or BAM) to view
:param str param: String of parameters to pass to samtools view
:param str postpend: String to append to the samtools command;
useful to add cut, sort, wc operations to the samtools view output.
| 3.508955
| 4.611655
| 0.760888
|
_, ext = os.path.splitext(file_name)
if not (is_sam_or_bam(file_name) or is_fastq(file_name)):
# TODO: make this an exception and force caller to handle that
# rather than relying on knowledge of possibility of negative value.
return -1
if is_sam_or_bam(file_name):
param_text = "-c" if ext == ".bam" else "-c -S"
return self.samtools_view(file_name, param=param_text)
else:
num_lines = self.count_lines_zip(file_name) \
if is_gzipped_fastq(file_name) \
else self.count_lines(file_name)
divisor = 2 if paired_end else 4
return int(num_lines) / divisor
|
def count_reads(self, file_name, paired_end)
|
Count reads in a file.
Paired-end reads count as 2 in this function.
For paired-end reads, this function assumes that the reads are split
into 2 files, so it divides line count by 2 instead of 4.
This will thus give an incorrect result if your paired-end fastq files
are in only a single file (you must divide by 2 again).
:param str file_name: Name/path of file whose reads are to be counted.
:param bool paired_end: Whether the file contains paired-end reads.
| 4.779892
| 4.598215
| 1.039511
|
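# Bowtie2 tags reads that aligned concordantly as a pair with YT:Z:CP.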
cmd = self.tools.samtools + " view " + aligned_bam + " | "
cmd += "grep 'YT:Z:CP'" + " | uniq -u | wc -l | sed -E 's/^[[:space:]]+//'"
return subprocess.check_output(cmd, shell=True)
|
def count_concordant(self, aligned_bam)
|
Count only reads that "aligned concordantly exactly 1 time."
:param str aligned_bam: File for which to count mapped reads.
| 5.358687
| 5.80928
| 0.922436
|
if file_name.endswith("bam"):
return self.samtools_view(file_name, param="-c -F4")
if file_name.endswith("sam"):
return self.samtools_view(file_name, param="-c -F4 -S")
return -1
|
def count_mapped_reads(self, file_name, paired_end)
|
Mapped reads are not in fastq format, so this function doesn't need to accommodate fastq
and doesn't require a paired-end parameter, because it only uses samtools view.
It is therefore fine that the parameter has a default, since the value is discarded.
:param str file_name: File for which to count mapped reads.
:param bool paired_end: This parameter is ignored; samtools automatically responds correctly depending
on the data in the bamfile. We leave the option here just for consistency, since all the other
counting functions require the parameter. This makes it easier to swap counting functions during
pipeline development.
:return int: Either return code from samtools view command, or -1 to indicate an error state.
| 3.467831
| 3.046321
| 1.138367
|
cmd = self.tools.samtools + " view -bS " + sam_file + " > " + sam_file.replace(".sam", ".bam") + "\n"
cmd += self.tools.samtools + " sort " + sam_file.replace(".sam", ".bam") + " -o " + sam_file.replace(".sam", "_sorted.bam") + "\n"
cmd += self.tools.samtools + " index " + sam_file.replace(".sam", "_sorted.bam") + "\n"
if depth:
cmd += self.tools.samtools + " depth " + sam_file.replace(".sam", "_sorted.bam") + " > " + sam_file.replace(".sam", "_sorted.depth") + "\n"
return cmd
|
def sam_conversions(self, sam_file, depth=True)
|
Convert sam files to bam files, then sort and index them for later use.
:param bool depth: also calculate coverage over each position
| 1.565705
| 1.648473
| 0.949791
|
cmd = self.tools.samtools + " view -h " + bam_file + " > " + bam_file.replace(".bam", ".sam") + "\n"
cmd += self.tools.samtools + " sort " + bam_file + " -o " + bam_file.replace(".bam", "_sorted.bam") + "\n"
cmd += self.tools.samtools + " index " + bam_file.replace(".bam", "_sorted.bam") + "\n"
if depth:
cmd += self.tools.samtools + " depth " + bam_file.replace(".bam", "_sorted.bam") + " > " + bam_file.replace(".bam", "_sorted.depth") + "\n"
return cmd
|
def bam_conversions(self, bam_file, depth=True)
|
Convert a bam file to sam, then sort and index the bam for later use.
:param bool depth: also calculate coverage over each position
| 1.556161
| 1.668861
| 0.932469
|
# You can find the fastqc help with fastqc --help
try:
pm = self.pm
except AttributeError:
# Do nothing, this is just for path construction.
pass
else:
if not os.path.isabs(output_dir) and pm is not None:
output_dir = os.path.join(pm.outfolder, output_dir)
self.make_sure_path_exists(output_dir)
return "{} --noextract --outdir {} {}".\
format(self.tools.fastqc, output_dir, file)
|
def fastqc(self, file, output_dir)
|
Create command to run fastqc on a FASTQ file
:param str file: Path to file with sequencing reads
:param str output_dir: Path to folder in which to place output
:return str: Command with which to run fastqc
| 4.903086
| 5.044398
| 0.971986
|
cmds = list()
initial = os.path.splitext(os.path.basename(input_bam))[0]
cmd1 = self.fastqc(input_bam, output_dir)
cmds.append(cmd1)
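# FastQC names its outputs after the input file; move them to sample-based names if not already present.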
cmd2 = "if [[ ! -s {1}_fastqc.html ]]; then mv {0}_fastqc.html {1}_fastqc.html; mv {0}_fastqc.zip {1}_fastqc.zip; fi".format(
os.path.join(output_dir, initial), os.path.join(output_dir, sample_name))
cmds.append(cmd2)
return cmds
|
def fastqc_rename(self, input_bam, output_dir, sample_name)
|
Create pair of commands to run fastqc and organize files.
The first command returned is the one that actually runs fastqc when
it's executed; the second moves the output files to the output
folder for the sample indicated.
:param str input_bam: Path to file for which to run fastqc.
:param str output_dir: Path to folder in which fastqc output will be
written, and within which the sample's output folder lives.
:param str sample_name: Sample name, which determines subfolder within
output_dir for the fastqc files.
:return list[str]: Pair of commands, to run fastqc and then move the files to
their intended destination based on sample name.
| 2.351378
| 2.63263
| 0.893167
|
cmd = self.tools.samtools + " index {0}".format(bam_file)
return cmd
|
def samtools_index(self, bam_file)
|
Index a bam file.
| 5.034904
| 5.391093
| 0.93393
|
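# Build the skewer invocation, then rename its default output files to the requested paths.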
pe = input_fastq2 is not None
mode = "pe" if pe else "any"
cmds = list()
cmd1 = self.tools.skewer + " --quiet"
cmd1 += " -f sanger"
cmd1 += " -t {0}".format(cpus)
cmd1 += " -m {0}".format(mode)
cmd1 += " -x {0}".format(adapters)
cmd1 += " -o {0}".format(output_prefix)
cmd1 += " {0}".format(input_fastq1)
if input_fastq2 is None:
cmds.append(cmd1)
else:
cmd1 += " {0}".format(input_fastq2)
cmds.append(cmd1)
if input_fastq2 is None:
cmd2 = "mv {0} {1}".format(output_prefix + "-trimmed.fastq", output_fastq1)
cmds.append(cmd2)
else:
cmd2 = "mv {0} {1}".format(output_prefix + "-trimmed-pair1.fastq", output_fastq1)
cmds.append(cmd2)
cmd3 = "mv {0} {1}".format(output_prefix + "-trimmed-pair2.fastq", output_fastq2)
cmds.append(cmd3)
cmd4 = "mv {0} {1}".format(output_prefix + "-trimmed.log", log)
cmds.append(cmd4)
return cmds
|
def skewer(
self, input_fastq1, output_prefix, output_fastq1,
log, cpus, adapters, input_fastq2=None, output_fastq2=None)
|
Create commands with which to run skewer.
:param str input_fastq1: Path to input (read 1) FASTQ file
:param str output_prefix: Prefix for output FASTQ file names
:param str output_fastq1: Path to (read 1) output FASTQ file
:param str log: Path to file to which to write logging information
:param int | str cpus: Number of processing cores to allow
:param str adapters: Path to file with sequencing adapters
:param str input_fastq2: Path to read 2 input FASTQ file
:param str output_fastq2: Path to read 2 output FASTQ file
:return list[str]: Sequence of commands to run to trim reads with
skewer and rename files as desired.
| 1.878934
| 1.916521
| 0.980388
|
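# Step 1: remove duplicates with sambamba markdup; step 2: keep mapped, primary, high-MAPQ reads (proper pairs if paired) and sort; then delete the intermediate file and its index.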
nodups = re.sub("\.bam$", "", output_bam) + ".nodups.nofilter.bam"
cmd1 = self.tools.sambamba + " markdup -t {0} -r --compression-level=0 {1} {2} 2> {3}".format(cpus, input_bam, nodups, metrics_file)
cmd2 = self.tools.sambamba + ' view -t {0} -f bam --valid'.format(cpus)
if paired:
cmd2 += ' -F "not (unmapped or mate_is_unmapped) and proper_pair'
else:
cmd2 += ' -F "not unmapped'
cmd2 += ' and not (secondary_alignment or supplementary) and mapping_quality >= {0}"'.format(Q)
cmd2 += ' {0} |'.format(nodups)
cmd2 += self.tools.sambamba + " sort -t {0} /dev/stdin -o {1}".format(cpus, output_bam)
cmd3 = "if [[ -s {0} ]]; then rm {0}; fi".format(nodups)
cmd4 = "if [[ -s {0} ]]; then rm {0}; fi".format(nodups + ".bai")
return [cmd1, cmd2, cmd3, cmd4]
|
def filter_reads(self, input_bam, output_bam, metrics_file, paired=False, cpus=16, Q=30)
|
Remove duplicates, filter for >Q, remove multiple mapping reads.
For paired-end reads, keep only proper pairs.
| 2.812086
| 2.772643
| 1.014226
|
base = "{} {} -rf -savp".format(self.tools.Rscript, self.tools.spp)
cmd = base + " -savp={} -s=0:5:500 -c={} -out={} -p={}".format(
plot, input_bam, output, cpus)
return cmd
|
def run_spp(self, input_bam, output, plot, cpus)
|
Run the SPP read peak analysis tool.
:param str input_bam: Path to reads file
:param str output: Path to output file
:param str plot: Path to plot file
:param int cpus: Number of processors to use
:return str: Command with which to run SPP
| 9.162084
| 9.353112
| 0.979576
|
# TODO:
# adjust fragment length dependent on read size and real fragment size
# (right now it assumes 50bp reads with 180bp fragments)
cmds = list()
transient_file = os.path.abspath(re.sub("\.bigWig", "", output_bigwig))
cmd1 = self.tools.bedtools + " bamtobed -i {0} |".format(input_bam)
if not tagmented:
cmd1 += " " + self.tools.bedtools + " slop -i stdin -g {0} -s -l 0 -r 130 |".format(genome_sizes)
cmd1 += " fix_bedfile_genome_boundaries.py {0} |".format(genome)
cmd1 += " " + self.tools.genomeCoverageBed + " {0}-bg -g {1} -i stdin > {2}.cov".format(
"-5 " if tagmented else "",
genome_sizes,
transient_file
)
cmds.append(cmd1)
if normalize:
# Scale the coverage column by norm_factor over the total coverage, then sort for bedGraphToBigWig.
cmds.append("""awk 'NR==FNR{{sum+= $4; next}}{{ $4 = ($4 / sum) * {1}; print}}' {0}.cov {0}.cov | sort -k1,1 -k2,2n > {0}.normalized.cov""".format(transient_file, norm_factor))
cmds.append(self.tools.bedGraphToBigWig + " {0}{1}.cov {2} {3}".format(transient_file, ".normalized" if normalize else "", genome_sizes, output_bigwig))
# remove tmp files
cmds.append("if [[ -s {0}.cov ]]; then rm {0}.cov; fi".format(transient_file))
if normalize:
cmds.append("if [[ -s {0}.normalized.cov ]]; then rm {0}.normalized.cov; fi".format(transient_file))
cmds.append("chmod 755 {0}".format(output_bigwig))
return cmds
|
def bam_to_bigwig(
self, input_bam, output_bigwig, genome_sizes, genome,
tagmented=False, normalize=False, norm_factor=1000)
|
Convert a BAM file to a bigWig file.
:param str input_bam: path to BAM file to convert
:param str output_bigwig: path to which to write file in bigwig format
:param str genome_sizes: path to file with chromosome size information
:param str genome: name of genomic assembly
:param bool tagmented: flag related to read-generating protocol
:param bool normalize: whether to normalize coverage
:param int norm_factor: number of bases to use for normalization
:return list[str]: sequence of commands to execute
| 3.80689
| 3.855108
| 0.987492
|
cmd = self.simple_frip(input_bam, input_bed, threads)
return subprocess.check_output(cmd.split(" "), shell=True)
|
def calc_frip(self, input_bam, input_bed, threads=4)
|
Calculate fraction of reads in peaks.
A file of with a pool of sequencing reads and a file with peak call
regions define the operation that will be performed. Thread count
for samtools can be specified as well.
:param str input_bam: sequencing reads file
:param str input_bed: file with called peak regions
:param int threads: number of threads samtools may use
:return float: fraction of reads in peaks defined in given peaks file
| 4.119648
| 6.155104
| 0.669306
|
sizes = {"hg38": 2.7e9, "hg19": 2.7e9, "mm10": 1.87e9, "dr7": 1.412e9, "mm9": 1.87e9}
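# Effective genome sizes passed to MACS2 via -g, keyed by assembly name.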
# Whether to specify to MACS2 a value for statistical significance
# can be either directly indicated, but if not, it's determined by
# whether the mark is associated with broad peaks. By default, we
# specify a significance value to MACS2 for a mark associated with a
# broad peak.
if include_significance is None:
include_significance = broad
cmd = self.tools.macs2 + " callpeak -t {0}".format(treatment_bams if type(treatment_bams) is str else " ".join(treatment_bams))
if control_bams is not None:
cmd += " -c {0}".format(control_bams if type(control_bams) is str else " ".join(control_bams))
if paired:
cmd += " -f BAMPE "
# Additional settings based on whether the marks is associated with
# broad peaks
if broad:
cmd += " --broad --nomodel --extsize 73"
else:
cmd += " --fix-bimodal --extsize 180 --bw 200"
if include_significance:
# Allow significance specification via either p- or q-value,
# giving preference to q-value if both are provided but falling
# back on a default p-value if neither is provided but inclusion
# of statistical significance measure is desired.
if qvalue is not None:
cmd += " --qvalue {}".format(qvalue)
else:
cmd += " --pvalue {}".format(pvalue or 0.00001)
cmd += " -g {0} -n {1} --outdir {2}".format(sizes[genome], sample_name, output_dir)
return cmd
|
def macs2_call_peaks(
self, treatment_bams, output_dir, sample_name, genome,
control_bams=None, broad=False, paired=False,
pvalue=None, qvalue=None, include_significance=None)
|
Use MACS2 to call peaks.
:param str | Iterable[str] treatment_bams: Paths to files with data to
regard as treatment.
:param str output_dir: Path to output folder.
:param str sample_name: Name for the sample involved.
:param str genome: Name of the genome assembly to use.
:param str | Iterable[str] control_bams: Paths to files with data to
regard as control
:param bool broad: Whether to do broad peak calling.
:param bool paired: Whether reads are paired-end
:param float | NoneType pvalue: Statistical significance measure to
pass as --pvalue to peak calling with MACS
:param float | NoneType qvalue: Statistical significance measure to
pass as --qvalue to peak calling with MACS
:param bool | NoneType include_significance: Whether to pass a
statistical significance argument to peak calling with MACS; if
omitted, this will be True if the peak calling is broad or if
either p-value or q-value is specified; default significance
specification is a p-value of 0.00001 if a significance is to be
specified but no value is provided for p-value or q-value.
:return str: Command to run.
| 4.176424
| 4.03387
| 1.035339
|
broad = "TRUE" if broad else "FALSE"
cmd = self.tools.Rscript + " `which spp_peak_calling.R` {0} {1} {2} {3} {4} {5} {6}".format(
treatment_bam, control_bam, treatment_name, control_name, broad, cpus, output_dir
)
if qvalue is not None:
cmd += " {}".format(qvalue)
return cmd
|
def spp_call_peaks(
self, treatment_bam, control_bam, treatment_name, control_name,
output_dir, broad, cpus, qvalue=None)
|
Build command for R script to call peaks with SPP.
:param str treatment_bam: Path to file with data for treatment sample.
:param str control_bam: Path to file with data for control sample.
:param str treatment_name: Name for the treatment sample.
:param str control_name: Name for the control sample.
:param str output_dir: Path to folder for output.
:param str | bool broad: Whether to specify broad peak calling mode.
:param int cpus: Number of cores the script may use.
:param float qvalue: FDR, as decimal value
:return str: Command to run.
| 2.786527
| 3.049743
| 0.913693
|
from collections import Counter
try:
p = subprocess.Popen([self.tools.samtools, 'view', bam_file],
stdout=subprocess.PIPE)
# Count paired alignments
paired = 0
read_length = Counter()
for _ in range(n):
line = next(p.stdout).decode().split("\t")
flag = int(line[1])
read_length[len(line[9])] += 1
if 1 & flag: # check decimal flag contains 1 (paired)
paired += 1
p.kill()
except IOError:
raise IOError("Cannot read provided bam file.")
# Get the most abundant read length
read_length = read_length.most_common()[0][0]
# If at least half is paired, return True
if paired > (n / 2.):
return "PE", read_length
else:
return "SE", read_length
|
def get_read_type(self, bam_file, n=10)
|
Gets the read type (single, paired) and length of bam file.
:param str bam_file: Bam file to determine read attributes.
:param int n: Number of lines to read from bam file.
:return str, int: tuple of read type and read length
| 4.187023
| 4.340145
| 0.96472
|
import pandas as pd
stats = pd.Series(index=["readCount", "unpaired", "unaligned", "unique", "multiple", "alignmentRate"])
try:
with open(stats_file) as handle:
content = handle.readlines() # list of strings per line
except:
return stats
# total reads
try:
line = [i for i in range(len(content)) if " reads; of these:" in content[i]][0]
stats["readCount"] = re.sub("\D.*", "", content[line])
if 7 > len(content) > 2:
line = [i for i in range(len(content)) if "were unpaired; of these:" in content[i]][0]
stats["unpaired"] = re.sub("\D", "", re.sub("\(.*", "", content[line]))
else:
line = [i for i in range(len(content)) if "were paired; of these:" in content[i]][0]
stats["unpaired"] = stats["readCount"] - int(re.sub("\D", "", re.sub("\(.*", "", content[line])))
line = [i for i in range(len(content)) if "aligned 0 times" in content[i]][0]
stats["unaligned"] = re.sub("\D", "", re.sub("\(.*", "", content[line]))
line = [i for i in range(len(content)) if "aligned exactly 1 time" in content[i]][0]
stats["unique"] = re.sub("\D", "", re.sub("\(.*", "", content[line]))
line = [i for i in range(len(content)) if "aligned >1 times" in content[i]][0]
stats["multiple"] = re.sub("\D", "", re.sub("\(.*", "", content[line]))
line = [i for i in range(len(content)) if "overall alignment rate" in content[i]][0]
stats["alignmentRate"] = re.sub("\%.*", "", content[line]).strip()
except IndexError:
pass
return stats
|
def parse_bowtie_stats(self, stats_file)
|
Parses Bowtie2 stats file, returns series with values.
:param str stats_file: Bowtie2 output file with alignment statistics.
| 2.080291
| 2.044215
| 1.017648
|
import pandas as pd
series = pd.Series()
try:
with open(stats_file) as handle:
content = handle.readlines() # list of strings per line
except:
return series
try:
line = [i for i in range(len(content)) if "single ends (among them " in content[i]][0]
series["single-ends"] = re.sub("\D", "", re.sub("\(.*", "", content[line]))
line = [i for i in range(len(content)) if " end pairs... done in " in content[i]][0]
series["paired-ends"] = re.sub("\D", "", re.sub("\.\.\..*", "", content[line]))
line = [i for i in range(len(content)) if " duplicates, sorting the list... done in " in content[i]][0]
series["duplicates"] = re.sub("\D", "", re.sub("\.\.\..*", "", content[line]))
except IndexError:
pass
return series
|
def parse_duplicate_stats(self, stats_file)
|
Parses sambamba markdup output, returns series with values.
:param str stats_file: sambamba output file with duplicate statistics.
| 3.520846
| 3.353135
| 1.050016
|
import pandas as pd
series = pd.Series()
try:
with open(qc_file) as handle:
line = handle.readlines()[0].strip().split("\t") # list of strings per line
series["NSC"] = line[-3]
series["RSC"] = line[-2]
series["qualityTag"] = line[-1]
except:
pass
return series
|
def parse_qc(self, qc_file)
|
Parse phantompeakqualtools (spp) QC table and return quality metrics.
:param str qc_file: Path to phantompeakqualtools output file, which
contains sample quality measurements.
| 4.689587
| 4.857494
| 0.965433
|
proc = subprocess.Popen(["wc", "-l", sample.peaks], stdout=subprocess.PIPE)
out, err = proc.communicate()
sample["peakNumber"] = re.sub("\D.*", "", out)
return sample
|
def get_peak_number(self, sample)
|
Counts number of peaks from a sample's peak file.
:param pipelines.Sample sample: Sample object with "peaks" attribute.
| 5.126551
| 4.462358
| 1.148843
|
import pandas as pd
with open(sample.frip, "r") as handle:
content = handle.readlines()
reads_in_peaks = int(re.sub(r"\D", "", content[0]))
mapped_reads = int(sample["readCount"]) - int(sample["unaligned"])
return pd.Series(reads_in_peaks / float(mapped_reads), index=["FRiP"])
|
def get_frip(self, sample)
|
Calculates the fraction of reads in peaks for a given sample.
:param pipelines.Sample sample: Sample object with "peaks" attribute.
| 6.547635
| 5.07948
| 1.289036
|
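The FRiP arithmetic itself is simple; a standalone sketch with made-up counts, not tied to any Sample object:

```python
import pandas as pd

# Made-up counts: reads falling in peaks, total reads, and unaligned reads.
reads_in_peaks = 2500000
read_count, unaligned = 20000000, 1000000
mapped_reads = read_count - unaligned
frip = pd.Series([reads_in_peaks / float(mapped_reads)], index=["FRiP"])
print(frip)   # FRiP ~ 0.1316
```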
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_IGN)
|
def _ignore_interrupts(self)
|
Ignore interrupt and termination signals. Used as a pre-execution
function (preexec_fn) for subprocess.Popen calls that pypiper will
take control of (i.e., clean up manually).
| 2.179137
| 2.108312
| 1.033593
|
# Remove previous status flag file.
flag_file_path = self._flag_file_path()
try:
os.remove(flag_file_path)
except:
# Print message only if the failure to remove the status flag
# is unexpected; there's no flag for initialization, so we
# can't remove the file.
if self.status != "initializing":
print("Could not remove flag file: '{}'".format(flag_file_path))
pass
# Set new status.
prev_status = self.status
self.status = status
self._create_file(self._flag_file_path())
print("\nChanged status from {} to {}.".format(
prev_status, self.status))
|
def _set_status_flag(self, status)
|
Configure state and files on disk to match current processing status.
:param str status: Name of new status designation for pipeline.
| 4.435678
| 4.490539
| 0.987783
|
flag_file_name = "{}_{}".format(
self.name, flag_name(status or self.status))
return pipeline_filepath(self, filename=flag_file_name)
|
def _flag_file_path(self, status=None)
|
Create path to flag file based on indicated or current status.
Internal variables used are the pipeline name and the designated
pipeline output folder path.
:param str status: flag file type to create, default to current status
:return str: path to flag file of indicated or current status.
| 7.048026
| 7.739297
| 0.91068
|
self._report_command(cmd)
likely_shell = check_shell(cmd, shell)
if shell is None:
shell = likely_shell
if not shell:
if likely_shell:
print("Should this command run in a shell instead of directly in a subprocess?")
cmd = shlex.split(cmd)
try:
return subprocess.check_output(cmd, shell=shell)
except Exception as e:
self._triage_error(e, nofail)
|
def checkprint(self, cmd, shell=None, nofail=False)
|
Just like callprint, but checks output -- so you can get a variable
in python corresponding to the output of the command you call.
This is equivalent to running subprocess.check_output()
instead of subprocess.call().
:param str | Iterable[str] cmd: Bash command(s) to be run.
:param bool | str shell: Whether the command should be run in its own shell. Optional.
Default: None ("guess") -- the method will try to guess whether the command should be
run in a shell, based on the presence of a pipe (|) or redirect (>). To force a process
to run as a direct subprocess, set `shell` to False; to force a shell, set it to True.
:param bool nofail: Should the pipeline bail on a nonzero return from a process? Default: False
Nofail can be used to implement non-essential parts of the pipeline; if these processes fail,
they will not cause the pipeline to bail out.
| 4.941651
| 4.450994
| 1.110235
|
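A hedged usage sketch for `checkprint`: capture a command's output into a Python variable. It assumes the usual PipelineManager constructor (a name plus an output folder); the command strings are throwaway examples.

```python
import pypiper

pm = pypiper.PipelineManager(name="demo", outfolder="pipeline_output")

# Plain command run as a direct subprocess; output comes back for use in Python.
banner = pm.checkprint("echo 'hello from checkprint'")

# A pipe means the command should run in a shell; force it explicitly.
n_files = pm.checkprint("ls pipeline_output | wc -l", shell=True)

pm.stop_pipeline()
```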
# print("attend:{}".format(proc.pid))
try:
proc.wait(timeout=sleeptime)
except psutil.TimeoutExpired:
return True
return False
|
def _attend_process(self, proc, sleeptime)
|
Waits on a process for a given time to see if it finishes, returns True
if it's still running after the given time or False as soon as it
returns.
:param psutil.Popen proc: Process object opened by psutil.Popen()
:param float sleeptime: Time to wait
:return bool: True if process is still running; otherwise false
| 3.498044
| 3.209205
| 1.090003
|
local_maxmem = -1
sleeptime = .5
while p.poll() is None:
if not shell:
local_maxmem = max(local_maxmem, self._memory_usage(p.pid) / 1e6)
# print("int.maxmem (pid:" + str(p.pid) + ") " + str(local_maxmem))
time.sleep(sleeptime)
sleeptime = min(sleeptime + 5, 60)
self.peak_memory = max(self.peak_memory, local_maxmem)
del self.procs[p.pid]
info = "Process " + str(p.pid) + " returned: (" + str(p.returncode) + ")."
if not shell:
info += " Peak memory: (Process: " + str(round(local_maxmem, 3)) + "GB;"
info += " Pipeline: " + str(round(self.peak_memory, 3)) + "GB)\n"
print(info + "\n")
if p.returncode != 0:
raise Exception("Process returned nonzero result.")
return [p.returncode, local_maxmem]
|
def _wait_for_process(self, p, shell=False)
|
Debug function used in unit tests.
:param p: A subprocess.Popen process.
:param bool shell: Whether the command should be run in its own shell. Optional. Default: False.
| 3.47918
| 3.710436
| 0.937674
|
sleeptime = .5
first_message_flag = False
dot_count = 0
recover_file = self._recoverfile_from_lockfile(lock_file)
while os.path.isfile(lock_file):
if first_message_flag is False:
self.timestamp("Waiting for file lock: " + lock_file)
self._set_status_flag(WAIT_FLAG)
first_message_flag = True
else:
sys.stdout.write(".")
dot_count = dot_count + 1
if dot_count % 60 == 0:
print("") # linefeed
# Prevent pypiper from waiting on the lock file forever in case a
# dynamic recovery flag was left behind by another pipeline that was interrupted.
if os.path.isfile(recover_file):
sys.stdout.write(" Dynamic recovery flag found")
break
time.sleep(sleeptime)
sleeptime = min(sleeptime + 2.5, 60)
if first_message_flag:
self.timestamp("File unlocked.")
self._set_status_flag(RUN_FLAG)
|
def _wait_for_lock(self, lock_file)
|
Sleep until the lock_file no longer exists, or until a lock_file-related dynamic recovery flag is spotted.
:param str lock_file: Lock file to wait upon.
| 5.311377
| 5.201064
| 1.02121
|
# Halt if the manager's state has been set such that this call
# should halt the pipeline.
if self.halt_on_next:
self.halt(checkpoint, finished, raise_error=raise_error)
# Determine action to take with respect to halting if needed.
if checkpoint:
if finished:
# Write the file.
self._checkpoint(checkpoint)
self.prev_checkpoint = checkpoint
self.curr_checkpoint = None
else:
self.prev_checkpoint = self.curr_checkpoint
self.curr_checkpoint = checkpoint
self._checkpoint(self.prev_checkpoint)
# Handle the two halting conditions.
if (finished and checkpoint == self.stop_after) or (not finished and checkpoint == self.stop_before):
self.halt(checkpoint, finished, raise_error=raise_error)
# Determine if we've started executing.
elif checkpoint == self.start_point:
self._active = True
# If this is a prospective checkpoint, set the current checkpoint
# accordingly and whether we should halt the pipeline on the
# next timestamp call.
if not finished and checkpoint == self.stop_after:
self.halt_on_next = True
elapsed = self.time_elapsed(self.last_timestamp)
t = time.strftime("%m-%d %H:%M:%S")
if checkpoint is None:
msg = "{m} ({t}) elapsed: {delta_t} _TIME_".\
format(m=message, t=t, delta_t=elapsed)
else:
msg = "{m} ({t}) ({status} {stage}) elapsed: {delta_t} _TIME_".\
format(m=message, t=t,
status="finished" if finished else "starting",
stage=checkpoint, delta_t=elapsed)
if re.match("^###", message):
msg = "\n{}\n".format(msg)
print(msg)
self.last_timestamp = time.time()
|
def timestamp(self, message="", checkpoint=None,
finished=False, raise_error=True)
|
Print message, time, and time elapsed, perhaps creating checkpoint.
This prints your given message, along with the current time, and time
elapsed since the previous timestamp() call. If you specify a
HEADING by beginning the message with "###", it surrounds the message
with newlines for easier readability in the log file. If a checkpoint
is designated, an empty file is created corresponding to the name
given. Depending on how this manager's been configured, the value of
the checkpoint, and whether this timestamp indicates initiation or
completion of a group of pipeline steps, this call may stop the
pipeline's execution.
:param str message: Message to timestamp.
:param str checkpoint: Name of checkpoint; this tends to be something
that reflects the processing logic about to be or having just been
completed. Provision of an argument to this parameter means that
a checkpoint file will be created, facilitating arbitrary starting
and stopping points for the pipeline as desired.
:param bool finished: Whether this call represents the completion of a
conceptual unit of a pipeline's processing
:param bool raise_error: Whether to raise an exception if the
checkpoint or current state indicates that a halt should occur.
| 4.060853
| 3.693301
| 1.099519
|
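A hedged sketch of checkpointed timestamps; it assumes the usual PipelineManager constructor, and the stage name "align_reads" is purely illustrative.

```python
import pypiper

pm = pypiper.PipelineManager(name="demo", outfolder="pipeline_output")

pm.timestamp("### Align reads", checkpoint="align_reads")   # prospective: stage starting
# ... alignment commands would run here ...
pm.timestamp("Alignment complete", checkpoint="align_reads",
             finished=True)                                 # writes the checkpoint file

pm.stop_pipeline()
```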
message_raw = str(command) + "\t " + \
str(lock_name) + "\t" + \
str(datetime.timedelta(seconds = round(elapsed_time, 2))) + "\t " + \
str(memory)
with open(self.pipeline_profile_file, "a") as myfile:
myfile.write(message_raw + "\n")
|
def _report_profile(self, command, lock_name, elapsed_time, memory)
|
Writes a string to self.pipeline_profile_file.
| 3.227805
| 2.577691
| 1.252208
|
# Default annotation is current pipeline name.
annotation = str(annotation or self.name)
# In case the value is passed with trailing whitespace.
value = str(value).strip()
# keep the value in memory:
self.stats_dict[key] = value
message_raw = "{key}\t{value}\t{annotation}".format(
key=key, value=value, annotation=annotation)
message_markdown = "\n> `{key}`\t{value}\t{annotation}\t_RES_".format(
key=key, value=value, annotation=annotation)
print(message_markdown)
# Just to be extra careful, let's lock the file while we write,
# in case multiple pipelines write to the same file.
self._safe_write_to_file(self.pipeline_stats_file, message_raw)
|
def report_result(self, key, value, annotation=None)
|
Writes a string to self.pipeline_stats_file.
:param str key: name (key) of the stat
:param str value: value of the stat to report
:param str annotation: By default, the stats will be annotated with the pipeline
name, so you can tell which pipeline records which stats. If you want, you can
change this; use annotation='shared' if you need the stat to be used by
another pipeline (using get_stat()).
| 5.551328
| 5.172476
| 1.073244
|
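A brief usage sketch for `report_result`; the stat names and values are invented, and the PipelineManager constructor is assumed as in the earlier sketches.

```python
import pypiper

pm = pypiper.PipelineManager(name="demo", outfolder="pipeline_output")

pm.report_result("Trimmed_reads", 18500000)                       # annotated with this pipeline's name
pm.report_result("Genome_size", 3100000000, annotation="shared")  # readable by other pipelines

pm.stop_pipeline()
```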
# Default annotation is current pipeline name.
annotation = str(annotation or self.name)
# In case the value is passed with trailing whitespace.
filename = str(filename).strip()
if anchor_text:
anchor_text = str(anchor_text).strip()
else:
anchor_text = str(key).strip()
# better to use a relative path in this file
# convert any absolute paths into relative paths
relative_filename = os.path.relpath(filename, self.outfolder) \
if os.path.isabs(filename) else filename
if anchor_image:
relative_anchor_image = os.path.relpath(anchor_image, self.outfolder) \
if os.path.isabs(anchor_image) else anchor_image
else:
relative_anchor_image = "None"
message_raw = "{key}\t{filename}\t{anchor_text}\t{anchor_image}\t{annotation}".format(
key=key, filename=relative_filename, anchor_text=anchor_text,
anchor_image=relative_anchor_image, annotation=annotation)
message_markdown = "> `{key}`\t{filename}\t{anchor_text}\t{anchor_image}\t{annotation}\t_OBJ_".format(
key=key, filename=relative_filename, anchor_text=anchor_text,
anchor_image=relative_anchor_image, annotation=annotation)
print(message_markdown)
self._safe_write_to_file(self.pipeline_objects_file, message_raw)
|
def report_object(self, key, filename, anchor_text=None, anchor_image=None,
annotation=None)
|
Writes a string to self.pipeline_objects_file. Used to report figures and others.
:param str key: name (key) of the object
:param str filename: relative path to the file (relative to parent output dir)
:param str anchor_text: text used as the link anchor text or caption to
refer to the object. If not provided, defaults to the key.
:param str anchor_image: a path to an HTML-displayable image thumbnail (so,
.png or .jpg, for example). If a path, the path should be relative
to the parent output dir.
:param str annotation: By default, the figures will be annotated with the
pipeline name, so you can tell which pipeline records which figures.
If you want, you can change this.
| 2.746182
| 2.586498
| 1.061738
|
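A hedged sketch for `report_object`; the file paths are illustrative and would normally point at files the pipeline actually produced inside its output folder.

```python
import pypiper

pm = pypiper.PipelineManager(name="demo", outfolder="pipeline_output")

pm.report_object("TSS enrichment", "QC/tss_enrichment.pdf",
                 anchor_image="QC/tss_enrichment.png")   # thumbnail for HTML reports

pm.stop_pipeline()
```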
target = file
lock_name = make_lock_name(target, self.outfolder)
lock_file = self._make_lock_path(lock_name)
while True:
if os.path.isfile(lock_file):
self._wait_for_lock(lock_file)
else:
try:
self.locks.append(lock_file)
self._create_file_racefree(lock_file)
except OSError as e:
if e.errno == errno.EEXIST:
print ("Lock file created after test! Looping again.")
continue # Go back to start
# Proceed with file writing
with open(file, "a") as myfile:
myfile.write(message + "\n")
os.remove(lock_file)
self.locks.remove(lock_file)
# If you make it to the end of the while loop, you're done
break
|
def _safe_write_to_file(self, file, message)
|
Writes a string to a file safely (with file locks).
| 4.267626
| 4.102028
| 1.04037
|
if isinstance(procs, list):
procs = ",".join(map(str,procs))
if procs:
line = "\n> `{cmd}` ({procs})\n".format(cmd=str(cmd), procs=procs)
else:
line = "\n> `{cmd}`\n".format(cmd=str(cmd))
print(line)
with open(self.pipeline_commands_file, "a") as myfile:
myfile.write(line + "\n\n")
|
def _report_command(self, cmd, procs=None)
|
Writes a command to both stdout and to the commands log file
(self.pipeline_commands_file).
:param str cmd: command to report
:param str | list[str] procs: process numbers for processes in the command
| 2.953345
| 2.582592
| 1.143558
|
write_lock_flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY
fd = os.open(file, write_lock_flags)
os.close(fd)
|
def _create_file_racefree(self, file)
|
Creates a file, but fails if the file already exists.
This function will thus only succeed if this process actually creates
the file; if the file already exists, it will raise an OSError,
avoiding race conditions.
:param str file: File to create.
| 3.475914
| 4.327162
| 0.803278
|
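A standalone sketch of the race-free creation idiom used above: `O_CREAT | O_EXCL` makes the open fail with EEXIST if another process created the file first. The lock path is illustrative.

```python
import errno
import os
import tempfile

lock_path = os.path.join(tempfile.gettempdir(), "pypiper_demo.lock")
try:
    fd = os.open(lock_path, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
    os.close(fd)
    print("Acquired lock: " + lock_path)
except OSError as e:
    if e.errno == errno.EEXIST:
        print("Lock already held by another process; wait or recover.")
    else:
        raise
```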
# For lock prefix validation, separate file name from other path
# components, as we care about the name prefix not path prefix.
base, name = os.path.split(lock_name_base)
lock_name = self._ensure_lock_prefix(name)
if base:
lock_name = os.path.join(base, lock_name)
return pipeline_filepath(self, filename=lock_name)
|
def _make_lock_path(self, lock_name_base)
|
Create path to lock file with given name as base.
:param str lock_name_base: Lock file name, designed to not be prefixed
with the lock file designation, but that's permitted.
:return str: Path to the lock file.
| 7.732098
| 8.101525
| 0.9544
|
# Require that the lock file path be absolute, or at least relative
# and starting with the pipeline output folder.
if not (os.path.isabs(lockfile) or lockfile.startswith(self.outfolder)):
lockfile = self._make_lock_path(lockfile)
return lockfile.replace(LOCK_PREFIX, "recover." + LOCK_PREFIX)
|
def _recoverfile_from_lockfile(self, lockfile)
|
Create path to recovery file with given name as base.
:param str lockfile: Name of file on which to base this path,
perhaps already prefixed with the designation of a lock file.
:return str: Path to recovery file.
| 7.659129
| 7.856059
| 0.974933
|
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
|
def make_sure_path_exists(self, path)
|
Creates all directories in a path if they do not exist.
:param str path: Path to create.
:raises Exception: if the path creation attempt hits an error with
a code indicating a cause other than pre-existence.
| 1.868528
| 2.422615
| 0.771286
|
# regex identifies all possible stats files.
#regex = self.outfolder + "*_stats.tsv"
#stats_files = glob.glob(regex)
#stats_files.insert(self.pipeline_stats_file) # last one is the current pipeline
#for stats_file in stats_files:
stats_file = self.pipeline_stats_file
if os.path.isfile(self.pipeline_stats_file):
with open(stats_file, 'r') as stat_file:
for line in stat_file:
try:
# Someone may have put something that's not 3 columns in the stats file
# if so, shame on him, but we can just ignore it.
key, value, annotation = line.split('\t')
except ValueError:
print("WARNING: Each row in a stats file is expected to have 3 columns")
continue
if annotation.rstrip() == self.name or annotation.rstrip() == "shared":
self.stats_dict[key] = value.strip()
|
def _refresh_stats(self)
|
Loads up the stats sheet created for this pipeline run and reads
those stats into memory
| 5.996138
| 5.610402
| 1.068754
|
try:
return self.stats_dict[key]
except KeyError:
self._refresh_stats()
try:
return self.stats_dict[key]
except KeyError:
print("Missing stat '{}'".format(key))
return None
|
def get_stat(self, key)
|
Returns a stat that was previously reported. This is necessary for reporting new stats that are
derived from two stats, one of which may have been reported by an earlier run. For example,
if you first use report_result to report (number of trimmed reads), and then in a later stage
want to report alignment rate, then this second stat (alignment rate) will require knowing the
first stat (number of trimmed reads); however, that may not have been calculated in the current
pipeline run, so we must retrieve it from the stats.tsv output file. This command will retrieve
such previously reported stats if they were not already calculated in the current pipeline run.
:param str key: key of stat to retrieve
| 3.045545
| 3.060305
| 0.995177
|
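A hedged sketch of the derived-stat pattern the docstring describes: pull a previously reported count and use it to report a rate. The stat names and numbers are invented, and the constructor is assumed as in the earlier sketches.

```python
import pypiper

pm = pypiper.PipelineManager(name="demo", outfolder="pipeline_output")

trimmed = pm.get_stat("Trimmed_reads")   # may come from an earlier run's stats file
aligned = 16200000                       # e.g., parsed from an aligner log
if trimmed is not None:
    pm.report_result("Alignment_rate", round(aligned / float(trimmed), 4))

pm.stop_pipeline()
```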
# For null stage, short-circuit and indicate no file write.
# This handles case in which we're timestamping prospectively and
# previously weren't in a stage.
if stage is None:
return False
try:
is_checkpoint = stage.checkpoint
except AttributeError:
# Maybe we have a raw function, not a stage.
if hasattr(stage, "__call__"):
stage = stage.__name__
else:
# Maybe we have a stage name not a Stage.
# In that case, we can proceed as-is, with downstream
# processing handling Stage vs. stage name disambiguation.
# Here, though, warn about inputs that appear filename/path-like.
# We can't rely on raw text being a filepath or filename,
# because that would ruin the ability to pass stage name rather
# than actual stage. We can issue a warning message based on the
# improbability of a stage name containing the '.' that would
# be expected to characterize the extension of a file name/path.
base, ext = os.path.splitext(stage)
if ext and "." not in base:
print("WARNING: '{}' looks like it may be the name or path of "
"a file; for such a checkpoint, use touch_checkpoint.".
format(stage))
else:
if not is_checkpoint:
print("Not a checkpoint: {}".format(stage))
return False
stage = stage.name
print("Checkpointing: '{}'".format(stage))
if os.path.isabs(stage):
check_fpath = stage
else:
check_fpath = checkpoint_filepath(stage, pm=self)
return self._touch_checkpoint(check_fpath)
|
def _checkpoint(self, stage)
|
Touch a checkpoint file for the given stage, if applicable.
A pipeline can report various "checkpoints" as sort of status markers
that designate the logical processing phase that's just been completed.
The initiation of a pipeline can preordain one of those as a "stopping
point" that when reached, should stop the pipeline's execution.
:param pypiper.Stage | str stage: Pipeline processing stage/phase just completed.
:return bool: Whether a checkpoint was created (i.e., whether it didn't
already exist)
:raise ValueError: If the stage is specified as an absolute filepath,
and that path indicates a location that's not immediately within
the main output folder, raise a ValueError.
| 8.768884
| 8.453394
| 1.037321
|
if os.path.isabs(check_file):
folder, _ = os.path.split(check_file)
# For raw string comparison, ensure that each path
# bears the final path separator.
other_folder = os.path.join(folder, "")
this_folder = os.path.join(self.outfolder, "")
if other_folder != this_folder:
errmsg = "Path provided as checkpoint file isn't in pipeline " \
"output folder. '{}' is not in '{}'".format(
check_file, self.outfolder)
raise ValueError(errmsg)
fpath = check_file
else:
fpath = pipeline_filepath(self, filename=check_file)
# Create/update timestamp for checkpoint, but base return value on
# whether the action was a simple update or a novel creation.
already_exists = os.path.isfile(fpath)
open(fpath, 'w').close()
action = "Updated" if already_exists else "Created"
print("{} checkpoint file: '{}'".format(action, fpath))
return already_exists
|
def _touch_checkpoint(self, check_file)
|
Alternative way for a pipeline to designate a checkpoint.
:param str check_file: Name or path of file to use as checkpoint.
:return bool: Whether the checkpoint file already existed (the file is
touched/updated either way).
:raise ValueError: Raise a ValueError if the argument provided as the
checkpoint file is an absolute path and that doesn't correspond
to a location within the main output folder.
| 5.062363
| 4.47381
| 1.131555
|
# Take care of any active running subprocess
sys.stdout.flush()
self._terminate_running_subprocesses()
if dynamic_recover:
# job was terminated, not failed due to a bad process.
# flag this run as recoverable.
if len(self.locks) < 1:
# If there is no process locked, then recovery will be automatic.
print("No locked process. Dynamic recovery will be automatic.")
# make a copy of self.locks to iterate over since we'll be clearing them as we go
# set a recovery flag for each lock.
for lock_file in self.locks[:]:
recover_file = self._recoverfile_from_lockfile(lock_file)
print("Setting dynamic recover file: {}".format(recover_file))
self._create_file(recover_file)
self.locks.remove(lock_file)
# Produce cleanup script
self._cleanup(dry_run=True)
# Finally, set the status to failed and close out with a timestamp
if not self._failed: # and not self._completed:
self.timestamp("### Pipeline failed at: ")
total_time = datetime.timedelta(seconds=self.time_elapsed(self.starttime))
print("Total time: " + str(total_time))
self._set_status_flag(FAIL_FLAG)
raise e
|
def fail_pipeline(self, e, dynamic_recover=False)
|
If the pipeline does not complete, this function will stop the pipeline gracefully.
It sets the status flag to failed and skips the normal success completion procedure.
:param Exception e: Exception to raise.
:param bool dynamic_recover: Whether to recover e.g. for job termination.
| 7.288017
| 7.516611
| 0.969588
|
self.stop_pipeline(PAUSE_FLAG)
self._active = False
if raise_error:
raise PipelineHalt(checkpoint, finished)
|
def halt(self, checkpoint=None, finished=False, raise_error=True)
|
Stop the pipeline before completion point.
:param str checkpoint: Name of stage just reached or just completed.
:param bool finished: Whether the indicated stage was just finished
(True), or just reached (False)
:param bool raise_error: Whether to raise an exception to truly
halt execution.
| 9.382702
| 9.786435
| 0.958746
|
self._set_status_flag(status)
self._cleanup()
self.report_result("Time", str(datetime.timedelta(seconds=self.time_elapsed(self.starttime))))
self.report_result("Success", time.strftime("%m-%d-%H:%M:%S"))
print("\n##### [Epilogue:]")
print("* " + "Total elapsed time".rjust(20) + ": " + str(datetime.timedelta(seconds=self.time_elapsed(self.starttime))))
# print("Peak memory used: " + str(memory_usage()["peak"]) + "kb")
print("* " + "Peak memory used".rjust(20) + ": " + str(round(self.peak_memory, 2)) + " GB")
if self.halted:
return
self.timestamp("* Pipeline completed at: ".rjust(20))
|
def stop_pipeline(self, status=COMPLETE_FLAG)
|
Terminate the pipeline.
This is the "healthy" pipeline completion function.
The normal pipeline completion function, to be run by the pipeline
at the end of the script. It sets status flag to completed and records
some time and memory statistics to the log file.
| 5.275828
| 5.261236
| 1.002773
|
print("</pre>")
message = "Got " + signal_type + ". Failing gracefully..."
self.timestamp(message)
self.fail_pipeline(KeyboardInterrupt(signal_type), dynamic_recover=True)
sys.exit(1)
|
def _generic_signal_handler(self, signal_type)
|
Function for handling both SIGTERM and SIGINT
| 13.731616
| 12.973715
| 1.058418
|
# TODO: consider handling sys.stderr/sys.stdout exceptions related to
# TODO (cont.): order of interpreter vs. subprocess shutdown signal receipt.
# TODO (cont.): see https://bugs.python.org/issue11380
# Make the cleanup file executable if it exists
if os.path.isfile(self.cleanup_file):
# Make the cleanup file self destruct.
with open(self.cleanup_file, "a") as myfile:
myfile.write("rm " + self.cleanup_file + "\n")
os.chmod(self.cleanup_file, 0o755)
# If the pipeline hasn't completed successfully, or already been marked
# as failed, then mark it as failed now.
if not self._has_exit_status:
print("Pipeline status: {}".format(self.status))
self.fail_pipeline(Exception("Pipeline failure. See details above."))
if self.tee:
self.tee.kill()
|
def _exit_handler(self)
|
This function is registered with atexit to run whenever the script is completing.
A catch-all for uncaught exceptions, setting status flag file to failed.
| 7.326778
| 6.939795
| 1.055763
|
# When we kill process, it turns into a zombie, and we have to reap it.
# So we can't just kill it and then let it go; we call wait
def pskill(proc_pid, sig=signal.SIGINT):
parent_process = psutil.Process(proc_pid)
for child_proc in parent_process.children(recursive=True):
child_proc.send_signal(sig)
parent_process.send_signal(sig)
if child_pid is None:
return
proc_string = ""
if proc_name:
proc_string = " ({proc_name})".format(proc_name=proc_name)
# First a gentle kill
sys.stdout.flush()
still_running = self._attend_process(psutil.Process(child_pid), 0)
sleeptime = .25
time_waiting = 0
while still_running and time_waiting < 3:
try:
if time_waiting > 2:
pskill(child_pid, signal.SIGKILL)
# print("pskill("+str(child_pid)+", signal.SIGKILL)")
elif time_waiting > 1:
pskill(child_pid, signal.SIGTERM)
# print("pskill("+str(child_pid)+", signal.SIGTERM)")
else:
pskill(child_pid, signal.SIGINT)
# print("pskill("+str(child_pid)+", signal.SIGINT)")
except OSError:
# This would happen if the child process ended between the check
# and the next kill step
still_running = False
time_waiting = time_waiting + sleeptime
# Now see if it's still running
time_waiting = time_waiting + sleeptime
if not self._attend_process(psutil.Process(child_pid), sleeptime):
still_running = False
if still_running:
# still running!?
print("Child process {child_pid}{proc_string} never responded"
"I just can't take it anymore. I don't know what to do...".format(child_pid=child_pid,
proc_string=proc_string))
else:
if time_waiting > 0:
note = "terminated after {time} sec".format(time=int(time_waiting))
else:
note = "was already terminated"
msg = "Child process {child_pid}{proc_string} {note}.".format(
child_pid=child_pid, proc_string=proc_string, note=note)
print(msg)
|
def _kill_child_process(self, child_pid, proc_name=None)
|
Pypiper spawns subprocesses. We need to kill them to exit gracefully,
in the event of a pipeline termination or interrupt signal.
By default, child processes are not automatically killed when python
terminates, so Pypiper must clean these up manually.
Given a process ID, this function just kills it.
:param int child_pid: Child process id.
| 3.315553
| 3.388906
| 0.978355
|
if self.dirty:
# Override the user-provided option and force manual cleanup.
manual = True
if not self.clean_initialized:
# Make cleanup files relative to the cleanup script in case the result folder moves.
with open(self.cleanup_file, "a") as myfile:
clean_init = 'DIR="$(cd -P -- "$(dirname -- "$0")" && pwd -P)"'
myfile.write(clean_init + "\n")
myfile.write("cd ${DIR}\n")
self.clean_initialized = True
if manual:
try:
filenames = glob.glob(regex)
for filename in filenames:
with open(self.cleanup_file, "a") as myfile:
relative_filename = os.path.relpath(filename, self.outfolder) \
if os.path.isabs(filename) else filename
if os.path.isfile(relative_filename):
myfile.write("rm " + relative_filename + "\n")
elif os.path.isdir(relative_filename):
# first, add all filenames in the directory
myfile.write("rm " + relative_filename + "/*\n")
# and the directory itself
myfile.write("rmdir " + relative_filename + "\n")
except:
pass
elif conditional:
self.cleanup_list_conditional.append(regex)
else:
self.cleanup_list.append(regex)
# TODO: what's the "absolute" list?
# Remove it from the conditional list if added to the absolute list
while regex in self.cleanup_list_conditional:
self.cleanup_list_conditional.remove(regex)
|
def clean_add(self, regex, conditional=False, manual=False)
|
Add files (or regexs) to a cleanup list, to delete when this pipeline completes successfully.
When making a call with run that produces intermediate files that should be
deleted after the pipeline completes, you flag these files for deletion with this command.
Files added with clean_add will only be deleted upon success of the pipeline.
:param str regex: A unix-style regular expression that matches files to delete
(can also be a file name).
:param bool conditional: True means the files will only be deleted if no other
pipelines are currently running; otherwise they are added to a manual cleanup script
called {pipeline_name}_cleanup.sh
:param bool manual: True means the files will just be added to a manual cleanup script.
| 3.88518
| 3.831219
| 1.014084
|
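A short sketch of flagging intermediates for cleanup; the globs and file names are illustrative, and the constructor is assumed as in the earlier sketches.

```python
import pypiper

pm = pypiper.PipelineManager(name="demo", outfolder="pipeline_output")

pm.clean_add("pipeline_output/*.tmp")                              # delete on success
pm.clean_add("pipeline_output/unfiltered.bam", conditional=True)   # only if no other pipeline is running
pm.clean_add("pipeline_output/raw_fastq/*", manual=True)           # only listed in the cleanup script

pm.stop_pipeline()
```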
if dry_run:
# Move all unconditional cleans into the conditional list
if len(self.cleanup_list) > 0:
combined_list = self.cleanup_list_conditional + self.cleanup_list
self.cleanup_list_conditional = combined_list
self.cleanup_list = []
if len(self.cleanup_list) > 0:
print("\nCleaning up flagged intermediate files. . .")
for expr in self.cleanup_list:
print("\nRemoving glob: " + expr)
try:
# Expand regular expression
files = glob.glob(expr)
# Remove entry from cleanup list
while files in self.cleanup_list:
self.cleanup_list.remove(files)
# and delete the files
for file in files:
if os.path.isfile(file):
print("`rm " + file + "`")
os.remove(os.path.join(file))
elif os.path.isdir(file):
print("`rmdir " + file + "`")
os.rmdir(os.path.join(file))
except:
pass
if len(self.cleanup_list_conditional) > 0:
run_flag = flag_name(RUN_FLAG)
flag_files = [fn for fn in glob.glob(self.outfolder + flag_name("*"))
if COMPLETE_FLAG not in os.path.basename(fn)
and not "{}_{}".format(self.name, run_flag) == os.path.basename(fn)]
if len(flag_files) == 0 and not dry_run:
print("\nCleaning up conditional list. . .")
for expr in self.cleanup_list_conditional:
print("\nRemoving glob: " + expr)
try:
files = glob.glob(expr)
while files in self.cleanup_list_conditional:
self.cleanup_list_conditional.remove(files)
for file in files:
if os.path.isfile(file):
print("`rm " + file + "`")
os.remove(os.path.join(file))
elif os.path.isdir(file):
print("`rmdir " + file + "`")
os.rmdir(os.path.join(file))
except:
pass
else:
print("\nConditional flag found: " + str([os.path.basename(i) for i in flag_files]))
print("\nThese conditional files were left in place:\n\n- " + "\n- ".join(self.cleanup_list_conditional))
# Produce a cleanup script.
no_cleanup_script = []
for cleandir in self.cleanup_list_conditional:
try:
items_to_clean = glob.glob(cleandir)
for clean_item in items_to_clean:
with open(self.cleanup_file, "a") as clean_script:
if os.path.isfile(clean_item):
clean_script.write("rm " + clean_item + "\n")
elif os.path.isdir(clean_item):
clean_script.write("rmdir " + clean_item + "\n")
except Exception as e:
no_cleanup_script.append(cleandir)
if no_cleanup_script:
print('\n\nCould not produce cleanup script for item(s):\n\n- ' + '\n- '.join(no_cleanup_script))
|
def _cleanup(self, dry_run=False)
|
Cleans up (removes) intermediate files.
You can register intermediate files, which will be deleted automatically
when the pipeline completes. This function deletes them,
either absolutely or conditionally. It is run automatically when the
pipeline succeeds, so you shouldn't need to call it from a pipeline.
:param bool dry_run: Set to True if you want to build a cleanup
script, but not actually delete the files.
| 2.465043
| 2.443497
| 1.008817
|
if container:
# TODO: Put some debug output here with switch to Logger
# since this is relatively untested.
cmd = "docker stats " + container + " --format '{{.MemUsage}}' --no-stream"
mem_use_str = subprocess.check_output(cmd, shell=True).decode()
mem_use = mem_use_str.split("/")[0].split()
mem_num = re.findall('[\d\.]+', mem_use_str.split("/")[0])[0]
mem_scale = re.findall('[A-Za-z]+', mem_use_str.split("/")[0])[0]
#print(mem_use_str, mem_num, mem_scale)
mem_num = float(mem_num)
if mem_scale == "GiB":
return mem_num * 1e6
elif mem_scale == "MiB":
return mem_num * 1e3
elif mem_scale == "KiB":
return mem_num
else:
# What type is this?
return 0
# Thanks Martin Geisler:
status = None
result = {'peak': 0, 'rss': 0, 'hwm': 0}
try:
# This will only work on systems with a /proc file system
# (like Linux).
# status = open('/proc/self/status')
proc_spot = '/proc/%s/status' % pid
status = open(proc_spot)
for line in status:
parts = line.split()
key = parts[0][2:-1].lower()
if key in result:
result[key] = int(parts[1])
except:
return 0
finally:
if status is not None:
status.close()
# print(result[category])
return result[category]
|
def _memory_usage(self, pid='self', category="hwm", container=None)
|
Memory usage of the process in kilobytes.
:param str pid: Process ID of process to check
:param str category: Memory type to check. 'hwm' for high water mark.
:param str container: Name or ID of a Docker container in which to
check memory usage, if applicable.
| 3.53908
| 3.667323
| 0.965031
|
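A standalone, Linux-only sketch of the /proc lookup used by the non-container branch: read the high-water-mark (VmHWM) line from /proc/<pid>/status, in kilobytes. It simply returns 0 where /proc is unavailable; the helper name is illustrative.

```python
def hwm_kb(pid="self"):
    """Return the VmHWM value (kB) for a process, or 0 if unavailable."""
    try:
        with open("/proc/%s/status" % pid) as fh:
            for line in fh:
                if line.startswith("VmHWM:"):
                    return int(line.split()[1])
    except IOError:
        pass
    return 0

print(hwm_kb())
```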
if not nofail:
self.fail_pipeline(e)
elif self._failed:
print("This is a nofail process, but the pipeline was terminated for other reasons, so we fail.")
raise e
else:
print(e)
print("ERROR: Subprocess returned nonzero result, but pipeline is continuing because nofail=True")
|
def _triage_error(self, e, nofail)
|
Print a message and decide what to do about an error.
| 9.085966
| 8.935834
| 1.016801
|
path_reqs_file = os.path.join(
"requirements", "reqs-{}.txt".format(reqs_name))
with open(path_reqs_file, 'r') as reqs_file:
return [pkg.rstrip() for pkg in reqs_file.readlines()
if not pkg.startswith("#")]
|
def read_reqs_file(reqs_name)
|
Read requirements file for given requirements group.
| 2.74222
| 2.714023
| 1.01039
|
if not isinstance(collection, Iterable):
raise TypeError("Non-iterable alleged collection: {}".
format(type(collection)))
return isinstance(collection, set) or \
(isinstance(collection, dict) and
not isinstance(collection, OrderedDict))
|
def _is_unordered(collection)
|
Determine whether a collection appears to be unordered.
This is a conservative implementation, allowing for the possibility that
someone's implemented Mapping or Set, for example, and provided an
__iter__ implementation that defines a consistent ordering of the
collection's elements.
:param object collection: Object to check as an unordered collection.
:return bool: Whether the given object appears to be unordered
:raises TypeError: If the given "collection" is non-iterable, it's
illogical to investigate whether it's ordered.
| 5.105458
| 5.25159
| 0.972174
|
# The logic used here, a message to a user about how to specify Stage.
req_msg = "Stage specification must be either a {0} itself, a " \
"(<name>, {0}) pair, or a callable with a __name__ attribute " \
"(e.g., a non-anonymous function)".format(Stage.__name__)
# Simplest case is stage itself.
if isinstance(stage_spec, Stage):
return stage_spec.name, stage_spec
# Handle alternate forms of specification.
try:
# Unpack pair of name and stage, requiring name first.
name, stage = stage_spec
except (TypeError, ValueError):
# Normally, this sort of unpacking issue creates a ValueError. Here,
# though, we also need to catch TypeError since that's what arises
# if an attempt is made to unpack a single function.
# Attempt to parse stage_spec as a single named callable.
try:
name = stage_spec.__name__
except AttributeError:
raise TypeError(req_msg)
else:
# Control flow here indicates an anonymous function that was not
# paired with a name. Prohibit that.
if name == (lambda: None).__name__:
raise TypeError(req_msg)
stage = stage_spec
# Ensure that the stage is callable.
if not hasattr(stage, "__call__"):
raise TypeError(req_msg)
return name, Stage(stage, name=name)
|
def _parse_stage_spec(stage_spec)
|
Handle alternate Stage specifications, returning name and Stage.
Isolate this parsing logic from any iteration. TypeError as single
exception type funnel also provides a more uniform way for callers to
handle specification errors (e.g., skip a stage, warn, re-raise, etc.)
:param (str, pypiper.Stage) | callable stage_spec: name and Stage
:return (name, pypiper.Stage): Pair of name and Stage instance from parsing
input specification
:raise TypeError: if the specification of the stage is not a supported type
| 6.250203
| 5.55068
| 1.126025
|
# Canonical usage model for Pipeline checkpointing through
# implementations of this class is by automatically creating a
# checkpoint when a conceptual unit or group of operations of a
# pipeline completes, so fix the 'finished' parameter to the manager's
# timestamp method to be True.
return self.manager.timestamp(
message=msg, checkpoint=stage.checkpoint_name, finished=True)
|
def checkpoint(self, stage, msg="")
|
Touch checkpoint file for given stage and provide timestamp message.
:param pypiper.Stage stage: Stage for which to mark checkpoint
:param str msg: Message to embed in timestamp.
:return bool: Whether a checkpoint file was written.
| 32.68409
| 26.351629
| 1.240306
|
check_path = checkpoint_filepath(stage, self.manager)
return os.path.exists(check_path)
|
def completed_stage(self, stage)
|
Determine whether the pipeline's completed the stage indicated.
:param pypiper.Stage stage: Stage to check for completion status.
:return bool: Whether this pipeline's completed the indicated stage.
:raises UnknownStageException: If the stage name given is undefined
for the pipeline.
| 10.501155
| 15.165275
| 0.692447
|
paths = glob.glob(os.path.join(self.outfolder, flag_name("*")))
if only_name:
return [os.path.split(p)[1] for p in paths]
else:
return paths
|
def list_flags(self, only_name=False)
|
Determine the flag files associated with this pipeline.
:param bool only_name: Whether to return only flag file name(s) (True),
or full flag file paths (False); default False (paths)
:return list[str]: flag files associated with this pipeline.
| 3.542232
| 3.95347
| 0.89598
|
# Start the run with a clean slate of Stage status/label tracking.
self._reset()
# TODO: validate starting point against checkpoint flags for
# TODO (cont.): earlier stages if the pipeline defines its stages as a
# TODO (cont.): sequence (i.e., probably prohibit start point with
# TODO (cont): nonexistent earlier checkpoint flag(s).)
if stop_before and stop_after:
raise IllegalPipelineExecutionError(
"Cannot specify both inclusive and exclusive stops.")
if stop_before:
stop = stop_before
inclusive_stop = False
elif stop_after:
stop = stop_after
inclusive_stop = True
else:
stop = None
inclusive_stop = None
# Ensure that a stage name--if specified--is supported.
for s in [start_point, stop]:
if s is None:
continue
name = parse_stage_name(s)
if name not in self.stage_names:
raise UnknownPipelineStageError(name, self)
# Permit order-agnostic pipelines, but warn.
if self._unordered and (start_point or stop_before or stop_after):
print("WARNING: Starting and stopping points are nonsense for "
"pipeline with unordered stages.")
# TODO: consider context manager based on start/stop points.
# Determine where to start (but perhaps skip further based on
# checkpoint completions.)
start_index = self._start_index(start_point)
stop_index = self._stop_index(stop, inclusive=inclusive_stop)
assert stop_index <= len(self._stages)
if start_index >= stop_index:
raise IllegalPipelineExecutionError(
"Cannot start pipeline at or after stopping point")
# TODO: consider storing just stage name rather than entire stage.
# TODO (cont.): the bad case for whole-Stage is if associated data
# TODO (cont.): (i.e., one or more args) are large.
self.skipped.extend(self._stages[:start_index])
# TODO: support both courses of action for non-continuous checkpoints.
# TODO (cont.): That is, what if there's a stage with a checkpoint
# TODO (cont.): file downstream of one without it? Naively, we'll
# TODO (cont.): skip it, but we may want to re-run.
skip_mode = True
for stage in self._stages[start_index:stop_index]:
# TODO: Note that there's no way to tell whether a non-checkpointed
# TODO (cont.) Stage has been completed, and thus this seek
# TODO (cont.) operation will find the first Stage, starting
# TODO (cont.) the specified start point, either uncheckpointed or
# TODO (cont.) for which the checkpoint file does not exist.
# Look for checkpoint file.
if skip_mode and self.completed_stage(stage):
print("Skipping completed checkpoint stage: {}".format(stage))
self.skipped.append(stage)
continue
# Once we've found where to begin execution, ignore checkpoint
# flags downstream if they exist since there may be dependence
# between results from different stages.
skip_mode = False
print("Running stage: {}".format(stage))
stage.run()
self.executed.append(stage)
self.checkpoint(stage)
# Add any unused stages to the collection of skips.
self.skipped.extend(self._stages[stop_index:])
# Where we stopped determines the shutdown mode.
if stop_index == len(self._stages):
self.wrapup()
else:
self.halt(raise_error=False)
|
def run(self, start_point=None, stop_before=None, stop_after=None)
|
Run the pipeline, optionally specifying start and/or stop points.
:param str start_point: Name of stage at which to begin execution.
:param str stop_before: Name of stage at which to cease execution;
exclusive, i.e. this stage is not run
:param str stop_after: Name of stage at which to cease execution;
inclusive, i.e. this stage is the last one run
:raise IllegalPipelineExecutionError: If both inclusive (stop_after)
and exclusive (stop_before) halting points are provided, or if that
start stage is the same as or after the stop stage, raise an
IllegalPipelineExecutionError.
| 6.905105
| 6.691127
| 1.031979
|
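A standalone sketch of the start/stop index arithmetic `run` uses: `stop_before` is exclusive while `stop_after` is inclusive. The stage names are illustrative.

```python
stages = ["trim", "align", "call_peaks", "annotate"]

def stop_index(stop_name, inclusive):
    i = stages.index(stop_name)
    return i + 1 if inclusive else i

start = stages.index("align")                      # start_point="align"
stop = stop_index("call_peaks", inclusive=False)   # stop_before="call_peaks"
print(stages[start:stop])                          # ['align'] would execute

stop = stop_index("align", inclusive=True)         # stop_after="align"
print(stages[start:stop])                          # still just ['align']
```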
if start is None:
return 0
start_stage = translate_stage_name(start)
internal_names = [translate_stage_name(s.name) for s in self._stages]
try:
return internal_names.index(start_stage)
except ValueError:
raise UnknownPipelineStageError(start, self)
|
def _start_index(self, start=None)
|
Seek to the first stage to run.
| 4.045114
| 3.41111
| 1.185865
|
if not stop_point:
# Null case, no stopping point
return len(self._stages)
stop_name = parse_stage_name(stop_point)
try:
stop_index = self.stage_names.index(stop_name)
except ValueError:
raise UnknownPipelineStageError(stop_name, self)
return stop_index + 1 if inclusive else stop_index
|
def _stop_index(self, stop_point, inclusive)
|
Determine index of stage of stopping point for run().
:param str | pypiper.Stage | function stop_point: Stopping point itself
or name of it.
:param bool inclusive: Whether the stopping point is to be regarded as
inclusive (i.e., whether it's the final stage to run, or the one
just beyond)
:return int: Index into sequence of Pipeline's stages that indicates
where to stop; critically, the value of the inclusive parameter
here is used to contextualize this index such that it's always
returned as an exclusive stopping index (i.e., execute up to the
stage indexed by the value returned from this function.)
| 3.923707
| 3.537383
| 1.109212
|