def build_action(name=None,
image_uri=None,
commands=None,
entrypoint=None,
environment=None,
pid_namespace=None,
flags=None,
port_mappings=None,
mounts=None,
labels=None):
"""Build an Action object for a Pipeline request.
Args:
name (str): An optional name for the container.
image_uri (str): The URI to pull the container image from.
commands (List[str]): Commands and arguments to run inside the container.
entrypoint (str): Overrides the ENTRYPOINT specified in the container.
environment (dict[str,str]): The environment to pass into the container.
pid_namespace (str): The PID namespace to run the action inside.
flags (str): Flags that control the execution of this action.
port_mappings (dict[int, int]): A map of container to host port mappings for
this container.
mounts (List): A list of mounts to make available to the action.
labels (dict[str, str]): Labels to associate with the action.
Returns:
An object representing an Action resource.
"""
return {
'name': name,
'imageUri': image_uri,
'commands': commands,
'entrypoint': entrypoint,
'environment': environment,
'pidNamespace': pid_namespace,
'flags': flags,
'portMappings': port_mappings,
'mounts': mounts,
'labels': labels,
}
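# A minimal usage sketch (hypothetical values), assuming build_action is
# importable; it shows the camelCase Action mapping produced for a Pipelines
# v2 request. Arguments that are not supplied remain None.
example_action = build_action(
    name='my-action',
    image_uri='ubuntu:14.04',
    commands=['/bin/sh', '-c', 'echo "${GREETING}"'],
    environment={'GREETING': 'hello'})
assert example_action['imageUri'] == 'ubuntu:14.04'
assert example_action['pidNamespace'] is None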
|
def lookup_job_tasks(self,
statuses,
user_ids=None,
job_ids=None,
job_names=None,
task_ids=None,
task_attempts=None,
labels=None,
create_time_min=None,
create_time_max=None,
max_tasks=0):
"""Return a list of operations. See base.py for additional detail."""
statuses = None if statuses == {'*'} else statuses
user_ids = None if user_ids == {'*'} else user_ids
job_ids = None if job_ids == {'*'} else job_ids
job_names = None if job_names == {'*'} else job_names
task_ids = None if task_ids == {'*'} else task_ids
task_attempts = None if task_attempts == {'*'} else task_attempts
if labels or create_time_min or create_time_max:
raise NotImplementedError(
'Lookup by labels and create_time not yet supported by stub.')
operations = [
x for x in self._operations
if ((not statuses or x.get_field('status', (None, None))[0] in statuses
) and (not user_ids or x.get_field('user', None) in user_ids) and
(not job_ids or x.get_field('job-id', None) in job_ids) and
(not job_names or x.get_field('job-name', None) in job_names) and
(not task_ids or x.get_field('task-id', None) in task_ids) and
(not task_attempts or
x.get_field('task-attempt', None) in task_attempts))
]
if max_tasks > 0:
operations = operations[:max_tasks]
return operations
|
def get_provider(args, resources):
"""Returns a provider for job submission requests."""
provider = getattr(args, 'provider', 'google')
if provider == 'google':
return google.GoogleJobProvider(
getattr(args, 'verbose', False),
getattr(args, 'dry_run', False), args.project)
elif provider == 'google-v2':
return google_v2.GoogleV2JobProvider(
getattr(args, 'verbose', False), getattr(args, 'dry_run', False),
args.project)
elif provider == 'local':
return local.LocalJobProvider(resources)
elif provider == 'test-fails':
return test_fails.FailsJobProvider()
else:
raise ValueError('Unknown provider: ' + provider)
|
def create_parser(prog):
"""Create an argument parser, adding in the list of providers."""
parser = argparse.ArgumentParser(prog=prog, formatter_class=DsubHelpFormatter)
parser.add_argument(
'--provider',
default='google-v2',
choices=['local', 'google', 'google-v2', 'test-fails'],
help="""Job service provider. Valid values are "google-v2" (Google's
Pipeline API v2) and "local" (local Docker execution). "test-*"
providers are for testing purposes only.""",
metavar='PROVIDER')
return parser
|
def parse_args(parser, provider_required_args, argv):
"""Add provider required arguments epilog message, parse, and validate."""
# Add the provider required arguments epilog message
epilog = 'Provider-required arguments:\n'
for provider in provider_required_args:
epilog += ' %s: %s\n' % (provider, provider_required_args[provider])
parser.epilog = epilog
# Parse arguments
args = parser.parse_args(argv)
# For the selected provider, check the required arguments
for arg in provider_required_args[args.provider]:
if not getattr(args, arg):
parser.error('argument --%s is required' % arg)
return args
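# A small sketch, assuming create_parser and parse_args are in scope; the
# provider-required-args table below is hypothetical but mirrors the one
# passed by the dsub commands themselves.
example_parser = create_parser('dstat')
example_args = parse_args(
    example_parser,
    {'google': ['project'], 'google-v2': ['project'],
     'local': [], 'test-fails': []},
    ['--provider', 'local'])
assert example_args.provider == 'local'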
|
def get_dstat_provider_args(provider, project):
"""A string with the arguments to point dstat to the same provider+project."""
provider_name = get_provider_name(provider)
args = []
if provider_name == 'google':
args.append('--project %s' % project)
elif provider_name == 'google-v2':
args.append('--project %s' % project)
elif provider_name == 'local':
pass
elif provider_name == 'test-fails':
pass
else:
# New providers should add their dstat required arguments here.
assert False, 'Provider %s needs get_dstat_provider_args support' % provider_name
args.insert(0, '--provider %s' % provider_name)
return ' '.join(args)
|
def _format_task_uri(fmt, job_metadata, task_metadata):
"""Returns a URI with placeholders replaced by metadata values."""
values = {
'job-id': None,
'task-id': 'task',
'job-name': None,
'user-id': None,
'task-attempt': None
}
for key in values:
values[key] = task_metadata.get(key) or job_metadata.get(key) or values[key]
return fmt.format(**values)
|
def format_logging_uri(uri, job_metadata, task_metadata):
"""Inserts task metadata into the logging URI.
The core behavior is inspired by the Google Pipelines API:
(1) If the uri ends in ".log", then that is the logging path.
(2) Otherwise, the uri is treated as "directory" for logs and a filename
needs to be automatically generated.
For (1), if the job is a --tasks job, then the {task-id} is inserted
before ".log".
For (2), the file name generated is {job-id}, or for --tasks jobs, it is
{job-id}.{task-id}.
In both cases .{task-attempt} is inserted before .log for --retries jobs.
In addition, full task metadata substitution is supported. The URI
may include substitution strings such as
"{job-id}", "{task-id}", "{job-name}", "{user-id}", and "{task-attempt}".
Args:
uri: User-specified logging URI which may contain substitution fields.
job_metadata: job-global metadata.
task_metadata: tasks-specific metadata.
Returns:
The logging_uri formatted as described above.
"""
# If the user specifies any formatting (with curly braces), then use that
# as the format string unchanged.
fmt = str(uri)
if '{' not in fmt:
if uri.endswith('.log'):
# URI includes a filename. Trim the extension and just use the prefix.
fmt = os.path.splitext(uri)[0]
else:
# URI is a path to a directory. The job-id becomes the filename prefix.
fmt = os.path.join(uri, '{job-id}')
# If this is a task job, add the task-id.
if task_metadata.get('task-id') is not None:
fmt += '.{task-id}'
# If this is a retryable task, add the task-attempt.
if task_metadata.get('task-attempt') is not None:
fmt += '.{task-attempt}'
fmt += '.log'
return _format_task_uri(fmt, job_metadata, task_metadata)
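# A small sketch, assuming format_logging_uri is in scope; the metadata values
# are hypothetical. A directory-style URI gets a generated file name, and
# task-id and task-attempt are appended when present.
example_log_uri = format_logging_uri(
    'gs://my-bucket/logs/',
    {'job-id': 'echo--user--180101-000000-00'},
    {'task-id': 3, 'task-attempt': 2})
assert example_log_uri == (
    'gs://my-bucket/logs/echo--user--180101-000000-00.3.2.log')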
|
def _google_v2_parse_arguments(args):
"""Validated google-v2 arguments."""
if (args.zones and args.regions) or (not args.zones and not args.regions):
raise ValueError('Exactly one of --regions and --zones must be specified')
if args.machine_type and (args.min_cores or args.min_ram):
raise ValueError(
'--machine-type not supported together with --min-cores or --min-ram.')
|
def _parse_arguments(prog, argv):
"""Parses command line arguments.
Args:
prog: The path of the program (dsub.py) or an alternate program name to
display in usage.
argv: The list of program arguments to parse.
Returns:
A Namespace of parsed arguments.
"""
# Handle version flag and exit if it was passed.
param_util.handle_version_flag()
parser = provider_base.create_parser(prog)
# Add dsub core job submission arguments
parser.add_argument(
'--version', '-v', default=False, help='Print the dsub version and exit.')
parser.add_argument(
'--unique-job-id',
default=False,
action='store_true',
help="""Experimental: create a unique 32 character UUID for the dsub
job-id using https://docs.python.org/3/library/uuid.html.""")
parser.add_argument(
'--name',
help="""Name for pipeline. Defaults to the script name or
first token of the --command if specified.""")
parser.add_argument(
'--tasks',
nargs='*',
action=TaskParamAction,
help="""Path to a file of tab separated values (TSV) for task parameters.
The file may be located in the local filesystem or in a Google Cloud
Storage bucket.
The first line is a list of column headers specifying an --env,
--input, --input-recursive, --output or --output-recursive variable,
and each subsequent line specifies the values for a task.
Optionally specify tasks from the file to submit. Can take the form
"m", "m-", or "m-n" where m and n are task numbers starting at 1.""",
metavar='FILE M-N')
parser.add_argument(
'--image',
default='ubuntu:14.04',
help="""Image name from Docker Hub, Google Container Repository, or other
Docker image service. The pipeline must have READ access to the
image.""")
parser.add_argument(
'--dry-run',
default=False,
action='store_true',
help='Print the pipeline(s) that would be run and then exit.')
parser.add_argument(
'--command',
help="""Command to run inside the job\'s Docker container. This argument
or the --script argument must be provided.""",
metavar='COMMAND')
parser.add_argument(
'--script',
help="""Path to a script that is located in the local file system or
inside a Google Cloud Storage bucket. This script will be run inside
the job\'s Docker container. This argument or the --command
argument must be provided.""",
metavar='SCRIPT')
parser.add_argument(
'--env',
nargs='*',
action=param_util.ListParamAction,
default=[],
help='Environment variables for the script\'s execution environment',
metavar='KEY=VALUE')
parser.add_argument(
'--label',
nargs='*',
action=param_util.ListParamAction,
default=[],
help='Labels to associate to the job.',
metavar='KEY=VALUE')
parser.add_argument(
'--input',
nargs='*',
action=param_util.ListParamAction,
default=[],
help="""Input path arguments to localize into the script's execution
environment""",
metavar='KEY=REMOTE_PATH')
parser.add_argument(
'--input-recursive',
nargs='*',
action=param_util.ListParamAction,
default=[],
help="""Input path arguments to localize recursively into the script\'s
execution environment""",
metavar='KEY=REMOTE_PATH')
parser.add_argument(
'--output',
nargs='*',
action=param_util.ListParamAction,
default=[],
help="""Output path arguments to de-localize from the script\'s execution
environment""",
metavar='KEY=REMOTE_PATH')
parser.add_argument(
'--output-recursive',
nargs='*',
action=param_util.ListParamAction,
default=[],
help="""Output path arguments to de-localize recursively from the script's
execution environment""",
metavar='KEY=REMOTE_PATH')
parser.add_argument(
'--user',
'-u',
help='User submitting the dsub job, defaults to the current OS user.')
parser.add_argument(
'--user-project',
help="""Specify a user project to be billed for all requests to Google
Cloud Storage (logging, localization, delocalization). This flag exists
to support accessing Requester Pays buckets""")
parser.add_argument(
'--mount',
nargs='*',
action=param_util.ListParamAction,
default=[],
help="""Mount a resource such as a bucket, disk, or directory into your
Docker container""",
metavar='KEY=PATH_SPEC')
# Add dsub job management arguments
parser.add_argument(
'--wait',
action='store_true',
help='Wait for the job to finish all its tasks.')
parser.add_argument(
'--retries',
default=0,
type=int,
help='Number of retries to perform on failed tasks.')
parser.add_argument(
'--poll-interval',
default=10,
type=int,
help='Polling interval (in seconds) for checking job status '
'when --wait or --after are set.')
parser.add_argument(
'--after',
nargs='+',
default=[],
help='Job ID(s) to wait for before starting this job.')
parser.add_argument(
'--skip',
default=False,
action='store_true',
help="""Do not submit the job if all output specified using the --output
and --output-recursive parameters already exist. Note that wildcard
and recursive outputs cannot be strictly verified. See the
documentation for details.""")
# Add dsub resource requirement arguments
parser.add_argument(
'--min-cores',
type=int,
help='Minimum CPU cores for each job')
parser.add_argument(
'--min-ram',
type=float,
help='Minimum RAM per job in GB')
parser.add_argument(
'--disk-size',
default=job_model.DEFAULT_DISK_SIZE,
type=int,
help='Size (in GB) of data disk to attach for each job')
parser.add_argument(
'--logging',
help='Cloud Storage path to send logging output'
' (either a folder, or file ending in ".log")')
# Add provider-specific arguments
# Shared arguments between the "google" and "google-v2" providers
google_common = parser.add_argument_group(
title='google-common',
description='Options common to the "google" and "google-v2" providers')
google_common.add_argument(
'--project', help='Cloud project ID in which to run the pipeline')
google_common.add_argument(
'--boot-disk-size',
default=job_model.DEFAULT_BOOT_DISK_SIZE,
type=int,
help='Size (in GB) of the boot disk')
google_common.add_argument(
'--preemptible',
default=False,
action='store_true',
help='Use a preemptible VM for the job')
google_common.add_argument(
'--zones', nargs='+', help='List of Google Compute Engine zones.')
google_common.add_argument(
'--scopes',
nargs='+',
help="""Space-separated scopes for Google Compute Engine instances.
If unspecified, provider will use '%s'""" % ','.join(
google_base.DEFAULT_SCOPES))
google_common.add_argument(
'--accelerator-type',
help="""The Compute Engine accelerator type. By specifying this parameter,
you will download and install the following third-party software onto
your job's Compute Engine instances: NVIDIA(R) Tesla(R) drivers and
NVIDIA(R) CUDA toolkit. Please see
https://cloud.google.com/compute/docs/gpus/ for supported GPU types
and
https://cloud.google.com/genomics/reference/rest/v1alpha2/pipelines#pipelineresources
for more details.""")
google_common.add_argument(
'--accelerator-count',
type=int,
default=0,
help="""The number of accelerators of the specified type to attach.
By specifying this parameter, you will download and install the
following third-party software onto your job's Compute Engine
instances: NVIDIA(R) Tesla(R) drivers and NVIDIA(R) CUDA toolkit.""")
google = parser.add_argument_group(
title='"google" provider options',
description='See also the "google-common" options listed above')
google.add_argument(
'--keep-alive',
type=int,
help="""Time (in seconds) to keep a tasks's virtual machine (VM) running
after a localization, docker command, or delocalization failure.
Allows for connecting to the VM for debugging.
Default is 0; maximum allowed value is 86400 (1 day).""")
google_v2 = parser.add_argument_group(
title='"google-v2" provider options',
description='See also the "google-common" options listed above')
google_v2.add_argument(
'--regions',
nargs='+',
help="""List of Google Compute Engine regions.
Only one of --zones and --regions may be specified.""")
google_v2.add_argument(
'--machine-type', help='Provider-specific machine type')
google_v2.add_argument(
'--cpu-platform',
help="""The CPU platform to request. Supported values can be found at
https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform"""
)
google_v2.add_argument(
'--network',
help="""The Compute Engine VPC network name to attach the VM's network
interface to. The value will be prefixed with global/networks/ unless
it contains a /, in which case it is assumed to be a fully specified
network resource URL.""")
google_v2.add_argument(
'--subnetwork',
help="""The name of the Compute Engine subnetwork to attach the instance
to.""")
google_v2.add_argument(
'--use-private-address',
default=False,
action='store_true',
help='If set to true, do not attach a public IP address to the VM.')
google_v2.add_argument(
'--timeout',
help="""The maximum amount of time to give the pipeline to complete.
This includes the time spent waiting for a worker to be allocated.
Time can be listed using a number followed by a unit. Supported units
are s (seconds), m (minutes), h (hours), d (days), w (weeks).
Example: '7d' (7 days).""")
google_v2.add_argument(
'--log-interval',
help="""The amount of time to sleep between copies of log files from
the pipeline to the logging path.
Time can be listed using a number followed by a unit. Supported units
are s (seconds), m (minutes), h (hours).
Example: '5m' (5 minutes). Default is '1m'.""")
google_v2.add_argument(
'--ssh',
default=False,
action='store_true',
help="""If set to true, start an ssh container in the background
to allow you to log in using SSH and debug in real time.""")
google_v2.add_argument(
'--nvidia-driver-version',
help="""The NVIDIA driver version to use when attaching an NVIDIA GPU
accelerator. The version specified here must be compatible with the
GPU libraries contained in the container being executed, and must be
one of the drivers hosted in the nvidia-drivers-us-public bucket on
Google Cloud Storage.""")
google_v2.add_argument(
'--service-account',
type=str,
help="""Email address of the service account to be authorized on the
Compute Engine VM for each job task. If not specified, the default
Compute Engine service account for the project will be used.""")
google_v2.add_argument(
'--disk-type',
help="""
The disk type to use for the data disk. Valid values are pd-standard,
pd-ssd, and local-ssd. The default value is pd-standard.""")
args = provider_base.parse_args(
parser, {
'google': ['project', 'zones', 'logging'],
'google-v2': ['project', 'logging'],
'test-fails': [],
'local': ['logging'],
}, argv)
if args.provider == 'google':
_google_parse_arguments(args)
if args.provider == 'google-v2':
_google_v2_parse_arguments(args)
return args
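# A sketch of a dry-run invocation, assuming _parse_arguments and the modules
# it references are importable; the logging path and command are hypothetical.
example_args = _parse_arguments('dsub', [
    '--provider', 'local',
    '--logging', '/tmp/dsub-logs/',
    '--command', 'echo "hello world"',
    '--dry-run'])
assert example_args.provider == 'local' and example_args.dry_run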
|
def _get_job_resources(args):
"""Extract job-global resources requirements from input args.
Args:
args: parsed command-line arguments
Returns:
Resources object containing the requested resources for the job
"""
logging = param_util.build_logging_param(
args.logging) if args.logging else None
timeout = param_util.timeout_in_seconds(args.timeout)
log_interval = param_util.log_interval_in_seconds(args.log_interval)
return job_model.Resources(
min_cores=args.min_cores,
min_ram=args.min_ram,
machine_type=args.machine_type,
disk_size=args.disk_size,
disk_type=args.disk_type,
boot_disk_size=args.boot_disk_size,
preemptible=args.preemptible,
image=args.image,
regions=args.regions,
zones=args.zones,
logging=logging,
logging_path=None,
service_account=args.service_account,
scopes=args.scopes,
keep_alive=args.keep_alive,
cpu_platform=args.cpu_platform,
network=args.network,
subnetwork=args.subnetwork,
use_private_address=args.use_private_address,
accelerator_type=args.accelerator_type,
accelerator_count=args.accelerator_count,
nvidia_driver_version=args.nvidia_driver_version,
timeout=timeout,
log_interval=log_interval,
ssh=args.ssh)
|
def _get_job_metadata(provider, user_id, job_name, script, task_ids,
user_project, unique_job_id):
"""Allow provider to extract job-specific metadata from command-line args.
Args:
provider: job service provider
user_id: user submitting the job
job_name: name for the job
script: the script to run
task_ids: a set of the task-ids for all tasks in the job
user_project: name of the project to be billed for the request
unique_job_id: whether to generate a unique UUID-based job id
Returns:
A dictionary of job-specific metadata (such as job id, name, etc.)
"""
create_time = dsub_util.replace_timezone(datetime.datetime.now(), tzlocal())
user_id = user_id or dsub_util.get_os_user()
job_metadata = provider.prepare_job_metadata(script.name, job_name, user_id,
create_time)
if unique_job_id:
job_metadata['job-id'] = uuid.uuid4().hex
job_metadata['create-time'] = create_time
job_metadata['script'] = script
job_metadata['user-project'] = user_project
if task_ids:
job_metadata['task-ids'] = dsub_util.compact_interval_string(list(task_ids))
return job_metadata
|
def _resolve_task_logging(job_metadata, job_resources, task_descriptors):
"""Resolve the logging path from job and task properties.
Args:
job_metadata: Job metadata, such as job-id, job-name, and user-id.
job_resources: Resources specified such as ram, cpu, and logging path.
task_descriptors: Task metadata, parameters, and resources.
Resolve the logging path, which may have substitution parameters such as
job-id, task-id, user-id, and job-name.
"""
if not job_resources.logging:
return
for task_descriptor in task_descriptors:
logging_uri = provider_base.format_logging_uri(
job_resources.logging.uri, job_metadata, task_descriptor.task_metadata)
logging_path = job_model.LoggingParam(logging_uri,
job_resources.logging.file_provider)
if task_descriptor.task_resources:
task_descriptor.task_resources = task_descriptor.task_resources._replace(
logging_path=logging_path)
else:
task_descriptor.task_resources = job_model.Resources(
logging_path=logging_path)
|
def _wait_after(provider, job_ids, poll_interval, stop_on_failure):
"""Print status info as we wait for those jobs.
Blocks until either all of the listed jobs succeed,
or one of them fails.
Args:
provider: job service provider
job_ids: a set of job IDs (string) to wait for
poll_interval: integer seconds to wait between iterations
stop_on_failure: whether to stop waiting if one of the tasks fails.
Returns:
Empty list if there was no error,
a list of error messages from the failed tasks otherwise.
"""
# Each time through the loop, the job_set is re-set to the jobs remaining to
# check. Jobs are removed from the list when they complete.
#
# We exit the loop when:
# * No jobs remain running, OR
# * stop_on_failure is TRUE AND at least one job returned an error
# remove NO_JOB
job_ids_to_check = {j for j in job_ids if j != dsub_util.NO_JOB}
error_messages = []
while job_ids_to_check and (not error_messages or not stop_on_failure):
print('Waiting for: %s.' % (', '.join(job_ids_to_check)))
# Poll until any remaining jobs have completed
jobs_left = _wait_for_any_job(provider, job_ids_to_check, poll_interval)
# Calculate which jobs just completed
jobs_completed = job_ids_to_check.difference(jobs_left)
# Get all tasks for the newly completed jobs
tasks_completed = provider.lookup_job_tasks({'*'}, job_ids=jobs_completed)
# We don't want to overwhelm the user with output when there are many
# tasks per job. So we get a single "dominant" task for each of the
# completed jobs (one that is representative of the job's fate).
dominant_job_tasks = _dominant_task_for_jobs(tasks_completed)
if len(dominant_job_tasks) != len(jobs_completed):
# print info about the jobs we couldn't find
# (should only occur for "--after" where the job ID is a typo).
jobs_found = dsub_util.tasks_to_job_ids(dominant_job_tasks)
jobs_not_found = jobs_completed.difference(jobs_found)
for j in jobs_not_found:
error = '%s: not found' % j
print_error(' %s' % error)
error_messages += [error]
# Print the dominant task for the completed jobs
for t in dominant_job_tasks:
job_id = t.get_field('job-id')
status = t.get_field('task-status')
print(' %s: %s' % (str(job_id), str(status)))
if status in ['FAILURE', 'CANCELED']:
error_messages += [provider.get_tasks_completion_messages([t])]
job_ids_to_check = jobs_left
return error_messages
|
def _wait_and_retry(provider, job_id, poll_interval, retries, job_descriptor):
"""Wait for job and retry any tasks that fail.
Stops retrying an individual task when: it succeeds, is canceled, or has been
retried "retries" times.
This function exits when there are no tasks running and there are no tasks
eligible to be retried.
Args:
provider: job service provider
job_id: a single job ID (string) to wait for
poll_interval: integer seconds to wait between iterations
retries: number of retries
job_descriptor: job descriptor used to originally submit job
Returns:
Empty list if there was no error,
a list containing an error message from a failed task otherwise.
"""
while True:
tasks = provider.lookup_job_tasks({'*'}, job_ids=[job_id])
running_tasks = set()
completed_tasks = set()
canceled_tasks = set()
fully_failed_tasks = set()
task_fail_count = dict()
# This is an arbitrary task that is either fully failed or canceled (with
# preference for the former).
message_task = None
task_dict = dict()
for t in tasks:
task_id = job_model.numeric_task_id(t.get_field('task-id'))
task_dict[task_id] = t
status = t.get_field('task-status')
if status == 'FAILURE':
# Could compute this from task-attempt as well.
task_fail_count[task_id] = task_fail_count.get(task_id, 0) + 1
if task_fail_count[task_id] > retries:
fully_failed_tasks.add(task_id)
message_task = t
elif status == 'CANCELED':
canceled_tasks.add(task_id)
if not message_task:
message_task = t
elif status == 'SUCCESS':
completed_tasks.add(task_id)
elif status == 'RUNNING':
running_tasks.add(task_id)
retry_tasks = (
set(task_fail_count).difference(fully_failed_tasks)
.difference(running_tasks).difference(completed_tasks)
.difference(canceled_tasks))
# job completed.
if not retry_tasks and not running_tasks:
# If there are any fully failed tasks, return the completion message of an
# arbitrary one.
# If not, but there are canceled tasks, return the completion message of
# an arbitrary one.
if message_task:
return [provider.get_tasks_completion_messages([message_task])]
# Otherwise successful completion.
return []
for task_id in retry_tasks:
identifier = '{}.{}'.format(job_id, task_id) if task_id else job_id
print(' {} (attempt {}) failed. Retrying.'.format(
identifier, task_fail_count[task_id]))
msg = task_dict[task_id].get_field('status-message')
print(' Failure message: {}'.format(msg))
_retry_task(provider, job_descriptor, task_id,
task_fail_count[task_id] + 1)
SLEEP_FUNCTION(poll_interval)
|
def _retry_task(provider, job_descriptor, task_id, task_attempt):
"""Retry task_id (numeric id) assigning it task_attempt."""
td_orig = job_descriptor.find_task_descriptor(task_id)
new_task_descriptors = [
job_model.TaskDescriptor({
'task-id': task_id,
'task-attempt': task_attempt
}, td_orig.task_params, td_orig.task_resources)
]
# Update the logging path.
_resolve_task_resources(job_descriptor.job_metadata,
job_descriptor.job_resources, new_task_descriptors)
provider.submit_job(
job_model.JobDescriptor(
job_descriptor.job_metadata, job_descriptor.job_params,
job_descriptor.job_resources, new_task_descriptors), False)
|
def _dominant_task_for_jobs(tasks):
"""A list with, for each job, its dominant task.
The dominant task is the one that exemplifies its job's
status. It is either:
- the first (FAILURE or CANCELED) task, or if none
- the first RUNNING task, or if none
- the first SUCCESS task.
Args:
tasks: a list of tasks to consider
Returns:
A list with, for each job, its dominant task.
"""
per_job = _group_tasks_by_jobid(tasks)
ret = []
for job_id in per_job.keys():
tasks_in_salience_order = sorted(per_job[job_id], key=_importance_of_task)
ret.append(tasks_in_salience_order[0])
return ret
|
def _group_tasks_by_jobid(tasks):
"""A defaultdict with, for each job, a list of its tasks."""
ret = collections.defaultdict(list)
for t in tasks:
ret[t.get_field('job-id')].append(t)
return ret
|
def _importance_of_task(task):
"""Tuple (importance, end-time). Smaller values are more important."""
# The status of a job is going to be determined by the roll-up of its tasks.
# A FAILURE or CANCELED task means the job has FAILED.
# If none, then any RUNNING task, the job is still RUNNING.
# If none, then the job status is SUCCESS.
#
# Thus the dominant task for each job is one that exemplifies its
# status:
#
# 1- The first (FAILURE or CANCELED) task, or if none
# 2- The first RUNNING task, or if none
# 3- The first SUCCESS task.
importance = {'FAILURE': 0, 'CANCELED': 0, 'RUNNING': 1, 'SUCCESS': 2}
return (importance[task.get_field('task-status')], task.get_field(
'end-time', datetime.datetime.max))
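# A tiny illustration of the salience ordering used above, on bare status
# strings rather than full task objects: failed or canceled tasks sort first,
# then running tasks, then successful ones.
_status_rank = {'FAILURE': 0, 'CANCELED': 0, 'RUNNING': 1, 'SUCCESS': 2}
assert sorted(['SUCCESS', 'RUNNING', 'FAILURE'], key=_status_rank.get) == [
    'FAILURE', 'RUNNING', 'SUCCESS']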
|
def _wait_for_any_job(provider, job_ids, poll_interval):
"""Waits until any of the listed jobs is not running.
In particular, if any of the jobs sees one of its tasks fail,
we count the whole job as failing (but do not terminate the remaining
tasks ourselves).
Args:
provider: job service provider
job_ids: a list of job IDs (string) to wait for
poll_interval: integer seconds to wait between iterations
Returns:
A set of the job IDs that still have at least one running task.
"""
if not job_ids:
return
while True:
tasks = provider.lookup_job_tasks({'*'}, job_ids=job_ids)
running_jobs = set()
failed_jobs = set()
for t in tasks:
status = t.get_field('task-status')
job_id = t.get_field('job-id')
if status in ['FAILURE', 'CANCELED']:
failed_jobs.add(job_id)
if status == 'RUNNING':
running_jobs.add(job_id)
remaining_jobs = running_jobs.difference(failed_jobs)
if failed_jobs or len(remaining_jobs) != len(job_ids):
return remaining_jobs
SLEEP_FUNCTION(poll_interval)
|
def _validate_job_and_task_arguments(job_params, task_descriptors):
"""Validates that job and task argument names do not overlap."""
if not task_descriptors:
return
task_params = task_descriptors[0].task_params
# The use case for specifying a label or env/input/output parameter on
# the command-line and also including it in the --tasks file is not obvious.
# Should the command-line override the --tasks file? Why?
# Until this use is articulated, generate an error on overlapping names.
# Check labels
from_jobs = {label.name for label in job_params['labels']}
from_tasks = {label.name for label in task_params['labels']}
intersect = from_jobs & from_tasks
if intersect:
raise ValueError(
'Names for labels on the command-line and in the --tasks file must not '
'be repeated: {}'.format(','.join(intersect)))
# Check envs, inputs, and outputs, all of which must not overlap each other
from_jobs = {
item.name
for item in job_params['envs'] | job_params['inputs']
| job_params['outputs']
}
from_tasks = {
item.name
for item in task_params['envs'] | task_params['inputs']
| task_params['outputs']
}
intersect = from_jobs & from_tasks
if intersect:
raise ValueError(
'Names for envs, inputs, and outputs on the command-line and in the '
'--tasks file must not be repeated: {}'.format(','.join(intersect)))
|
def run_main(args):
"""Execute job/task submission from command-line arguments."""
if args.command and args.script:
raise ValueError('Cannot supply both a --command and --script flag')
provider_base.check_for_unsupported_flag(args)
# Set up job parameters and job data from a tasks file or flags.
input_file_param_util = param_util.InputFileParamUtil(
DEFAULT_INPUT_LOCAL_PATH)
output_file_param_util = param_util.OutputFileParamUtil(
DEFAULT_OUTPUT_LOCAL_PATH)
mount_param_util = param_util.MountParamUtil(DEFAULT_MOUNT_LOCAL_PATH)
# Get job arguments from the command line
job_params = param_util.args_to_job_params(
args.env, args.label, args.input, args.input_recursive, args.output,
args.output_recursive, args.mount, input_file_param_util,
output_file_param_util, mount_param_util)
# If --tasks is on the command-line, then get task-specific data
if args.tasks:
task_descriptors = param_util.tasks_file_to_task_descriptors(
args.tasks, args.retries, input_file_param_util, output_file_param_util)
# Validate job data + task data
_validate_job_and_task_arguments(job_params, task_descriptors)
else:
# Create the implicit task
task_metadata = {'task-id': None}
if args.retries:
task_metadata['task-attempt'] = 1
task_descriptors = [
job_model.TaskDescriptor(task_metadata, {
'labels': set(),
'envs': set(),
'inputs': set(),
'outputs': set()
}, job_model.Resources())
]
return run(
provider_base.get_provider(args, resources),
_get_job_resources(args),
job_params,
task_descriptors,
name=args.name,
dry_run=args.dry_run,
command=args.command,
script=args.script,
user=args.user,
user_project=args.user_project,
wait=args.wait,
retries=args.retries,
poll_interval=args.poll_interval,
after=args.after,
skip=args.skip,
project=args.project,
disable_warning=True,
unique_job_id=args.unique_job_id)
|
def run(provider,
job_resources,
job_params,
task_descriptors,
name=None,
dry_run=False,
command=None,
script=None,
user=None,
user_project=None,
wait=False,
retries=0,
poll_interval=10,
after=None,
skip=False,
project=None,
disable_warning=False,
unique_job_id=False):
"""Actual dsub body, post-stdout-redirection."""
if not dry_run:
provider_base.emit_provider_message(provider)
if not disable_warning:
raise ValueError('Do not use this unstable API component!')
if command and script:
raise ValueError('Cannot supply both a command and script value.')
if command:
if name:
command_name = name
else:
command_name = _name_for_command(command)
# Add the shebang line to ensure the command is treated as Bash
script = job_model.Script(command_name, '#!/usr/bin/env bash\n' + command)
elif script:
# Read the script file
script_file = dsub_util.load_file(script)
script = job_model.Script(os.path.basename(script), script_file.read())
else:
raise ValueError('One of --command or a script name must be supplied')
if retries and not wait:
raise ValueError('Requesting retries requires requesting wait')
# The contract with providers and downstream code is that the job_params
# and task_params contain 'labels', 'envs', 'inputs', and 'outputs'.
job_model.ensure_job_params_are_complete(job_params)
job_model.ensure_task_params_are_complete(task_descriptors)
task_ids = {
task_descriptor.task_metadata.get('task-id')
for task_descriptor in task_descriptors
if task_descriptor.task_metadata.get('task-id') is not None
}
# Job and task parameters from the user have been validated.
# We can now compute some job and task properties, including:
# job_metadata such as the job-id, create-time, user-id, etc.
# task_resources such as the logging_path (which may include job-id, task-id)
job_metadata = _get_job_metadata(provider, user, name, script, task_ids,
user_project, unique_job_id)
_resolve_task_resources(job_metadata, job_resources, task_descriptors)
# Job and task properties are now all resolved. Begin execution!
if not dry_run:
print('Job: %s' % job_metadata['job-id'])
# Wait for predecessor jobs (if any)
if after:
if dry_run:
print('(Pretend) waiting for: %s.' % after)
else:
print('Waiting for predecessor jobs to complete...')
error_messages = _wait_after(provider, after, poll_interval, True)
if error_messages:
for msg in error_messages:
print_error(msg)
raise dsub_errors.PredecessorJobFailureError(
'One or more predecessor jobs completed but did not succeed.',
error_messages, None)
# Launch all the job tasks!
job_descriptor = job_model.JobDescriptor(job_metadata, job_params,
job_resources, task_descriptors)
launched_job = provider.submit_job(job_descriptor, skip)
if not dry_run:
if launched_job['job-id'] == dsub_util.NO_JOB:
print('Job output already present, skipping new job submission.')
return {'job-id': dsub_util.NO_JOB}
print('Launched job-id: %s' % launched_job['job-id'])
if launched_job.get('task-id'):
print('%s task(s)' % len(launched_job['task-id']))
print('To check the status, run:')
print(" dstat %s --jobs '%s' --users '%s' --status '*'" %
(provider_base.get_dstat_provider_args(provider, project),
launched_job['job-id'], launched_job['user-id']))
print('To cancel the job, run:')
print(" ddel %s --jobs '%s' --users '%s'" %
(provider_base.get_ddel_provider_args(provider, project),
launched_job['job-id'], launched_job['user-id']))
# Poll for job completion
if wait:
print('Waiting for job to complete...')
if retries:
error_messages = _wait_and_retry(provider, job_metadata['job-id'],
poll_interval, retries, job_descriptor)
else:
error_messages = _wait_after(provider, [job_metadata['job-id']],
poll_interval, False)
if error_messages:
for msg in error_messages:
print_error(msg)
raise dsub_errors.JobExecutionError(
'One or more jobs finished with status FAILURE or CANCELED'
' during wait.', error_messages, launched_job)
return launched_job
|
def _name_for_command(command):
r"""Craft a simple command name from the command.
The best command strings for this are going to be those where a simple
command was given; we will use the command to derive the name.
We won't always be able to figure something out and the caller should just
specify a "--name" on the command-line.
For example, for a command like "export VAR=val\necho ${VAR}", this function
would return "export".
If the command starts with whitespace or a comment, then we'll skip to the
first code we can find.
If we find nothing, just return "command".
>>> _name_for_command('samtools index "${BAM}"')
'samtools'
>>> _name_for_command('/usr/bin/sort "${INFILE}" > "${OUTFILE}"')
'sort'
>>> _name_for_command('# This should be ignored')
'command'
>>> _name_for_command('\\\n\\\n# Bad continuations, but ignore.\necho hello.')
'echo'
Arguments:
command: the user-provided command
Returns:
a proposed name for the task.
"""
lines = command.splitlines()
for line in lines:
line = line.strip()
if line and not line.startswith('#') and line != '\\':
return os.path.basename(re.split(r'\s', line)[0])
return 'command'
|
def _local_uri_rewriter(raw_uri):
"""Rewrite local file URIs as required by the rewrite_uris method.
Local file paths, unlike GCS paths, may have their raw URI simplified by
os.path.normpath which collapses extraneous indirect characters.
>>> _local_uri_rewriter('/tmp/a_path/../B_PATH/file.txt')
('/tmp/B_PATH/file.txt', 'file/tmp/B_PATH/file.txt')
>>> _local_uri_rewriter('/myhome/./mydir/')
('/myhome/mydir/', 'file/myhome/mydir/')
The local path rewriter will also work to preserve relative paths even
when creating the docker path. This prevents leaking of information on the
invoker's system to the remote system. Doing this requires a number of path
substitutions denoted with the _<rewrite>_ convention.
>>> _local_uri_rewriter('./../upper_dir/')[1]
'file/_dotdot_/upper_dir/'
>>> _local_uri_rewriter('~/localdata/*.bam')[1]
'file/_home_/localdata/*.bam'
Args:
raw_uri: (str) the raw file or directory path.
Returns:
normalized: a simplified and/or expanded version of the uri.
docker_path: the uri rewritten in the format required for mounting inside
a docker worker.
"""
# The path is split into components so that the filename is not rewritten.
raw_path, filename = os.path.split(raw_uri)
# Generate the local path that can be resolved by filesystem operations,
# this removes special shell characters, condenses indirects and replaces
# any unnecessary prefix.
prefix_replacements = [('file:///', '/'), ('~/', os.getenv('HOME')),
                       ('./', ''), ('file:/', '/')]
normed_path = raw_path
for prefix, replacement in prefix_replacements:
if normed_path.startswith(prefix):
normed_path = os.path.join(replacement, normed_path[len(prefix):])
# Because abspath strips the trailing '/' from bare directory references
# other than root, this ensures that all directory references end with '/'.
normed_uri = directory_fmt(os.path.abspath(normed_path))
normed_uri = os.path.join(normed_uri, filename)
# Generate the path used inside the docker image;
# 1) Get rid of extra indirects: /this/./that -> /this/that
# 2) Rewrite required indirects as synthetic characters.
# 3) Strip relative or absolute path leading character.
# 4) Add 'file/' prefix.
docker_rewrites = [(r'/\.\.', '/_dotdot_'), (r'^\.\.', '_dotdot_'),
(r'^~/', '_home_/'), (r'^file:/', '')]
docker_path = os.path.normpath(raw_path)
for pattern, replacement in docker_rewrites:
docker_path = re.sub(pattern, replacement, docker_path)
docker_path = docker_path.lstrip('./') # Strips any of '.' './' '/'.
docker_path = directory_fmt('file/' + docker_path) + filename
return normed_uri, docker_path
|
def _get_filtered_mounts(mounts, mount_param_type):
"""Helper function to return an appropriate set of mount parameters."""
return {mount for mount in mounts if isinstance(mount, mount_param_type)}
|
def build_logging_param(logging_uri, util_class=OutputFileParamUtil):
"""Convenience function simplifies construction of the logging uri."""
if not logging_uri:
return job_model.LoggingParam(None, None)
recursive = not logging_uri.endswith('.log')
oututil = util_class('')
_, uri, provider = oututil.parse_uri(logging_uri, recursive)
if '*' in uri.basename:
raise ValueError('Wildcards not allowed in logging URI: %s' % uri)
return job_model.LoggingParam(uri, provider)
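# A small sketch, assuming build_logging_param, OutputFileParamUtil, and
# job_model are in scope; the bucket path is hypothetical. A URI that does not
# end in ".log" is treated as a directory, so a file name will be generated
# later from job metadata.
example_logging = build_logging_param('gs://my-bucket/logs/')
assert example_logging.file_provider == job_model.P_GCS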
|
def split_pair(pair_string, separator, nullable_idx=1):
"""Split a string into a pair, which can have one empty value.
Args:
pair_string: The string to be split.
separator: The separator to be used for splitting.
nullable_idx: The location to be set to null if the separator is not in the
input string. Should be either 0 or 1.
Returns:
A list containing the pair.
Raises:
IndexError: If nullable_idx is not 0 or 1.
"""
pair = pair_string.split(separator, 1)
if len(pair) == 1:
if nullable_idx == 0:
return [None, pair[0]]
elif nullable_idx == 1:
return [pair[0], None]
else:
raise IndexError('nullable_idx should be either 0 or 1.')
else:
return pair
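# A quick sketch, assuming split_pair is in scope, showing how nullable_idx
# controls which side may be empty when the separator is absent.
assert split_pair('KEY=VALUE', '=') == ['KEY', 'VALUE']
assert split_pair('KEY', '=', nullable_idx=1) == ['KEY', None]
assert split_pair('gs://bucket/file.txt', '=', nullable_idx=0) == [
    None, 'gs://bucket/file.txt']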
|
def parse_tasks_file_header(header, input_file_param_util,
output_file_param_util):
"""Parse the header from the tasks file into env, input, output definitions.
Elements are formatted similarly to their equivalent command-line arguments,
but with associated values coming from the data rows.
Environment variable columns are headed as "--env <name>".
Input columns are headed as "--input <name>", with the name optional.
Output columns are headed as "--output <name>", with the name optional.
For historical reasons, bareword column headers (such as "JOB_ID") are
equivalent to "--env var_name".
Args:
header: Array of header fields
input_file_param_util: Utility for producing InputFileParam objects.
output_file_param_util: Utility for producing OutputFileParam objects.
Returns:
job_params: A list of EnvParam, LabelParam, InputFileParam, and
OutputFileParam objects, one per column header.
Raises:
ValueError: If a column header begins with "-" but is not a recognized
parameter flag.
"""
job_params = []
for col in header:
# Reserve the "-" and "--" namespace.
# If the column has no leading "-", treat it as an environment variable
col_type = '--env'
col_value = col
if col.startswith('-'):
col_type, col_value = split_pair(col, ' ', 1)
if col_type == '--env':
job_params.append(job_model.EnvParam(col_value))
elif col_type == '--label':
job_params.append(job_model.LabelParam(col_value))
elif col_type == '--input' or col_type == '--input-recursive':
name = input_file_param_util.get_variable_name(col_value)
job_params.append(
job_model.InputFileParam(
name, recursive=(col_type.endswith('recursive'))))
elif col_type == '--output' or col_type == '--output-recursive':
name = output_file_param_util.get_variable_name(col_value)
job_params.append(
job_model.OutputFileParam(
name, recursive=(col_type.endswith('recursive'))))
else:
raise ValueError('Unrecognized column header: %s' % col)
return job_params
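# A small sketch, assuming parse_tasks_file_header, job_model, and the
# InputFileParamUtil/OutputFileParamUtil classes (constructed with a local
# mount prefix, as in run_main above) are in scope; the column names are
# hypothetical.
example_header = ['--env SAMPLE_ID', '--input INPUT_VCF', '--output OUTPUT_STATS']
example_params = parse_tasks_file_header(
    example_header,
    InputFileParamUtil('input'),
    OutputFileParamUtil('output'))
assert [type(p).__name__ for p in example_params] == [
    'EnvParam', 'InputFileParam', 'OutputFileParam']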
|
def tasks_file_to_task_descriptors(tasks, retries, input_file_param_util,
output_file_param_util):
"""Parses task parameters from a TSV.
Args:
tasks: Dict containing the path to a TSV file and the range of task
numbers to run. The file's first line names the --env, --input, and
--output parameters as column headings; subsequent lines specify
parameter values, one row per task.
retries: Number of retries allowed.
input_file_param_util: Utility for producing InputFileParam objects.
output_file_param_util: Utility for producing OutputFileParam objects.
Returns:
task_descriptors: an array of records, each containing the task-id,
task-attempt, 'envs', 'inputs', 'outputs', 'labels' that defines the set of
parameters for each task of the job.
Raises:
ValueError: If no job records were provided
"""
task_descriptors = []
path = tasks['path']
task_min = tasks.get('min')
task_max = tasks.get('max')
# Load the file and set up a Reader that tokenizes the fields
param_file = dsub_util.load_file(path)
reader = csv.reader(param_file, delimiter='\t')
# Read the first line and extract the parameters
header = six.advance_iterator(reader)
job_params = parse_tasks_file_header(header, input_file_param_util,
output_file_param_util)
# Build a list of records from the parsed input file
for row in reader:
# Tasks are numbered starting at 1 and since the first line of the TSV
# file is a header, the first task appears on line 2.
task_id = reader.line_num - 1
if task_min and task_id < task_min:
continue
if task_max and task_id > task_max:
continue
if len(row) != len(job_params):
dsub_util.print_error('Unexpected number of fields %s vs %s: line %s' %
(len(row), len(job_params), reader.line_num))
# Each row can contain "envs", "inputs", "outputs"
envs = set()
inputs = set()
outputs = set()
labels = set()
for i in range(0, len(job_params)):
param = job_params[i]
name = param.name
if isinstance(param, job_model.EnvParam):
envs.add(job_model.EnvParam(name, row[i]))
elif isinstance(param, job_model.LabelParam):
labels.add(job_model.LabelParam(name, row[i]))
elif isinstance(param, job_model.InputFileParam):
inputs.add(
input_file_param_util.make_param(name, row[i], param.recursive))
elif isinstance(param, job_model.OutputFileParam):
outputs.add(
output_file_param_util.make_param(name, row[i], param.recursive))
task_descriptors.append(
job_model.TaskDescriptor({
'task-id': task_id,
'task-attempt': 1 if retries else None
}, {
'labels': labels,
'envs': envs,
'inputs': inputs,
'outputs': outputs
}, job_model.Resources()))
# Ensure that there are jobs to execute (and not just a header)
if not task_descriptors:
raise ValueError('No tasks added from %s' % path)
return task_descriptors
|
def parse_pair_args(labels, argclass):
"""Parse flags of key=value pairs and return a list of argclass.
For pair variables, we need to:
* split the input into name=value pairs (value optional)
* create the argclass object
Args:
labels: list of 'key' or 'key=value' strings.
argclass: Container class for args, must instantiate with argclass(k, v).
Returns:
list of argclass objects.
"""
label_data = set()
for arg in labels:
name, value = split_pair(arg, '=', nullable_idx=1)
label_data.add(argclass(name, value))
return label_data
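# A quick sketch, assuming parse_pair_args and job_model.LabelParam are in
# scope; the label values are hypothetical.
example_labels = parse_pair_args(['team=research', 'costcenter'],
                                 job_model.LabelParam)
assert {(label.name, label.value) for label in example_labels} == {
    ('team', 'research'), ('costcenter', None)}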
|
def args_to_job_params(envs, labels, inputs, inputs_recursive, outputs,
outputs_recursive, mounts, input_file_param_util,
output_file_param_util, mount_param_util):
"""Parse env, input, and output parameters into a job parameters and data.
Passing arguments on the command-line allows for launching a single job.
The env, input, and output arguments encode both the definition of the
job as well as the single job's values.
Env arguments are simple name=value pairs.
Input and output file arguments can contain name=value pairs or just values.
Either of the following is valid:
uri
myfile=uri
Args:
envs: list of environment variable job parameters
labels: list of labels to attach to the tasks
inputs: list of file input parameters
inputs_recursive: list of recursive directory input parameters
outputs: list of file output parameters
outputs_recursive: list of recursive directory output parameters
mounts: list of resources (buckets, disks, or directories) to mount
input_file_param_util: Utility for producing InputFileParam objects.
output_file_param_util: Utility for producing OutputFileParam objects.
mount_param_util: Utility for producing MountParam objects.
Returns:
job_params: a dictionary of 'envs', 'inputs', 'outputs', 'labels', and
'mounts' that defines the set of parameters and data for a job.
"""
# Parse environmental variables and labels.
env_data = parse_pair_args(envs, job_model.EnvParam)
label_data = parse_pair_args(labels, job_model.LabelParam)
# For input files, we need to:
# * split the input into name=uri pairs (name optional)
# * get the environmental variable name, or automatically set if null.
# * create the input file param
input_data = set()
for (recursive, args) in ((False, inputs), (True, inputs_recursive)):
for arg in args:
name, value = split_pair(arg, '=', nullable_idx=0)
name = input_file_param_util.get_variable_name(name)
input_data.add(input_file_param_util.make_param(name, value, recursive))
# For output files, we need to:
# * split the input into name=uri pairs (name optional)
# * get the environmental variable name, or automatically set if null.
# * create the output file param
output_data = set()
for (recursive, args) in ((False, outputs), (True, outputs_recursive)):
for arg in args:
name, value = split_pair(arg, '=', 0)
name = output_file_param_util.get_variable_name(name)
output_data.add(output_file_param_util.make_param(name, value, recursive))
mount_data = set()
for arg in mounts:
# Mounts can look like `--mount VAR=PATH` or `--mount VAR=PATH {num}`,
# where num is the size of the disk in GB. We assume a space is the
# separator between the path and the disk size.
if ' ' in arg:
key_value_pair, disk_size = arg.split(' ')
name, value = split_pair(key_value_pair, '=', 1)
mount_data.add(mount_param_util.make_param(name, value, disk_size))
else:
name, value = split_pair(arg, '=', 1)
mount_data.add(mount_param_util.make_param(name, value, disk_size=None))
return {
'envs': env_data,
'inputs': input_data,
'outputs': output_data,
'labels': label_data,
'mounts': mount_data,
}
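# A sketch of building a single-job parameter set from flag-style values,
# assuming the param util classes used by run_main above are in scope; all
# paths and names here are hypothetical.
example_job_params = args_to_job_params(
    envs=['GREETING=hello'],
    labels=['team=research'],
    inputs=['INPUT_VCF=gs://bucket/in.vcf'],
    inputs_recursive=[],
    outputs=['OUTPUT_STATS=gs://bucket/out/stats.txt'],
    outputs_recursive=[],
    mounts=[],
    input_file_param_util=InputFileParamUtil('input'),
    output_file_param_util=OutputFileParamUtil('output'),
    mount_param_util=MountParamUtil('mount'))
assert {env.name for env in example_job_params['envs']} == {'GREETING'}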
|
def validate_submit_args_or_fail(job_descriptor, provider_name, input_providers,
output_providers, logging_providers):
"""Validate that arguments passed to submit_job have valid file providers.
This utility function takes resources and task data args from `submit_job`
in the base provider. This function will fail with a ValueError if any of
the parameters are not valid. See the following example:
>>> job_resources = type('', (object,),
... {"logging": job_model.LoggingParam('gs://logtemp', job_model.P_GCS)})()
>>> job_params={'inputs': set(), 'outputs': set(), 'mounts': set()}
>>> task_descriptors = [
... job_model.TaskDescriptor(None, {
... 'inputs': {
... job_model.FileParam('IN', uri='gs://in/*',
... file_provider=job_model.P_GCS)},
... 'outputs': set()}, None),
... job_model.TaskDescriptor(None, {
... 'inputs': set(),
... 'outputs': {
... job_model.FileParam('OUT', uri='gs://out/*',
... file_provider=job_model.P_GCS)}}, None)]
...
>>> validate_submit_args_or_fail(job_model.JobDescriptor(None, job_params,
... job_resources, task_descriptors),
... provider_name='MYPROVIDER',
... input_providers=[job_model.P_GCS],
... output_providers=[job_model.P_GCS],
... logging_providers=[job_model.P_GCS])
...
>>> validate_submit_args_or_fail(job_model.JobDescriptor(None, job_params,
... job_resources, task_descriptors),
... provider_name='MYPROVIDER',
... input_providers=[job_model.P_GCS],
... output_providers=[job_model.P_LOCAL],
... logging_providers=[job_model.P_GCS])
Traceback (most recent call last):
...
ValueError: Unsupported output path (gs://out/*) for provider 'MYPROVIDER'.
Args:
job_descriptor: instance of job_model.JobDescriptor.
provider_name: (str) the name of the execution provider.
input_providers: (string collection) whitelist of file providers for input.
output_providers: (string collection) whitelist of providers for output.
logging_providers: (string collection) whitelist of providers for logging.
Raises:
ValueError: if any file providers do not match the whitelists.
"""
job_resources = job_descriptor.job_resources
job_params = job_descriptor.job_params
task_descriptors = job_descriptor.task_descriptors
# Validate logging file provider.
_validate_providers([job_resources.logging], 'logging', logging_providers,
provider_name)
# Validate job input and output file providers
_validate_providers(job_params['inputs'], 'input', input_providers,
provider_name)
_validate_providers(job_params['outputs'], 'output', output_providers,
provider_name)
# Validate input and output file providers.
for task_descriptor in task_descriptors:
_validate_providers(task_descriptor.task_params['inputs'], 'input',
input_providers, provider_name)
_validate_providers(task_descriptor.task_params['outputs'], 'output',
output_providers, provider_name)
|
def handle_version_flag():
"""If the --version flag is passed, print version to stdout and exit.
Within dsub commands, --version should be the highest priority flag.
This function supplies a repeatable and DRY way of checking for the
version flag and printing the version. Callers still need to define a version
flag in the command's flags so that it shows up in help output.
"""
parser = argparse.ArgumentParser(description='Version parser', add_help=False)
parser.add_argument('--version', '-v', dest='version', action='store_true')
parser.set_defaults(version=False)
args, _ = parser.parse_known_args()
if args.version:
print('dsub version: %s' % DSUB_VERSION)
sys.exit()
|
def age_to_create_time(age, from_time=None):
"""Compute the create time (UTC) for the list filter.
If the age is an integer value it is treated as seconds since the epoch (UTC).
Otherwise the value must be of the form "<integer><unit>" where supported
units are s, m, h, d, w (seconds, minutes, hours, days, weeks).
Args:
age: A "<integer><unit>" string or integer value.
from_time: The reference time from which the age is subtracted; defaults
to the current local time.
Returns:
A timezone-aware datetime or None if age parameter is empty.
"""
if not age:
return None
if not from_time:
from_time = dsub_util.replace_timezone(datetime.datetime.now(), tzlocal())
try:
last_char = age[-1]
if last_char == 's':
return from_time - datetime.timedelta(seconds=int(age[:-1]))
elif last_char == 'm':
return from_time - datetime.timedelta(minutes=int(age[:-1]))
elif last_char == 'h':
return from_time - datetime.timedelta(hours=int(age[:-1]))
elif last_char == 'd':
return from_time - datetime.timedelta(days=int(age[:-1]))
elif last_char == 'w':
return from_time - datetime.timedelta(weeks=int(age[:-1]))
else:
# If no unit is given, treat the age as seconds since the epoch.
return dsub_util.replace_timezone(
datetime.datetime.utcfromtimestamp(int(age)), pytz.utc)
except (ValueError, OverflowError) as e:
raise ValueError('Unable to parse age string %s: %s' % (age, e))
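# A small sketch, assuming age_to_create_time, dsub_util, datetime, and pytz
# are available; the reference time is fixed so the arithmetic is visible.
_example_ref = dsub_util.replace_timezone(
    datetime.datetime(2018, 1, 2, 12, 0, 0), pytz.utc)
assert age_to_create_time('3d', _example_ref) == (
    _example_ref - datetime.timedelta(days=3))
assert age_to_create_time('', _example_ref) is None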
|
def _interval_to_seconds(interval, valid_units='smhdw'):
"""Convert the timeout duration to seconds.
The value must be of the form "<integer><unit>" where supported
units are s, m, h, d, w (seconds, minutes, hours, days, weeks).
Args:
interval: A "<integer><unit>" string.
valid_units: A string of supported unit characters.
Returns:
A string of the form "<float>s", or None if the interval is empty.
"""
if not interval:
return None
try:
last_char = interval[-1]
if last_char == 's' and 's' in valid_units:
return str(float(interval[:-1])) + 's'
elif last_char == 'm' and 'm' in valid_units:
return str(float(interval[:-1]) * 60) + 's'
elif last_char == 'h' and 'h' in valid_units:
return str(float(interval[:-1]) * 60 * 60) + 's'
elif last_char == 'd' and 'd' in valid_units:
return str(float(interval[:-1]) * 60 * 60 * 24) + 's'
elif last_char == 'w' and 'w' in valid_units:
return str(float(interval[:-1]) * 60 * 60 * 24 * 7) + 's'
else:
raise ValueError(
'Unsupported units in interval string %s: %s' % (interval, last_char))
except (ValueError, OverflowError) as e:
raise ValueError('Unable to parse interval string %s: %s' % (interval, e))
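# A quick sketch, assuming _interval_to_seconds is in scope; note the result
# is a float-formatted string of seconds.
assert _interval_to_seconds('10s') == '10.0s'
assert _interval_to_seconds('2h') == '7200.0s'
assert _interval_to_seconds(None) is None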
|
def get_variable_name(self, name):
"""Produce a default variable name if none is specified."""
if not name:
name = '%s%s' % (self._auto_prefix, self._auto_index)
self._auto_index += 1
return name
|
def rewrite_uris(self, raw_uri, file_provider):
"""Accept a raw uri and return rewritten versions.
This function returns a normalized URI and a docker path. The normalized
URI may have minor alterations meant to disambiguate and prepare for use
by shell utilities that may require a specific format.
The docker rewriter makes substantial modifications to the raw URI when
constructing a docker path, but modifications must follow these rules:
1) System specific characters are not allowed (ex. indirect paths).
2) The path, if it is a directory, must end in a forward slash.
3) The path will begin with the value set in self._relative_path.
4) The path will have an additional prefix (after self._relative_path) set
by the file provider-specific rewriter.
Rewrite output for the docker path:
>>> out_util = FileParamUtil('AUTO_', 'output')
>>> out_util.rewrite_uris('gs://mybucket/myfile.txt', job_model.P_GCS)[1]
'output/gs/mybucket/myfile.txt'
>>> out_util.rewrite_uris('./data/myfolder/', job_model.P_LOCAL)[1]
'output/file/data/myfolder/'
When normalizing the URI for cloud buckets, no rewrites are done. For local
files, the user directory will be expanded and relative paths will be
converted to absolute:
>>> in_util = FileParamUtil('AUTO_', 'input')
>>> in_util.rewrite_uris('gs://mybucket/gcs_dir/', job_model.P_GCS)[0]
'gs://mybucket/gcs_dir/'
>>> in_util.rewrite_uris('/data/./dir_a/../myfile.txt',
... job_model.P_LOCAL)[0]
'/data/myfile.txt'
>>> in_util.rewrite_uris('file:///tmp/data/*.bam', job_model.P_LOCAL)[0]
'/tmp/data/*.bam'
Args:
raw_uri: (str) the path component of the raw URI.
file_provider: a valid provider (contained in job_model.FILE_PROVIDERS).
Returns:
normalized: a cleaned version of the uri provided by command line.
docker_path: the uri rewritten in the format required for mounting inside
a docker worker.
Raises:
ValueError: if file_provider is not valid.
"""
if file_provider == job_model.P_GCS:
normalized, docker_path = _gcs_uri_rewriter(raw_uri)
elif file_provider == job_model.P_LOCAL:
normalized, docker_path = _local_uri_rewriter(raw_uri)
else:
raise ValueError('File provider not supported: %r' % file_provider)
return normalized, os.path.join(self._relative_path, docker_path)
|
def parse_file_provider(uri):
"""Find the file provider for a URI."""
providers = {'gs': job_model.P_GCS, 'file': job_model.P_LOCAL}
# URI scheme detector uses a range up to 30 since none of the IANA
# registered schemes are longer than this.
provider_found = re.match(r'^([A-Za-z][A-Za-z0-9+.-]{0,29})://', uri)
if provider_found:
prefix = provider_found.group(1).lower()
else:
# If no provider is specified in the URI, assume that the local
# filesystem is being used. Availability and validity of the local
# file/directory will be checked later.
prefix = 'file'
if prefix in providers:
return providers[prefix]
else:
raise ValueError('File prefix not supported: %s://' % prefix)
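# Illustrative usage sketch (not part of dsub): assumes job_model is imported so
# that the P_GCS / P_LOCAL constants are available for comparison.
assert parse_file_provider('gs://mybucket/path/file.txt') == job_model.P_GCS
assert parse_file_provider('/tmp/data/file.txt') == job_model.P_LOCAL  # no scheme -> 'file'
# parse_file_provider('s3://bucket/file.txt') raises ValueError (unsupported prefix)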
|
def _validate_paths_or_fail(uri, recursive):
"""Do basic validation of the uri, return the path and filename."""
path, filename = os.path.split(uri)
# dsub could support character ranges ([0-9]) with some more work, but for
# now we assume that basic asterisk wildcards are sufficient. Reject any URI
# that includes square brackets or question marks, since we know that
# if they actually worked, it would be accidental.
if '[' in uri or ']' in uri:
raise ValueError(
'Square brackets (character ranges) are not supported: %s' % uri)
if '?' in uri:
raise ValueError('Question mark wildcards are not supported: %s' % uri)
# Only support file URIs and *filename* wildcards
# Wildcards at the directory level or "**" syntax would require better
# support from the Pipelines API *or* doing expansion here and
# (potentially) producing a series of FileParams, instead of one.
if '*' in path:
raise ValueError(
'Path wildcards (*) are only supported for files: %s' % uri)
if '**' in filename:
raise ValueError('Recursive wildcards ("**") not supported: %s' % uri)
if filename in ('..', '.'):
raise ValueError('Path characters ".." and "." not supported '
'for file names: %s' % uri)
# Do not allow non-recursive IO to reference directories.
if not recursive and not filename:
raise ValueError('Input or output values that are not recursive must '
'reference a filename or wildcard: %s' % uri)
|
def parse_uri(self, raw_uri, recursive):
"""Return a valid docker_path, uri, and file provider from a flag value."""
# Assume recursive URIs are directory paths.
if recursive:
raw_uri = directory_fmt(raw_uri)
# Get the file provider, validate the raw URI, and rewrite the path
# component of the URI for docker and remote.
file_provider = self.parse_file_provider(raw_uri)
self._validate_paths_or_fail(raw_uri, recursive)
uri, docker_uri = self.rewrite_uris(raw_uri, file_provider)
uri_parts = job_model.UriParts(
directory_fmt(os.path.dirname(uri)), os.path.basename(uri))
return docker_uri, uri_parts, file_provider
|
def make_param(self, name, raw_uri, recursive):
"""Return a *FileParam given an input uri."""
if not raw_uri:
return self.param_class(name, None, None, None, recursive, None)
docker_path, uri_parts, provider = self.parse_uri(raw_uri, recursive)
return self.param_class(name, raw_uri, docker_path, uri_parts, recursive,
provider)
|
def _parse_image_uri(self, raw_uri):
"""Return a valid docker_path from a Google Persistent Disk url."""
# The string replace is so we don't have colons and double slashes in the
# mount path. The idea is the resulting mount path would look like:
# /mnt/data/mount/http/www.googleapis.com/compute/v1/projects/...
docker_uri = os.path.join(self._relative_path,
raw_uri.replace('https://', 'https/', 1))
return docker_uri
|
def _parse_local_mount_uri(self, raw_uri):
"""Return a valid docker_path for a local file path."""
raw_uri = directory_fmt(raw_uri)
_, docker_path = _local_uri_rewriter(raw_uri)
local_path = docker_path[len('file'):]
docker_uri = os.path.join(self._relative_path, docker_path)
return local_path, docker_uri
|
def _parse_gcs_uri(self, raw_uri):
"""Return a valid docker_path for a GCS bucket."""
# Assume URI is a directory path.
raw_uri = directory_fmt(raw_uri)
_, docker_path = _gcs_uri_rewriter(raw_uri)
docker_uri = os.path.join(self._relative_path, docker_path)
return docker_uri
|
def make_param(self, name, raw_uri, disk_size):
"""Return a MountParam given a GCS bucket, disk image or local path."""
if raw_uri.startswith('https://www.googleapis.com/compute'):
# Full Image URI should look something like:
# https://www.googleapis.com/compute/v1/projects/<project>/global/images/
# But don't validate further, should the form of a valid image URI
# change (v1->v2, for example)
docker_path = self._parse_image_uri(raw_uri)
return job_model.PersistentDiskMountParam(
name, raw_uri, docker_path, disk_size, disk_type=None)
elif raw_uri.startswith('file://'):
local_path, docker_path = self._parse_local_mount_uri(raw_uri)
return job_model.LocalMountParam(name, raw_uri, docker_path, local_path)
elif raw_uri.startswith('gs://'):
docker_path = self._parse_gcs_uri(raw_uri)
return job_model.GCSMountParam(name, raw_uri, docker_path)
else:
raise ValueError(
'Mount parameter {} must begin with valid prefix.'.format(raw_uri))
|
def validate_param_name(name, param_type):
"""Validate that the name follows posix conventions for env variables."""
# http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap03.html#tag_03_235
#
# 3.235 Name
# In the shell command language, a word consisting solely of underscores,
# digits, and alphabetics from the portable character set.
if not re.match(r'^[a-zA-Z_][a-zA-Z0-9_]*$', name):
raise ValueError('Invalid %s: %s' % (param_type, name))
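# Illustrative usage sketch (not part of dsub): names must satisfy the POSIX
# environment-variable rule enforced by the regex above.
validate_param_name('INPUT_FILE', 'input')    # passes silently
validate_param_name('_tmp_dir2', 'output')    # passes silently
# validate_param_name('2ND_INPUT', 'input') raises ValueError (cannot start with a digit)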
|
def validate_bucket_name(bucket):
"""Validate that the name is a valid GCS bucket."""
if not bucket.startswith('gs://'):
raise ValueError(
'Invalid bucket path "%s". Must start with "gs://".' % bucket)
bucket_name = bucket[len('gs://'):]
if not re.search(r'^\w[\w_\.-]{1,61}\w$', bucket_name):
raise ValueError('Invalid bucket name: %s' % bucket)
|
def convert_to_label_chars(s):
"""Turn the specified name and value into a valid Google label."""
# We want the results to be user-friendly, not just functional.
# So we can't base-64 encode it.
# * If upper-case: lower-case it
# * If the char is not a standard letter or digit, make it a dash
# March 2019 note: underscores are now allowed in labels.
# However, removing the conversion of underscores to dashes here would
# create inconsistencies between old jobs and new jobs.
# With existing code, $USER "jane_doe" has a user-id label of "jane-doe".
# If we remove the conversion, the user-id label for new jobs is "jane_doe".
# This makes looking up old jobs more complicated.
accepted_characters = string.ascii_lowercase + string.digits + '-'
def label_char_transform(char):
if char in accepted_characters:
return char
if char in string.ascii_uppercase:
return char.lower()
return '-'
return ''.join(label_char_transform(c) for c in s)
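# Illustrative usage sketch (not part of dsub), showing the lower-casing and
# dash substitution described in the comments above.
assert convert_to_label_chars('Jane_Doe') == 'jane-doe'
assert convert_to_label_chars('my.script.sh') == 'my-script-sh'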
|
def ensure_task_params_are_complete(task_descriptors):
"""For each task, ensure that each task param entry is not None."""
for task_desc in task_descriptors:
for param in [
'labels', 'envs', 'inputs', 'outputs', 'input-recursives',
'output-recursives'
]:
if not task_desc.task_params.get(param):
task_desc.task_params[param] = set()
|
def _remove_empty_items(d, required):
"""Return a new dict with any empty items removed.
Note that this is not a deep check. If d contains a dictionary which
itself contains empty items, those are never checked.
This method exists to make to_serializable() functions cleaner.
We could revisit this some day, but for now, the serialized objects are
stripped of empty values to keep the output YAML more compact.
Args:
d: a dictionary
required: list of required keys (for example, TaskDescriptors always emit
the "task-id", even if None)
Returns:
A dictionary with empty items removed.
"""
new_dict = {}
for k, v in d.items():
if k in required:
new_dict[k] = v
elif isinstance(v, int) or v:
# "if v" would suppress emitting int(0)
new_dict[k] = v
return new_dict
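# Illustrative usage sketch (not part of dsub): empty values are dropped, while
# required keys and int(0) are kept.
assert _remove_empty_items(
    {'task-id': None, 'labels': {}, 'task-attempt': 0, 'envs': {'K': 'V'}},
    ['task-id']) == {'task-id': None, 'task-attempt': 0, 'envs': {'K': 'V'}}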
|
def task_view_generator(job_descriptor):
"""Generator that yields a task-specific view of the job.
This generator exists to make it easy for callers to iterate over the tasks
in a JobDescriptor. Each pass yields a new JobDescriptor with a single task.
Args:
job_descriptor: A JobDescriptor with 1 or more tasks.
Yields:
A JobDescriptor with a single task.
"""
for task_descriptor in job_descriptor.task_descriptors:
jd = JobDescriptor(job_descriptor.job_metadata, job_descriptor.job_params,
job_descriptor.job_resources, [task_descriptor])
yield jd
|
def numeric_task_id(task_id):
"""Converts a task-id to the numeric task-id.
Args:
task_id: task-id in either task-n or n format
Returns:
n
"""
# This function exists to support the legacy "task-id" format in the "google"
# provider. Google labels originally could not be numeric. When the google
# provider is completely replaced by the google-v2 provider, this function can
# go away.
if task_id is not None:
if task_id.startswith('task-'):
return int(task_id[len('task-'):])
else:
return int(task_id)
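# Illustrative usage sketch (not part of dsub): both the legacy "task-n" form and
# the plain "n" form resolve to the same integer; None falls through and returns None.
assert numeric_task_id('task-7') == 7
assert numeric_task_id('7') == 7
assert numeric_task_id(None) is None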
|
def _validate_label(cls, name, value):
"""Raise ValueError if the label is invalid."""
# Rules for labels are described in:
# https://cloud.google.com/compute/docs/labeling-resources#restrictions
# * Keys and values cannot be longer than 63 characters each.
# * Keys and values can only contain lowercase letters, numeric characters,
# underscores, and dashes.
# * International characters are allowed.
# * Label keys must start with a lowercase letter and international
# characters are allowed.
# * Label keys cannot be empty.
cls._check_label_name(name)
cls._check_label_value(value)
# Ensure that reserved labels are not being used.
if not cls._allow_reserved_keys and name in RESERVED_LABELS:
raise ValueError('Label flag (%s=...) must not use reserved keys: %r' %
(name, list(RESERVED_LABELS)))
|
def to_serializable(self):
"""Return a dict populated for serialization (as YAML/JSON)."""
task_metadata = self.task_metadata
task_params = self.task_params
task_resources = self.task_resources
# The only required field is the task-id, even if it is None
task_id = None
if task_metadata.get('task-id') is not None:
task_id = str(task_metadata.get('task-id'))
task = {'task-id': task_id}
task['create-time'] = task_metadata.get('create-time')
task['task-attempt'] = task_metadata.get('task-attempt')
if task_resources.logging_path:
task['logging-path'] = str(task_resources.logging_path.uri)
task['labels'] = {var.name: var.value for var in task_params['labels']}
task['envs'] = {var.name: var.value for var in task_params['envs']}
task['inputs'] = {
var.name: var.value
for var in task_params['inputs']
if not var.recursive
}
task['input-recursives'] = {
var.name: var.value
for var in task_params['inputs']
if var.recursive
}
task['outputs'] = {
var.name: var.value
for var in task_params['outputs']
if not var.recursive
}
task['output-recursives'] = {
var.name: var.value
for var in task_params['outputs']
if var.recursive
}
return _remove_empty_items(task, ['task-id'])
|
def to_serializable(self):
"""Return a dict populated for serialization (as YAML/JSON)."""
job_metadata = self.job_metadata
job_resources = self.job_resources
job_params = self.job_params
task_descriptors = self.task_descriptors
job = {
'job-id': job_metadata.get('job-id'),
'job-name': job_metadata.get('job-name'),
'user-id': job_metadata.get('user-id'),
'create-time': job_metadata.get('create-time'),
'dsub-version': job_metadata.get('dsub-version'),
'user-project': job_metadata.get('user-project'),
'task-ids': job_metadata.get('task-ids'),
'script-name': job_metadata['script'].name,
}
# logging is specified as a command-line argument and is typically
# transformed (substituting job-id). The transformed value is saved
# on a per-task basis as the 'logging-path'.
if job_resources.logging:
job['logging'] = str(job_resources.logging.uri)
job['labels'] = {var.name: var.value for var in job_params['labels']}
job['envs'] = {var.name: var.value for var in job_params['envs']}
job['inputs'] = {
var.name: var.value
for var in job_params['inputs']
if not var.recursive
}
job['input-recursives'] = {
var.name: var.value
for var in job_params['inputs']
if var.recursive
}
job['outputs'] = {
var.name: var.value
for var in job_params['outputs']
if not var.recursive
}
job['output-recursives'] = {
var.name: var.value
for var in job_params['outputs']
if var.recursive
}
job['mounts'] = {var.name: var.value for var in job_params['mounts']}
tasks = []
for task_descriptor in task_descriptors:
tasks.append(task_descriptor.to_serializable())
job['tasks'] = tasks
return _remove_empty_items(job, [])
|
def _from_yaml_v0(cls, job):
"""Populate a JobDescriptor from the local provider's original meta.yaml.
The local job provider had the first incarnation of a YAML file for each
task. That idea was extended here in the JobDescriptor and the local
provider adopted the JobDescriptor.to_yaml() call to write its meta.yaml.
The JobDescriptor.from_yaml() detects if it receives a local provider's
"v0" meta.yaml and calls this function.
Args:
job: an object produced from decoding meta.yaml.
Returns:
A JobDescriptor populated as best we can from the old meta.yaml.
"""
# The v0 meta.yaml only contained:
# create-time, job-id, job-name, logging, task-id
# labels, envs, inputs, outputs
# It did NOT contain user-id.
# dsub-version might be there as a label.
job_metadata = {}
for key in ['job-id', 'job-name', 'create-time']:
job_metadata[key] = job.get(key)
# Make sure that create-time string is turned into a datetime
job_metadata['create-time'] = dsub_util.replace_timezone(
datetime.datetime.strptime(job['create-time'], '%Y-%m-%d %H:%M:%S.%f'),
tzlocal())
# The v0 meta.yaml contained a "logging" field which was the task-specific
# logging path. It did not include the actual "--logging" value the user
# specified.
job_resources = Resources()
# The v0 meta.yaml represented a single task.
# It did not distinguish whether params were job params or task params.
# We will treat them as either all job params or all task params, based on
# whether the task-id is empty or an integer value.
#
# We also cannot distinguish whether inputs/outputs were recursive or not.
# Just treat them all as non-recursive.
params = {}
# The dsub-version may be in the meta.yaml as a label. If so remove it
# and set it as a top-level job metadata value.
labels = job.get('labels', {})
if 'dsub-version' in labels:
job_metadata['dsub-version'] = labels['dsub-version']
del labels['dsub-version']
params['labels'] = cls._label_params_from_dict(labels)
params['envs'] = cls._env_params_from_dict(job.get('envs', {}))
params['inputs'] = cls._input_file_params_from_dict(
job.get('inputs', {}), False)
params['outputs'] = cls._output_file_params_from_dict(
job.get('outputs', {}), False)
if job.get('task-id') is None:
job_params = params
task_metadata = {'task-id': None}
task_params = {}
else:
job_params = {}
task_metadata = {'task-id': str(job.get('task-id'))}
task_params = params
task_resources = Resources(logging_path=job.get('logging'))
task_descriptors = [
TaskDescriptor.get_complete_descriptor(task_metadata, task_params,
task_resources)
]
return JobDescriptor.get_complete_descriptor(
job_metadata, job_params, job_resources, task_descriptors)
|
def from_yaml(cls, yaml_string):
"""Populate and return a JobDescriptor from a YAML string."""
try:
job = yaml.full_load(yaml_string)
except AttributeError:
# For installations that cannot update their PyYAML version
job = yaml.load(yaml_string)
# If the YAML does not contain a top-level dsub version, then assume that
# the string is coming from the local provider, reading an old version of
# its meta.yaml.
dsub_version = job.get('dsub-version')
if not dsub_version:
return cls._from_yaml_v0(job)
job_metadata = {}
for key in [
'job-id', 'job-name', 'task-ids', 'user-id', 'dsub-version',
'user-project', 'script-name'
]:
if job.get(key) is not None:
job_metadata[key] = job.get(key)
# Make sure that create-time string is turned into a datetime
job_metadata['create-time'] = dsub_util.replace_timezone(
job.get('create-time'), pytz.utc)
job_resources = Resources(logging=job.get('logging'))
job_params = {}
job_params['labels'] = cls._label_params_from_dict(job.get('labels', {}))
job_params['envs'] = cls._env_params_from_dict(job.get('envs', {}))
job_params['inputs'] = cls._input_file_params_from_dict(
job.get('inputs', {}), False)
job_params['input-recursives'] = cls._input_file_params_from_dict(
job.get('input-recursives', {}), True)
job_params['outputs'] = cls._output_file_params_from_dict(
job.get('outputs', {}), False)
job_params['output-recursives'] = cls._output_file_params_from_dict(
job.get('output-recursives', {}), True)
job_params['mounts'] = cls._mount_params_from_dict(job.get('mounts', {}))
task_descriptors = []
for task in job.get('tasks', []):
task_metadata = {'task-id': task.get('task-id')}
# Old instances of the meta.yaml do not have a task create time.
create_time = task.get('create-time')
if create_time:
task_metadata['create-time'] = dsub_util.replace_timezone(
create_time, pytz.utc)
if task.get('task-attempt') is not None:
task_metadata['task-attempt'] = task.get('task-attempt')
task_params = {}
task_params['labels'] = cls._label_params_from_dict(
task.get('labels', {}))
task_params['envs'] = cls._env_params_from_dict(task.get('envs', {}))
task_params['inputs'] = cls._input_file_params_from_dict(
task.get('inputs', {}), False)
task_params['input-recursives'] = cls._input_file_params_from_dict(
task.get('input-recursives', {}), True)
task_params['outputs'] = cls._output_file_params_from_dict(
task.get('outputs', {}), False)
task_params['output-recursives'] = cls._output_file_params_from_dict(
task.get('output-recursives', {}), True)
task_resources = Resources(logging_path=task.get('logging-path'))
task_descriptors.append(
TaskDescriptor(task_metadata, task_params, task_resources))
return JobDescriptor(job_metadata, job_params, job_resources,
task_descriptors)
|
def find_task_descriptor(self, task_id):
"""Returns the task_descriptor corresponding to task_id."""
# It is not guaranteed that the index will be task_id - 1 when --tasks is
# used with a min/max range.
for task_descriptor in self.task_descriptors:
if task_descriptor.task_metadata.get('task-id') == task_id:
return task_descriptor
return None
|
def get_file_environment_variables(file_params):
"""Return a dictionary of environment variables for the user container."""
env = {}
for param in file_params:
# We have no cases where the environment variable provided to user
# scripts have a trailing slash, so be sure to always strip it.
# The case that this is specifically handling is --input-recursive and
# --output-recursive variables, which are directory values.
env[param.name] = os.path.join(
DATA_MOUNT_POINT, param.docker_path.rstrip('/')) if param.value else ''
return env
|
def build_recursive_localize_env(destination, inputs):
"""Return a multi-line string with export statements for the variables.
Arguments:
destination: Folder where the data will be put.
For example /mnt/data
inputs: a list of InputFileParam
Returns:
a multi-line string with a shell script that sets environment variables
corresponding to the inputs.
"""
export_input_dirs = '\n'.join([
'export {0}={1}/{2}'.format(var.name, destination.rstrip('/'),
var.docker_path.rstrip('/'))
for var in inputs
if var.recursive and var.docker_path
])
return export_input_dirs
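# Illustrative usage sketch (not part of dsub): FakeInput is a hypothetical
# stand-in for job_model.InputFileParam, providing only the fields read above.
import collections
FakeInput = collections.namedtuple('FakeInput', ['name', 'docker_path', 'recursive'])
inputs = [FakeInput('INPUT_DATA', 'input/gs/mybucket/data/', True)]
assert build_recursive_localize_env('/mnt/data', inputs) == (
    'export INPUT_DATA=/mnt/data/input/gs/mybucket/data')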
|
def build_recursive_localize_command(destination, inputs, file_provider):
"""Return a multi-line string with a shell script to copy recursively.
Arguments:
destination: Folder where to put the data.
For example /mnt/data
inputs: a list of InputFileParam
file_provider: file provider string used to filter the input params; the
returned command will only localize inputs whose file provider
matches this filter.
Returns:
a multi-line string with a shell script that copies the inputs
recursively from GCS.
"""
command = _LOCALIZE_COMMAND_MAP[file_provider]
filtered_inputs = [
var for var in inputs
if var.recursive and var.file_provider == file_provider
]
copy_input_dirs = '\n'.join([
textwrap.dedent("""
mkdir -p {data_mount}/{docker_path}
for ((i = 0; i < 3; i++)); do
if {command} {source_uri} {data_mount}/{docker_path}; then
break
elif ((i == 2)); then
2>&1 echo "Recursive localization failed."
exit 1
fi
done
chmod -R o+r {data_mount}/{docker_path}
""").format(
command=command,
source_uri=var.uri,
data_mount=destination.rstrip('/'),
docker_path=var.docker_path) for var in filtered_inputs
])
return copy_input_dirs
|
def build_recursive_gcs_delocalize_env(source, outputs):
"""Return a multi-line string with export statements for the variables.
Arguments:
source: Folder with the data.
For example /mnt/data
outputs: a list of OutputFileParam
Returns:
a multi-line string with a shell script that sets environment variables
corresponding to the outputs.
"""
filtered_outs = [
var for var in outputs
if var.recursive and var.file_provider == job_model.P_GCS
]
return '\n'.join([
'export {0}={1}/{2}'.format(var.name,
source.rstrip('/'),
var.docker_path.rstrip('/'))
for var in filtered_outs
])
|
def build_recursive_delocalize_command(source, outputs, file_provider):
"""Return a multi-line string with a shell script to copy recursively.
Arguments:
source: Folder with the data.
For example /mnt/data
outputs: a list of OutputFileParam.
file_provider: file provider string used to filter the output params; the
returned command will only apply outputs whose file provider
matches this file filter.
Returns:
a multi-line string with a shell script that copies the inputs
recursively to GCS.
"""
command = _LOCALIZE_COMMAND_MAP[file_provider]
filtered_outputs = [
var for var in outputs
if var.recursive and var.file_provider == file_provider
]
return '\n'.join([
textwrap.dedent("""
for ((i = 0; i < 3; i++)); do
if {command} {data_mount}/{docker_path} {destination_uri}; then
break
elif ((i == 2)); then
2>&1 echo "Recursive de-localization failed."
exit 1
fi
done
""").format(
command=command,
data_mount=source.rstrip('/'),
docker_path=var.docker_path,
destination_uri=var.uri) for var in filtered_outputs
])
|
def build_mount_env(source, mounts):
"""Return a multi-line string with export statements for the variables.
Arguments:
source: Folder with the data. For example /mnt/data
mounts: a list of MountParam
Returns:
a multi-line string with a shell script that sets environment variables
corresponding to the mounts.
"""
return '\n'.join([
'export {0}={1}/{2}'.format(var.name, source.rstrip('/'),
var.docker_path.rstrip('/')) for var in mounts
])
|
def get_job_and_task_param(job_params, task_params, field):
"""Returns a dict combining the field for job and task params."""
return job_params.get(field, set()) | task_params.get(field, set())
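# Illustrative usage sketch (not part of dsub): in dsub the sets hold *Param
# namedtuples; plain tuples are used here purely to show the set union.
job_params = {'labels': {('env', 'prod')}}
task_params = {'labels': {('shard', '1')}}
assert get_job_and_task_param(job_params, task_params, 'labels') == {
    ('env', 'prod'), ('shard', '1')}
assert get_job_and_task_param(job_params, task_params, 'mounts') == set()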
|
def _parse_arguments():
"""Parses command line arguments.
Returns:
A Namespace of parsed arguments.
"""
# Handle version flag and exit if it was passed.
param_util.handle_version_flag()
parser = provider_base.create_parser(sys.argv[0])
parser.add_argument(
'--version', '-v', default=False, help='Print the dsub version and exit.')
parser.add_argument(
'--jobs',
'-j',
required=True,
nargs='*',
help='List of job-ids to delete. Use "*" to delete all running jobs.')
parser.add_argument(
'--tasks',
'-t',
nargs='*',
help='List of tasks in an array job to delete.')
parser.add_argument(
'--users',
'-u',
nargs='*',
default=[],
help="""Deletes only those jobs which were submitted by the list of users.
Use "*" to delete jobs of any user.""")
parser.add_argument(
'--age',
help="""Deletes only those jobs newer than the specified age. Ages can be
listed using a number followed by a unit. Supported units are
s (seconds), m (minutes), h (hours), d (days), w (weeks).
For example: '7d' (7 days). Bare numbers are treated as seconds since the epoch (UTC).""")
parser.add_argument(
'--label',
nargs='*',
action=param_util.ListParamAction,
default=[],
help='User labels to match. Tasks returned must match all labels.',
metavar='KEY=VALUE')
# Shared arguments between the "google" and "google-v2" providers
google_common = parser.add_argument_group(
title='google-common',
description='Options common to the "google" and "google-v2" providers')
google_common.add_argument(
'--project',
help='Cloud project ID in which to find and delete the job(s)')
return provider_base.parse_args(
parser, {
'google': ['project'],
'google-v2': ['project'],
'test-fails': [],
'local': [],
}, sys.argv[1:])
|
def _emit_search_criteria(user_ids, job_ids, task_ids, labels):
"""Print the filters used to delete tasks. Use raw flags as arguments."""
print('Delete running jobs:')
print(' user:')
print(' %s\n' % user_ids)
print(' job-id:')
print(' %s\n' % job_ids)
if task_ids:
print(' task-id:')
print(' %s\n' % task_ids)
# Labels are in a LabelParam namedtuple and must be reformatted for printing.
if labels:
print(' labels:')
print(' %s\n' % repr(labels))
|
def ddel_tasks(provider,
user_ids=None,
job_ids=None,
task_ids=None,
labels=None,
create_time_min=None,
create_time_max=None):
"""Kill jobs or job tasks.
This function separates ddel logic from flag parsing and user output. Users
of ddel who intend to access the data programmatically should use this.
Args:
provider: an instantiated dsub provider.
user_ids: a set of user ids who "own" the job(s) to delete.
job_ids: a set of job ids to delete.
task_ids: a set of task ids to delete.
labels: a set of LabelParam, each must match the job(s) to be cancelled.
create_time_min: a timezone-aware datetime value for the earliest create
time of a task, inclusive.
create_time_max: a timezone-aware datetime value for the most recent create
time of a task, inclusive.
Returns:
list of job ids which were deleted.
"""
# Delete the requested jobs
deleted_tasks, error_messages = provider.delete_jobs(
user_ids, job_ids, task_ids, labels, create_time_min, create_time_max)
# Emit any errors canceling jobs
for msg in error_messages:
print(msg)
return deleted_tasks
|
def get_action_by_id(op, action_id):
"""Return the operation's array of actions."""
actions = get_actions(op)
if actions and 1 <= action_id < len(actions):
return actions[action_id - 1]
|
def _get_action_by_name(op, name):
"""Return the value for the specified action."""
actions = get_actions(op)
for action in actions:
if action.get('name') == name:
return action
|
def get_action_environment(op, name):
"""Return the environment for the operation."""
action = _get_action_by_name(op, name)
if action:
return action.get('environment')
|
def get_action_image(op, name):
"""Return the image for the operation."""
action = _get_action_by_name(op, name)
if action:
return action.get('imageUri')
|
def get_failed_events(op):
"""Return the events (if any) with a non-zero exitStatus."""
events = get_events(op)
if events:
return [
e for e in events if int(e.get('details', {}).get('exitStatus', 0)) != 0
]
return None
|
def get_event_of_type(op, event_type):
"""Return all events of a particular type."""
events = get_events(op)
if not events:
return None
return [e for e in events if e.get('details', {}).get('@type') == event_type]
|
def get_last_update(op):
"""Return the most recent timestamp in the operation."""
last_update = get_end_time(op)
if not last_update:
last_event = get_last_event(op)
if last_event:
last_update = last_event['timestamp']
if not last_update:
last_update = get_create_time(op)
return last_update
|
def is_dsub_operation(op):
"""Determine if a pipelines operation is a dsub request.
We don't have a rigorous way to identify an operation as being submitted
by dsub. Our best option is to check for certain fields that have always
been part of dsub operations.
- labels: job-id, job-name, and user-id have always existed. The dsub-version
label has always existed for the google-v2 provider.
Args:
op: a pipelines operation.
Returns:
Boolean, true if the pipeline run was generated by dsub.
"""
if not is_pipeline(op):
return False
for name in ['dsub-version', 'job-id', 'job-name', 'user-id']:
if not get_label(op, name):
return False
return True
|
def _prepare_summary_table(rows):
"""Create a new table that is a summary of the input rows.
All rows with the same (job-name or job-id, status) are grouped together.
Args:
rows: the input rows, a list of dictionaries.
Returns:
A new row set of summary information.
"""
if not rows:
return []
# We either group on the job-name (if present) or fall back to the job-id
key_field = 'job-name'
if key_field not in rows[0]:
key_field = 'job-id'
# Group each of the rows based on (job-name or job-id, status)
grouped = collections.defaultdict(lambda: collections.defaultdict(lambda: []))
for row in rows:
grouped[row.get(key_field, '')][row.get('status', '')] += [row]
# Now that we have the rows grouped, create a summary table.
# Use the original table as the driver in order to preserve the order.
new_rows = []
for job_key in sorted(grouped.keys()):
group = grouped.get(job_key, None)
canonical_status = ['RUNNING', 'SUCCESS', 'FAILURE', 'CANCEL']
# Written this way to ensure that if somehow a new status is introduced,
# it shows up in our output.
for status in canonical_status + sorted(group.keys()):
if status not in group:
continue
task_count = len(group[status])
del group[status]
if task_count:
summary_row = collections.OrderedDict()
summary_row[key_field] = job_key
summary_row['status'] = status
summary_row['task-count'] = task_count
new_rows.append(summary_row)
return new_rows
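# Illustrative usage sketch (not part of dsub): three tasks of one job collapse
# into one summary row per status, emitted in canonical status order.
rows = [{'job-name': 'align', 'status': 'RUNNING'},
        {'job-name': 'align', 'status': 'RUNNING'},
        {'job-name': 'align', 'status': 'FAILURE'}]
summary = _prepare_summary_table(rows)
assert [dict(r) for r in summary] == [
    {'job-name': 'align', 'status': 'RUNNING', 'task-count': 2},
    {'job-name': 'align', 'status': 'FAILURE', 'task-count': 1}]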
|
def _prepare_row(task, full, summary):
"""return a dict with the task's info (more if "full" is set)."""
# Would like to include the Job ID in the default set of columns, but
# it is a long value and would leave little room for status and update time.
row_spec = collections.namedtuple('row_spec',
['key', 'required', 'default_value'])
# pyformat: disable
default_columns = [
row_spec('job-name', True, None),
row_spec('task-id', False, None),
row_spec('last-update', True, None),
row_spec('status-message', True, None)
]
full_columns = default_columns + [
row_spec('job-id', True, None),
row_spec('user-id', True, None),
row_spec('status', True, None),
row_spec('status-detail', True, None),
row_spec('task-attempt', False, None),
row_spec('create-time', True, None),
row_spec('start-time', True, None),
row_spec('end-time', True, None),
row_spec('internal-id', True, None),
row_spec('logging', True, None),
row_spec('labels', True, {}),
row_spec('envs', True, {}),
row_spec('inputs', True, {}),
row_spec('input-recursives', False, {}),
row_spec('outputs', True, {}),
row_spec('output-recursives', False, {}),
row_spec('mounts', True, {}),
row_spec('provider', True, None),
row_spec('provider-attributes', True, {}),
row_spec('events', True, []),
row_spec('user-project', False, None),
row_spec('dsub-version', False, None),
row_spec('script-name', False, None),
row_spec('script', False, None),
]
summary_columns = default_columns + [
row_spec('job-id', True, None),
row_spec('user-id', True, None),
row_spec('status', True, None),
]
# pyformat: enable
assert not (full and summary), 'Full and summary cannot both be enabled'
if full:
columns = full_columns
elif summary:
columns = summary_columns
else:
columns = default_columns
row = {}
for col in columns:
key, required, default = col
value = task.get_field(key, default)
if required or value is not None:
row[key] = value
return row
|
def _parse_arguments():
"""Parses command line arguments.
Returns:
A Namespace of parsed arguments.
"""
# Handle version flag and exit if it was passed.
param_util.handle_version_flag()
parser = provider_base.create_parser(sys.argv[0])
parser.add_argument(
'--version', '-v', default=False, help='Print the dsub version and exit.')
parser.add_argument(
'--jobs',
'-j',
nargs='*',
help='A list of job IDs on which to check status')
parser.add_argument(
'--names',
'-n',
nargs='*',
help='A list of job names on which to check status')
parser.add_argument(
'--tasks',
'-t',
nargs='*',
help='A list of task IDs on which to check status')
parser.add_argument(
'--attempts',
nargs='*',
help='A list of task attempts on which to check status')
parser.add_argument(
'--users',
'-u',
nargs='*',
default=[],
help="""Lists only those jobs which were submitted by the list of users.
Use "*" to list jobs of any user.""")
parser.add_argument(
'--status',
'-s',
nargs='*',
default=['RUNNING'],
choices=['RUNNING', 'SUCCESS', 'FAILURE', 'CANCELED', '*'],
help="""Lists only those jobs which match the specified status(es).
Choose from {'RUNNING', 'SUCCESS', 'FAILURE', 'CANCELED'}.
Use "*" to list jobs of any status.""",
metavar='STATUS')
parser.add_argument(
'--age',
help="""List only those jobs newer than the specified age. Ages can be
listed using a number followed by a unit. Supported units are
s (seconds), m (minutes), h (hours), d (days), w (weeks).
For example: '7d' (7 days). Bare numbers are treated as seconds since the epoch (UTC).""")
parser.add_argument(
'--label',
nargs='*',
action=param_util.ListParamAction,
default=[],
help='User labels to match. Tasks returned must match all labels.',
metavar='KEY=VALUE')
parser.add_argument(
'--poll-interval',
default=10,
type=int,
help='Polling interval (in seconds) for checking job status '
'when --wait is set.')
parser.add_argument(
'--wait', action='store_true', help='Wait until jobs have all completed.')
parser.add_argument(
'--limit',
default=0,
type=int,
help='The maximum number of tasks to list. The default is unlimited.')
parser.add_argument(
'--format',
choices=['text', 'json', 'yaml', 'provider-json'],
help='Set the output format.')
output_style = parser.add_mutually_exclusive_group()
output_style.add_argument(
'--full',
'-f',
action='store_true',
help='Display output with full task information'
' and input parameters.')
output_style.add_argument(
'--summary',
action='store_true',
help='Display a summary of the results, grouped by (job, status).')
# Shared arguments between the "google" and "google-v2" providers
google_common = parser.add_argument_group(
title='google-common',
description='Options common to the "google" and "google-v2" providers')
google_common.add_argument(
'--project',
help='Cloud project ID in which to find the job(s)')
return provider_base.parse_args(
parser, {
'google': ['project'],
'google-v2': ['project'],
'test-fails': [],
'local': [],
}, sys.argv[1:])
|
def dstat_job_producer(provider,
statuses,
user_ids=None,
job_ids=None,
job_names=None,
task_ids=None,
task_attempts=None,
labels=None,
create_time_min=None,
create_time_max=None,
max_tasks=0,
full_output=False,
summary_output=False,
poll_interval=0,
raw_format=False):
"""Generate jobs as lists of task dicts ready for formatting/output.
Args:
provider: an instantiated dsub provider.
statuses: a set of status strings that eligible jobs may match.
user_ids: a set of user strings that eligible jobs may match.
job_ids: a set of job-id strings eligible jobs may match.
job_names: a set of job-name strings eligible jobs may match.
task_ids: a set of task-id strings eligible tasks may match.
task_attempts: a set of task-attempt strings eligible tasks may match.
labels: set of LabelParam that all tasks must match.
create_time_min: a timezone-aware datetime value for the earliest create
time of a task, inclusive.
create_time_max: a timezone-aware datetime value for the most recent create
time of a task, inclusive.
max_tasks: (int) maximum number of tasks to return per dstat job lookup.
full_output: (bool) return all dsub fields.
summary_output: (bool) return a summary of the job list.
poll_interval: (int) wait time between poll events, dstat will poll jobs
until all jobs succeed or fail. Set to zero to disable
polling and return after the first lookup.
raw_format: (bool) set True to prevent dsub from normalizing the task dict,
this defaults to False and should only be set True if a
provider-specific view of tasks is absolutely required.
(NB: provider interfaces change over time, no transition path
will be provided for users depending on this flag).
Yields:
lists of task dictionaries - each list representing a dstat poll event.
"""
some_job_running = True
while some_job_running:
# Get a batch of jobs.
tasks = provider.lookup_job_tasks(
statuses,
user_ids=user_ids,
job_ids=job_ids,
job_names=job_names,
task_ids=task_ids,
task_attempts=task_attempts,
labels=labels,
create_time_min=create_time_min,
create_time_max=create_time_max,
max_tasks=max_tasks,
page_size=max_tasks)
some_job_running = False
formatted_tasks = []
for task in tasks:
if 0 < max_tasks <= len(formatted_tasks):
break
# Format tasks as specified.
if raw_format:
formatted_tasks.append(task.raw_task_data())
else:
formatted_tasks.append(_prepare_row(task, full_output, summary_output))
# Determine if any of the jobs are running.
if task.get_field('task-status') == 'RUNNING':
some_job_running = True
# Yield the tasks and determine if the loop should continue.
yield formatted_tasks
if poll_interval and some_job_running:
time.sleep(poll_interval)
else:
break
|
def lookup_job_tasks(provider,
statuses,
user_ids=None,
job_ids=None,
job_names=None,
task_ids=None,
task_attempts=None,
labels=None,
create_time_min=None,
create_time_max=None,
max_tasks=0,
page_size=0,
summary_output=False):
"""Generate formatted jobs individually, in order of create-time.
Args:
provider: an instantiated dsub provider.
statuses: a set of status strings that eligible jobs may match.
user_ids: a set of user strings that eligible jobs may match.
job_ids: a set of job-id strings eligible jobs may match.
job_names: a set of job-name strings eligible jobs may match.
task_ids: a set of task-id strings eligible tasks may match.
task_attempts: a set of task-attempt strings eligible tasks may match.
labels: set of LabelParam that all tasks must match.
create_time_min: a timezone-aware datetime value for the earliest create
time of a task, inclusive.
create_time_max: a timezone-aware datetime value for the most recent create
time of a task, inclusive.
max_tasks: (int) maximum number of tasks to return per dstat job lookup.
page_size: the page size to use for each query to the backend. May be
ignored by some provider implementations.
summary_output: (bool) summarize the job list.
Yields:
Individual task dictionaries with associated metadata
"""
tasks_generator = provider.lookup_job_tasks(
statuses,
user_ids=user_ids,
job_ids=job_ids,
job_names=job_names,
task_ids=task_ids,
task_attempts=task_attempts,
labels=labels,
create_time_min=create_time_min,
create_time_max=create_time_max,
max_tasks=max_tasks,
page_size=page_size)
# Yield formatted tasks.
for task in tasks_generator:
yield _prepare_row(task, True, summary_output)
|
def prepare_output(self, row):
"""Convert types of task fields."""
date_fields = ['last-update', 'create-time', 'start-time', 'end-time']
int_fields = ['task-attempt']
for col in date_fields:
if col in row:
row[col] = self.default_format_date(row[col])
for col in int_fields:
if col in row and row[col] is not None:
row[col] = int(row[col])
return row
|
def trim_display_field(self, value, max_length):
"""Return a value for display; if longer than max length, use ellipsis."""
if not value:
return ''
if len(value) > max_length:
return value[:max_length - 3] + '...'
return value
|
def format_pairs(self, values):
"""Returns a string of comma-delimited key=value pairs."""
return ', '.join(
'%s=%s' % (key, value) for key, value in sorted(values.items()))
|
def string_presenter(self, dumper, data):
"""Presenter to force yaml.dump to use multi-line string style."""
if '\n' in data:
return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
else:
return dumper.represent_scalar('tag:yaml.org,2002:str', data)
|
def get_zones(input_list):
"""Returns a list of zones based on any wildcard input.
This function is intended to provide an easy method for producing a list
of desired zones for a pipeline to run in.
The Pipelines API default zone list is "any zone". The problem with
"any zone" is that it can lead to incurring Cloud Storage egress charges
if the GCE zone selected is in a different region than the GCS bucket.
See https://cloud.google.com/storage/pricing#network-egress.
A user with a multi-region US bucket would want pipelines to run in
a "us-*" zone.
A user with a regional bucket in US would want to restrict pipelines to
run in a zone in that region.
Rarely does the specific zone matter for a pipeline.
This function allows for a simple short-hand such as:
[ "us-*" ]
[ "us-central1-*" ]
These examples will expand out to the full list of US and us-central1 zones
respectively.
Args:
input_list: list of zone names/patterns
Returns:
A list of zones, with any wildcard zone specifications expanded.
"""
if not input_list:
return []
output_list = []
for zone in input_list:
if zone.endswith('*'):
prefix = zone[:-1]
output_list.extend([z for z in _ZONES if z.startswith(prefix)])
else:
output_list.append(zone)
return output_list
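# Illustrative usage sketch (not part of dsub): the exact expansion depends on
# the module-level _ZONES list, so the first result below is indicative only.
get_zones(['us-central1-*'])   # e.g. ['us-central1-a', 'us-central1-b', ...]
get_zones(['europe-west1-b'])  # ['europe-west1-b'] -- no wildcard, passed through
get_zones([])                  # []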
|
def build_pipeline_labels(job_metadata, task_metadata, task_id_pattern=None):
"""Build a set() of standard job and task labels.
Args:
job_metadata: Job metadata, such as job-id, job-name, and user-id.
task_metadata: Task metadata, such as the task-id.
task_id_pattern: A pattern for the task-id value, such as "task-%d"; the
original google label values could not be strictly numeric, so "task-"
was prepended.
Returns:
A set of standard dsub Label() objects to attach to a pipeline.
"""
labels = {
Label(name, job_metadata[name])
for name in ['job-name', 'job-id', 'user-id', 'dsub-version']
}
task_id = task_metadata.get('task-id')
if task_id is not None: # Check for None (as 0 is conceivably valid)
if task_id_pattern:
task_id = task_id_pattern % task_id
labels.add(Label('task-id', str(task_id)))
task_attempt = task_metadata.get('task-attempt')
if task_attempt is not None:
labels.add(Label('task-attempt', str(task_attempt)))
return labels
|
def prepare_job_metadata(script, job_name, user_id, create_time):
"""Returns a dictionary of metadata fields for the job."""
# The name of the pipeline gets set into the ephemeralPipeline.name as-is.
# The default name of the pipeline is the script name
# The name of the job is derived from the job_name and gets set as a
# 'job-name' label (and so the value must be normalized).
if job_name:
pipeline_name = job_name
job_name_value = job_model.convert_to_label_chars(job_name)
else:
pipeline_name = os.path.basename(script)
job_name_value = job_model.convert_to_label_chars(
pipeline_name.split('.', 1)[0])
# The user-id will get set as a label
user_id = job_model.convert_to_label_chars(user_id)
# Now build the job-id. We want the job-id to be expressive while also
# having a low-likelihood of collisions.
#
# For expressiveness, we:
# * use the job name (truncated at 10 characters).
# * insert the user-id
# * add a datetime value
# To have a high likelihood of uniqueness, the datetime value is out to
# hundredths of a second.
#
# The full job-id is:
# <job-name>--<user-id>--<timestamp>
job_id = '%s--%s--%s' % (job_name_value[:10], user_id,
create_time.strftime('%y%m%d-%H%M%S-%f')[:16])
# Standard version is MAJOR.MINOR(.PATCH). This will convert the version
# string to "vMAJOR-MINOR(-PATCH)". Example; "0.1.0" -> "v0-1-0".
version = job_model.convert_to_label_chars('v%s' % DSUB_VERSION)
return {
'pipeline-name': pipeline_name,
'job-name': job_name_value,
'job-id': job_id,
'user-id': user_id,
'dsub-version': version,
}
|
def parse_rfc3339_utc_string(rfc3339_utc_string):
"""Converts a datestamp from RFC3339 UTC to a datetime.
Args:
rfc3339_utc_string: a datetime string in RFC3339 UTC "Zulu" format
Returns:
A datetime.
"""
# The timestamp from the Google Operations are all in RFC3339 format, but
# they are sometimes formatted to milliseconds, microseconds, sometimes
# nanoseconds, and sometimes only seconds:
# * 2016-11-14T23:05:56Z
# * 2016-11-14T23:05:56.010Z
# * 2016-11-14T23:05:56.010429Z
# * 2016-11-14T23:05:56.010429380Z
m = re.match(r'(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2}).?(\d*)Z',
rfc3339_utc_string)
# It would be unexpected to get a different date format back from Google.
# If we raise an exception here, we can break people completely.
# Instead, let's just return None and people can report that some dates
# are not showing up.
# We might reconsider this approach in the future; it was originally
# established when dates were only used for display.
if not m:
return None
groups = m.groups()
if len(groups[6]) not in (0, 3, 6, 9):
return None
# Create a UTC datestamp from parsed components
# 1- Turn components 0-5 from strings to integers
# 2- If the last component does not exist, set it to 0.
# If it does exist, convert it to microseconds based on its length.
g = [int(val) for val in groups[:6]]
fraction = groups[6]
if not fraction:
micros = 0
elif len(fraction) == 3:
micros = int(fraction) * 1000
elif len(fraction) == 6:
micros = int(fraction)
elif len(fraction) == 9:
# When nanoseconds are provided, we round
micros = int(round(int(fraction) / 1000))
else:
assert False, 'Fraction length not 0, 3, 6, or 9: {}'.format(len(fraction))
try:
return datetime(g[0], g[1], g[2], g[3], g[4], g[5], micros, tzinfo=pytz.utc)
except ValueError as e:
assert False, 'Could not parse RFC3339 datestring: {} exception: {}'.format(
rfc3339_utc_string, e)
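# Illustrative usage sketch (not part of dsub): assumes "from datetime import
# datetime" and "import pytz", as the implementation above already requires.
ts = parse_rfc3339_utc_string('2016-11-14T23:05:56.010Z')
assert ts == datetime(2016, 11, 14, 23, 5, 56, 10000, tzinfo=pytz.utc)
assert parse_rfc3339_utc_string('2016-11-14 23:05:56') is None  # not RFC3339 "Zulu" format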
|
def get_operation_full_job_id(op):
"""Returns the job-id or job-id.task-id for the operation."""
job_id = op.get_field('job-id')
task_id = op.get_field('task-id')
if task_id:
return '%s.%s' % (job_id, task_id)
else:
return job_id
|
def _cancel_batch(batch_fn, cancel_fn, ops):
"""Cancel a batch of operations.
Args:
batch_fn: API-specific batch function.
cancel_fn: API-specific cancel function.
ops: A list of operations to cancel.
Returns:
A list of operations canceled and a list of error messages.
"""
# We define an inline callback which will populate a list of
# successfully canceled operations as well as a list of operations
# which were not successfully canceled.
canceled = []
failed = []
def handle_cancel_response(request_id, response, exception):
"""Callback for the cancel response."""
del response # unused
if exception:
# We don't generally expect any failures here, except possibly trying
# to cancel an operation that is already canceled or finished.
#
# If the operation is already finished, provide a clearer message than
# "error 400: Bad Request".
msg = 'error %s: %s' % (exception.resp.status, exception.resp.reason)
if exception.resp.status == FAILED_PRECONDITION_CODE:
detail = json.loads(exception.content)
status = detail.get('error', {}).get('status')
if status == FAILED_PRECONDITION_STATUS:
msg = 'Not running'
failed.append({'name': request_id, 'msg': msg})
else:
canceled.append({'name': request_id})
return
# Set up the batch object
batch = batch_fn(callback=handle_cancel_response)
# The callback gets a "request_id" which is the operation name.
# Build a dict such that after the callback, we can lookup the operation
# objects by name
ops_by_name = {}
for op in ops:
op_name = op.get_field('internal-id')
ops_by_name[op_name] = op
batch.add(cancel_fn(name=op_name, body={}), request_id=op_name)
# Cancel the operations
batch.execute()
# Iterate through the canceled and failed lists to build our return lists
canceled_ops = [ops_by_name[op['name']] for op in canceled]
error_messages = []
for fail in failed:
op = ops_by_name[fail['name']]
error_messages.append("Error canceling '%s': %s" %
(get_operation_full_job_id(op), fail['msg']))
return canceled_ops, error_messages
|
def cancel(batch_fn, cancel_fn, ops):
"""Cancel operations.
Args:
batch_fn: API-specific batch function.
cancel_fn: API-specific cancel function.
ops: A list of operations to cancel.
Returns:
A list of operations canceled and a list of error messages.
"""
# Canceling many operations one-by-one can be slow.
# The Pipelines API doesn't directly support a list of operations to cancel,
# but the requests can be performed in batch.
canceled_ops = []
error_messages = []
max_batch = 256
total_ops = len(ops)
for first_op in range(0, total_ops, max_batch):
batch_canceled, batch_messages = _cancel_batch(
batch_fn, cancel_fn, ops[first_op:first_op + max_batch])
canceled_ops.extend(batch_canceled)
error_messages.extend(batch_messages)
return canceled_ops, error_messages
|
def retry_api_check(exception):
"""Return True if we should retry. False otherwise.
Args:
exception: An exception to test for transience.
Returns:
True if we should retry. False otherwise.
"""
if isinstance(exception, apiclient.errors.HttpError):
if exception.resp.status in TRANSIENT_HTTP_ERROR_CODES:
_print_error('Retrying...')
return True
if isinstance(exception, socket.error):
if exception.errno in TRANSIENT_SOCKET_ERROR_CODES:
_print_error('Retrying...')
return True
if isinstance(exception, oauth2client.client.AccessTokenRefreshError):
_print_error('Retrying...')
return True
# For a given installation, this could be a permanent error, but has only
# been observed as transient.
if isinstance(exception, SSLError):
_print_error('Retrying...')
return True
# This has been observed as a transient error:
# ServerNotFoundError: Unable to find the server at genomics.googleapis.com
if isinstance(exception, ServerNotFoundError):
_print_error('Retrying...')
return True
return False
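# Illustrative sketch (not dsub's actual configuration): retry_api_check is meant
# to be used as a retry predicate, for example with the "retrying" library; the
# attempt count and backoff values below are assumptions for illustration only.
import retrying
@retrying.retry(
    stop_max_attempt_number=5,
    retry_on_exception=retry_api_check,
    wait_exponential_multiplier=1000,   # start around 1 second
    wait_exponential_max=64000)         # cap around 64 seconds
def _list_operations_with_retries(request):
    return request.execute()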
|
def retry_auth_check(exception):
"""Specific check for auth error codes.
Return True if we should retry.
False otherwise.
Args:
exception: An exception to test for transience.
Returns:
True if we should retry. False otherwise.
"""
if isinstance(exception, apiclient.errors.HttpError):
if exception.resp.status in HTTP_AUTH_ERROR_CODES:
_print_error('Retrying...')
return True
return False
|
def setup_service(api_name, api_version, credentials=None):
"""Configures genomics API client.
Args:
api_name: Name of the Google API (for example: "genomics")
api_version: Version of the API (for example: "v2alpha1")
credentials: Credentials to be used for the gcloud API calls.
Returns:
A configured Google Genomics API client with appropriate credentials.
"""
if not credentials:
credentials = oauth2client.client.GoogleCredentials.get_application_default(
)
return apiclient.discovery.build(
api_name, api_version, credentials=credentials)
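# Illustrative usage sketch (not part of dsub): builds a v2alpha1 Genomics client
# with application default credentials (assumes the gcloud ADC environment is
# configured); pass explicit credentials to override the default.
service = setup_service('genomics', 'v2alpha1')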
|
def execute(api):
"""Executes operation.
Args:
api: The base API object
Returns:
A response body object
"""
try:
return api.execute()
except Exception as exception:
now = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
_print_error('%s: Exception %s: %s' % (now, type(exception).__name__,
str(exception)))
# Re-raise exception to be handled by retry logic
raise exception
|
def _eval_arg_type(arg_type, T=Any, arg=None, sig=None):
"""Returns a type from a snippit of python source. Should normally be
something just like 'str' or 'Object'.
arg_type the source to be evaluated
T the default type
arg context of where this type was extracted
sig context from where the arg was extracted
Returns a type or a Type
"""
try:
T = eval(arg_type)
except Exception as e:
raise ValueError('The type of {0} could not be evaluated in {1} for {2}: {3}' \
.format(arg_type, arg, sig, text_type(e)))
else:
if type(T) not in (type, Type):
raise TypeError('{0} is not a valid type in {1} for {2}' \
.format(repr(T), arg, sig))
return T
|
def jsonify_status_code(status_code, *args, **kw):
"""Returns a jsonified response with the specified HTTP status code.
The positional and keyword arguments are passed directly to the
:func:`flask.jsonify` function which creates the response.
"""
is_batch = kw.pop('is_batch', False)
if is_batch:
response = flask_make_response(json.dumps(*args, **kw))
response.mimetype = 'application/json'
response.status_code = status_code
return response
response = jsonify(*args, **kw)
response.status_code = status_code
return response
|
def register(self, app, options, first_registration=False):
"""Called by :meth:`Flask.register_blueprint` to register a blueprint
on the application. This can be overridden to customize the register
behavior. Keyword arguments from
:func:`~flask.Flask.register_blueprint` are directly forwarded to this
method in the `options` dictionary.
"""
self.jsonrpc_site = options.get('jsonrpc_site')
self._got_registered_once = True
state = self.make_setup_state(app, options, first_registration)
if self.has_static_folder and \
not self.name + '.static' in state.app.view_functions.keys():
state.add_url_rule(self.static_url_path + '/<path:filename>',
view_func=self.send_static_file,
endpoint='static')
for deferred in self.deferred_functions:
deferred(state)
|