Dataset columns: code (string), signature (string), docstring (string), loss_without_docstring (float64), loss_with_docstring (float64), factor (float64)
# Prevent duplication of "AppError" in places that print "AppError" # and then this formatted string if isinstance(e, dxpy.AppError): return _safe_unicode(e) if USING_PYTHON2: return unicode(e.__class__.__name__, 'utf-8') + ": " + _safe_unicode(e) else: return e.__class__.__name__ + ": " + _safe_unicode(e)
def _format_exception_message(e)
Formats the specified exception.
5.372414
5.359707
1.002371
global RUN_COUNT RUN_COUNT += 1 dx_working_dir = os.getcwd() if dxpy.JOB_ID is not None: logging.basicConfig() try: logging.getLogger().addHandler(dxpy.DXLogHandler()) except dxpy.exceptions.DXError: print("TODO: FIXME: the EE client should die if logging is not available") job = dxpy.describe(dxpy.JOB_ID) else: if function_name is None: function_name = os.environ.get('DX_TEST_FUNCTION', 'main') if function_input is None: with open("job_input.json", "r") as fh: function_input = json.load(fh) job = {'function': function_name, 'input': function_input} with open("job_error_reserved_space", "w") as fh: fh.write("This file contains reserved space for writing job errors in case the filesystem becomes full.\n" + " "*1024*64) print("Invoking", job.get('function'), "with", job.get('input')) try: result = ENTRY_POINT_TABLE[job['function']](**job['input']) except dxpy.AppError as e: save_error(e, dx_working_dir, error_type="AppError") raise except Exception as e: save_error(e, dx_working_dir) raise if result is not None: # TODO: protect against client removing its original working directory os.chdir(dx_working_dir) if USING_PYTHON2: # On python-2 we need to use binary mode with open("job_output.json", "wb") as fh: json.dump(result, fh, indent=2, cls=DXJSONEncoder) fh.write(b"\n") else: with open("job_output.json", "w") as fh: json.dump(result, fh, indent=2, cls=DXJSONEncoder) fh.write("\n") return result
def run(function_name=None, function_input=None)
Triggers the execution environment entry point processor. Use this function in the program entry point code: .. code-block:: python import dxpy @dxpy.entry_point('main') def hello(i): pass dxpy.run() This method may be used to invoke the program either in a production environment (inside the execution environment) or for local debugging (in the debug harness), as follows: If the environment variable *DX_JOB_ID* is set, the processor retrieves the job with that ID from the API server. The job's *function* field indicates the function name to be invoked. That function name is looked up in the table of all methods decorated with *@dxpy.entry_point('name')* in the module from which :func:`run()` was called, and the matching method is invoked (with the job's input supplied as parameters). This is the mode of operation used in the DNAnexus execution environment. .. warning:: The parameters *function_name* and *function_input* are disregarded in this mode of operation. If the environment variable *DX_JOB_ID* is not set, the function name may be given in *function_name*; if not set, it is set by the environment variable *DX_TEST_FUNCTION*. The function input may be given in *function_input*; if not set, it is set by the local file *job_input.json* which is expected to be present. The absence of *DX_JOB_ID* signals to :func:`run()` that execution is happening in the debug harness. In this mode of operation, all calls to :func:`dxpy.bindings.dxjob.new_dxjob()` (and higher level handler methods which use it) are intercepted, and :func:`run()` is invoked instead with appropriate inputs.
3.551889
3.414732
1.040166
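For the local debug mode described above, a minimal harness might look like the sketch below. It assumes *DX_JOB_ID* is unset in the environment (so ``dxpy.JOB_ID`` is None and :func:`run()` falls back to *DX_TEST_FUNCTION* and a local ``job_input.json``); the entry point name and its input are illustrative.

.. code-block:: python

    import json
    import os

    import dxpy

    @dxpy.entry_point('main')
    def main(name="world"):
        return {"greeting": "hello " + name}

    if __name__ == '__main__':
        os.environ['DX_TEST_FUNCTION'] = 'main'   # which entry point run() should invoke
        with open("job_input.json", "w") as fh:   # input read by run() in debug mode
            json.dump({"name": "debug harness"}, fh)
        dxpy.run()                                # writes job_output.json on success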
def wrap(f): ENTRY_POINT_TABLE[entry_point_name] = f @wraps(f) def wrapped_f(*args, **kwargs): return f(*args, **kwargs) return wrapped_f return wrap
def entry_point(entry_point_name)
Use this to decorate a DNAnexus execution environment entry point. Example: .. code-block:: python @dxpy.entry_point('main') def hello(i): pass
2.369514
3.251192
0.728814
''' :param path: a path or ID to a workflow object :type path: string :returns: tuple of (workflow ID, project ID) Returns the workflow and project IDs from the given path if available; otherwise, exits with an appropriate error message. ''' project, _folderpath, entity_result = try_call(resolve_existing_path, path, expected='entity') try: if entity_result is None or not entity_result['id'].startswith('workflow-'): raise DXCLIError('Could not resolve "' + path + '" to a workflow object') except: err_exit() return entity_result['id'], project
def get_workflow_id_and_project(path)
:param path: a path or ID to a workflow object :type path: string :returns: tuple of (workflow ID, project ID) Returns the workflow and project IDs from the given path if available; otherwise, exits with an appropriate error message.
6.620606
3.866107
1.712473
authserver = get_auth_server_name(authserver_host, authserver_port) return DXHTTPRequest(authserver + "/system/getUserInfo", {}, prepend_srv=False)
def user_info(authserver_host=None, authserver_port=None)
Returns the result of the user_info call against the specified auth server. .. deprecated:: 0.108.0 Use :func:`whoami` instead where possible.
7.835588
10.447039
0.75003
''' :param job_homedir: explicit value for home directory, used for testing purposes :rtype: string :returns: path to input directory Returns the input directory, where all inputs are downloaded ''' if job_homedir is not None: home_dir = job_homedir else: home_dir = environ.get('HOME') idir = os.path.join(home_dir, 'in') return idir
def get_input_dir(job_homedir=None)
:param job_homedir: explicit value for home directory, used for testing purposes :rtype: string :returns: path to input directory Returns the input directory, where all inputs are downloaded
3.903677
2.034384
1.91885
''' :param job_homedir: explicit value for home directory, used for testing purposes :rtype: string :returns: path to output directory Returns the output directory, where all outputs are created, and uploaded from ''' if job_homedir is not None: home_dir = job_homedir else: home_dir = environ.get('HOME') odir = os.path.join(home_dir, 'out') return odir
def get_output_dir(job_homedir=None)
:param job_homedir: explicit value for home directory, used for testing purposes :rtype: string :returns: path to output directory Returns the output directory, where all outputs are created, and uploaded from
4.331969
1.963668
2.20606
path = get_output_json_file() try: os.remove(path) except OSError as e: if e.errno == errno.ENOENT: pass else: raise
def rm_output_json_file()
Warning: this is not for casual use. It erases the output json file, and should be used for testing purposes only.
2.327709
2.226411
1.045499
if not os.path.exists(path): # path does not exist, create the directory os.mkdir(path) else: # The path exists, check that it is not a file if os.path.isfile(path): raise Exception("Path %s already exists, and it is a file, not a directory" % path)
def ensure_dir(path)
:param path: path to directory to be created Create a directory if it does not already exist.
2.789753
2.873112
0.970987
# sanity check for filenames bad_filenames = [".", ".."] if fname in bad_filenames: raise DXError("Invalid filename {}".format(fname)) return fname.replace('/', '%2F')
def make_unix_filename(fname)
:param fname: the basename of a file (e.g., xxx in /zzz/yyy/xxx). :returns: a valid unix filename :rtype: string :raises: DXError if the filename is invalid on a Unix system The problem being solved here is that *fname* is a Python string that may contain characters that are invalid in a file name. We replace all slashes with %2F. Another issue is that the user may choose an invalid name; since we focus on Unix systems, the only possibilities are "." and "..".
6.133373
4.530398
1.353826
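A quick sketch of the behaviour described above. The import path is an assumption (in dxpy this helper lives alongside the other file-loading utilities); the filenames are illustrative.

.. code-block:: python

    from dxpy.exceptions import DXError
    from dxpy.utils.file_load_utils import make_unix_filename  # import path is an assumption

    print(make_unix_filename("reads/sample1.fastq"))   # "reads%2Fsample1.fastq"
    print(make_unix_filename("sample1.fastq"))         # unchanged: "sample1.fastq"
    try:
        make_unix_filename("..")                       # invalid on a Unix system
    except DXError as e:
        print("rejected:", e)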
def get_input_hash(): with open(job_input_file) as fh: job_input = json.load(fh) return job_input job_input = get_input_hash() files = collections.defaultdict(list) # dictionary, with empty lists as default elements dirs = [] # directories to create under <idir> # Local function for adding a file to the list of files to be created # for example: # iname == "seq1" # subdir == "015" # value == { "$dnanexus_link": { # "project": "project-BKJfY1j0b06Z4y8PX8bQ094f", # "id": "file-BKQGkgQ0b06xG5560GGQ001B" # } # will create a record describing that the file should # be downloaded into seq1/015/<filename> def add_file(iname, subdir, value): if not dxpy.is_dxlink(value): return handler = dxpy.get_handler(value) if not isinstance(handler, dxpy.DXFile): return filename = make_unix_filename(handler.name) trg_dir = iname if subdir is not None: trg_dir = os.path.join(trg_dir, subdir) files[iname].append({'trg_fname': os.path.join(trg_dir, filename), 'handler': handler, 'src_file_id': handler.id}) dirs.append(trg_dir) # An array of inputs, for a single key. A directory # will be created per array entry. For example, if the input key is # FOO, and the inputs are {A, B, C}.vcf then, the directory structure # will be: # <idir>/FOO/00/A.vcf # <idir>/FOO/01/B.vcf # <idir>/FOO/02/C.vcf def add_file_array(input_name, links): num_files = len(links) if num_files == 0: return num_digits = len(str(num_files - 1)) dirs.append(input_name) for i, link in enumerate(links): subdir = str(i).zfill(num_digits) add_file(input_name, subdir, link) for input_name, value in list(job_input.items()): if isinstance(value, list): # This is a file array add_file_array(input_name, value) else: add_file(input_name, None, value) ## create a dictionary of the all non-file elements rest_hash = {key: val for key, val in list(job_input.items()) if key not in files} return dirs, files, rest_hash
def get_job_input_filenames(job_input_file)
Extracts the list of files from the job input and returns the set of directories to create and the set of files, with sources and destinations. The paths created are relative to the input directory. Note: we go through file names inside arrays and create a separate subdirectory for each entry. This avoids clobbering files when duplicate filenames appear in an array.
4.519178
4.47313
1.010294
''' Extract the inputSpec patterns, if they exist -- modifed from dx-upload-all-outputs Returns a dict of all patterns, with keys equal to the respective input parameter names. ''' input_spec = None if 'DX_JOB_ID' in environ: # works in the cloud, not locally job_desc = dxpy.describe(dxpy.JOB_ID) if job_desc["function"] == "main": # The input spec does not apply for subjobs desc = dxpy.describe(job_desc.get("app", job_desc.get("applet"))) if "inputSpec" in desc: input_spec = desc["inputSpec"] elif 'DX_TEST_DXAPP_JSON' in environ: # works only locally path_to_dxapp_json = environ['DX_TEST_DXAPP_JSON'] with open(path_to_dxapp_json) as fd: dxapp_json = json.load(fd) input_spec = dxapp_json.get('inputSpec') # convert to a dictionary. Each entry in the input spec # has {name, class} attributes. if input_spec is None: return {} # For each field name, return its patterns. # Make sure a pattern is legal, ignore illegal patterns. def is_legal_pattern(pattern): return "*" in pattern patterns_dict = {} for spec in input_spec: name = spec['name'] if 'patterns' in spec: patterns_dict[name] = [] for p in spec['patterns']: if is_legal_pattern(p): patterns_dict[name].append(p) return patterns_dict
def get_input_spec_patterns()
Extract the inputSpec patterns, if they exist -- modified from dx-upload-all-outputs. Returns a dict of all patterns, with keys equal to the respective input parameter names.
4.737505
3.43455
1.379367
file_key_descs, rest_hash = analyze_bash_vars(job_input_file, job_homedir) def string_of_elem(elem): result = None if isinstance(elem, basestring): result = elem elif isinstance(elem, dxpy.DXFile): result = json.dumps(dxpy.dxlink(elem)) else: result = json.dumps(elem) return pipes.quote(result) def string_of_value(val): if isinstance(val, list): string = " ".join([string_of_elem(vitem) for vitem in val]) return "( {} )".format(string) else: return string_of_elem(val) var_defs_hash = {} def gen_text_line_and_name_collision(key, val): ''' In the absence of a name collision, create a line describing a bash variable. ''' if check_name_collision: if key not in environ and key not in var_defs_hash: var_defs_hash[key] = val else: sys.stderr.write(dxpy.utils.printing.fill( "Creating environment variable ({}) would cause a name collision".format(key) ) + "\n") else: var_defs_hash[key] = val # Processing non-file variables before the file variables. This priorities them, # so that in case of name collisions, the file-variables will be dropped. for key, desc in list(rest_hash.items()): gen_text_line_and_name_collision(key, string_of_value(desc)) for file_key, desc in list(file_key_descs.items()): gen_text_line_and_name_collision(file_key, string_of_value(desc['handler'])) gen_text_line_and_name_collision(file_key + "_name", string_of_value(desc['basename'])) gen_text_line_and_name_collision(file_key + "_prefix", string_of_value(desc['prefix'])) gen_text_line_and_name_collision(file_key + "_path", string_of_value(desc['path'])) return var_defs_hash
def gen_bash_vars(job_input_file, job_homedir=None, check_name_collision=True)
:param job_input_file: path to a JSON file describing the job inputs :param job_homedir: path to home directory, used for testing purposes :param check_name_collision: should we check for name collisions? :return: dictionary mapping shell variable names to the string values they should be instantiated with :rtype: dict Calculates a value for each shell variable to instantiate. If *check_name_collision* is true, collisions with essential environment variables are detected; a colliding variable is not defined and a warning is printed instead.
3.199697
3.27967
0.975615
while True: try: future = next(concurrent.futures.as_completed(futures, timeout=THREAD_TIMEOUT_MAX)) break except concurrent.futures.TimeoutError: pass except KeyboardInterrupt: if print_traceback: traceback.print_stack() else: print('') os._exit(os.EX_IOERR) return future
def wait_for_a_future(futures, print_traceback=False)
Return the next future that completes. If a KeyboardInterrupt is received, then the entire process is exited immediately. See wait_for_all_futures for more notes.
3.325901
3.062651
1.085955
try: while True: waited_futures = concurrent.futures.wait(futures, timeout=60) if len(waited_futures.not_done) == 0: break except KeyboardInterrupt: if print_traceback: traceback.print_stack() else: print('') os._exit(os.EX_IOERR)
def wait_for_all_futures(futures, print_traceback=False)
Wait indefinitely for all futures in the input iterable to complete. Use a timeout to enable interrupt handling. Call os._exit() in case of KeyboardInterrupt. Otherwise, the atexit registered handler in concurrent.futures.thread will run, and issue blocking join() on all worker threads, requiring us to listen to events in worker threads in order to enable timely exit in response to Ctrl-C. Note: This still doesn't handle situations where Ctrl-C is pressed elsewhere in the code and there are worker threads with long-running tasks. Note: os._exit() doesn't work well with interactive mode (e.g. ipython). This may help: import __main__ as main; if hasattr(main, '__file__'): os._exit() else: os.exit()
2.84518
2.726153
1.043661
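A small usage sketch (the import path is an assumption): submit some work to a thread pool and block until everything is done, with Ctrl-C handled as described above.

.. code-block:: python

    import concurrent.futures

    from dxpy.utils import wait_for_all_futures  # import path is an assumption

    def work(i):
        return i * 2

    pool = concurrent.futures.ThreadPoolExecutor(max_workers=4)
    futures = [pool.submit(work, i) for i in range(8)]
    wait_for_all_futures(futures)            # returns once every future has completed
    print([f.result() for f in futures])     # [0, 2, 4, 6, 8, 10, 12, 14]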
tasks_in_progress = collections.deque() if max_active_tasks is None: max_active_tasks = cpu_count() # The following two functions facilitate GC by not adding extra variables to the enclosing scope. def submit_task(task_iterator, executor, futures_queue): retval = next(task_iterator, None) if retval is None: return False task_callable, task_args, task_kwargs = retval task_future = executor.submit(task_callable, *task_args, **task_kwargs) futures_queue.append(task_future) return True def next_result(tasks_in_progress): future = tasks_in_progress.popleft() try: result = future.result(timeout=THREAD_TIMEOUT_MAX) except KeyboardInterrupt: print('') os._exit(os.EX_IOERR) return result if do_first_task_sequentially: task_callable, task_args, task_kwargs = next(request_iterator) yield task_callable(*task_args, **task_kwargs) for _i in range(max_active_tasks): retval = submit_task(request_iterator, thread_pool, tasks_in_progress) if not retval: break while len(tasks_in_progress) > 0: result = next_result(tasks_in_progress) submit_task(request_iterator, thread_pool, tasks_in_progress) yield result del result
def response_iterator(request_iterator, thread_pool, max_active_tasks=None, do_first_task_sequentially=True)
:param request_iterator: An iterator producing inputs for consumption by the worker pool. :type request_iterator: iterator of callable, args, kwargs :param thread_pool: thread pool to submit the requests to :type thread_pool: concurrent.futures.thread.ThreadPoolExecutor :param max_active_tasks: The maximum number of tasks that may be either running or waiting for consumption of their result. If not given, defaults to the number of CPU cores on the machine. :type max_active_tasks: int :param do_first_task_sequentially: If True, executes (and returns the result of) the first request before submitting any other requests (the subsequent requests are submitted with *max_active_tasks* parallelism). :type do_first_task_sequentially: bool Rate-limited asynchronous multithreaded task runner. Consumes tasks from *request_iterator*. Yields their results in order, while allowing up to *max_active_tasks* to run simultaneously. Unlike concurrent.futures.Executor.map, prevents new tasks from starting while there are *max_active_tasks* or more unconsumed results.
2.577589
2.579559
0.999236
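The request iterator yields (callable, args, kwargs) tuples, and results come back in submission order. A hedged sketch (the import path is an assumption):

.. code-block:: python

    import concurrent.futures

    from dxpy.utils import response_iterator  # import path is an assumption

    def square(x):
        return x * x

    pool = concurrent.futures.ThreadPoolExecutor(max_workers=4)
    requests = ((square, (i,), {}) for i in range(10))   # one (callable, args, kwargs) per task
    for result in response_iterator(requests, pool, max_active_tasks=4):
        print(result)   # 0, 1, 4, 9, ... in order, with at most 4 tasks in flight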
error_msg = 'Error: Expected an int timestamp, a date format (e.g. YYYY-MM-DD), or an int with a single-letter suffix (s=seconds, m=minutes, h=hours, d=days, w=weeks, M=months, y=years; e.g. "-10d" indicates 10 days ago); but got {t}' if isinstance(t, basestring) and t.isdigit(): t = int(t) if isinstance(t, basestring): try: t = normalize_timedelta(t) except ValueError: try: t = int(time.mktime(dateutil.parser.parse(t).timetuple())*1000) assert t > 0 except (ValueError, OverflowError, AssertionError): raise ValueError(error_msg.format(t=t)) elif isinstance(t, numbers.Integral): units_multipliers = {'ms': 1, 's': 1000} if default_unit not in units_multipliers: raise ValueError("Expected default_unit to be one of 's' or 'ms'") t = t * units_multipliers[default_unit] else: raise ValueError(error_msg.format(t=t)) now = int(time.time()*1000) if t < 0 or (future and t < now): t += now return t
def normalize_time_input(t, future=False, default_unit='ms')
:param default_unit: units of the input time *t*; must be one of "s" or "ms". This param is only respected if *t* looks like an int (e.g. "12345", 12345). :type default_unit: string Converts inputs such as: "2012-05-01" "-5d" 1352863174 "1352863174" to milliseconds since epoch. See http://labix.org/python-dateutil and :meth:`normalize_timedelta`.
2.819301
2.680958
1.051602
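Some illustrative conversions, following the code above. The import path is an assumption, and the exact millisecond value for a date string depends on the local timezone.

.. code-block:: python

    from dxpy.utils import normalize_time_input  # import path is an assumption

    print(normalize_time_input(1352863174, default_unit='s'))   # 1352863174000
    print(normalize_time_input("1352863174000"))                # 1352863174000 (already ms)
    print(normalize_time_input("2012-05-01"))                   # ms since epoch for that date
    print(normalize_time_input("-5d"))                          # "now" minus 5 days, in ms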
try: return int(timedelta) * 1000 except ValueError as e: t, suffix = timedelta[:-1], timedelta[-1:] suffix_multipliers = {'s': 1000, 'm': 1000*60, 'h': 1000*60*60, 'd': 1000*60*60*24, 'w': 1000*60*60*24*7, 'M': 1000*60*60*24*30, 'y': 1000*60*60*24*365} if suffix not in suffix_multipliers: raise ValueError() return int(t) * suffix_multipliers[suffix]
def normalize_timedelta(timedelta)
Given a string like "1w" or "-5d", convert it to an integer in milliseconds. Integers without a suffix are interpreted as seconds. Note: not related to the datetime timedelta class.
1.779959
1.687229
1.05496
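Following the suffix multipliers above, for example (the import path is an assumption):

.. code-block:: python

    from dxpy.utils import normalize_timedelta  # import path is an assumption

    print(normalize_timedelta("30"))    # 30000       (bare integers are seconds)
    print(normalize_timedelta("45s"))   # 45000
    print(normalize_timedelta("1w"))    # 604800000   (1000*60*60*24*7)
    print(normalize_timedelta("-5d"))   # -432000000  (negative offsets are allowed)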
d = {} for k, v in ordered_pairs: if k in d: raise ValueError("duplicate key: %r" % (k,)) else: d[k] = v return d
def _dict_raise_on_duplicates(ordered_pairs)
Reject duplicate keys.
2.349304
2.193688
1.070938
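This helper is meant to be used as an ``object_pairs_hook`` for the json module (presumably how ``json_load_raise_on_duplicates``, used later in this section, applies it). A self-contained sketch with the hook copied inline:

.. code-block:: python

    import json

    def _dict_raise_on_duplicates(ordered_pairs):
        # Same logic as above: build a dict, rejecting duplicate keys.
        d = {}
        for k, v in ordered_pairs:
            if k in d:
                raise ValueError("duplicate key: %r" % (k,))
            d[k] = v
        return d

    doc = '{"name": "a", "name": "b"}'
    print(json.loads(doc))  # {'name': 'b'} -- plain json silently keeps the last value
    try:
        json.loads(doc, object_pairs_hook=_dict_raise_on_duplicates)
    except ValueError as e:
        print(e)            # duplicate key: 'name'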
''' Static method to return a copy of the input dictionary with an additional unique nonce :param input: an input dictionary that may be empty :type input: dict :returns an extended copy of the input with an additional nonce field The input dictionary is updated with a nonce only if does not already have a non empty nonce ''' input_cp = input_params.copy() if len(input_cp.get('nonce', '')) == 0: input_cp['nonce'] = str(Nonce()) return input_cp
def update_nonce(input_params)
Static method to return a copy of the input dictionary with an additional unique nonce. :param input_params: an input dictionary that may be empty :type input_params: dict :returns: an extended copy of the input with an additional nonce field A nonce is added to the copy only if the input does not already have a non-empty nonce.
6.165174
1.950893
3.16018
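A sketch of the copy semantics, assuming ``update_nonce`` (and the ``Nonce`` helper it relies on) is in scope, e.g. imported from the module above; the project ID is a placeholder.

.. code-block:: python

    run_input = {"project": "project-" + "x" * 24, "input": {}}

    extended = update_nonce(run_input)
    print("nonce" in extended)       # True  -- a fresh nonce was added to the copy
    print("nonce" in run_input)      # False -- the original dict is untouched

    # A non-empty nonce that is already present is preserved:
    print(update_nonce(extended)["nonce"] == extended["nonce"])   # True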
# Inline description from a readme file if 'description' not in json_spec: readme_filename = None for filename in 'README.md', 'Readme.md', 'readme.md': if os.path.exists(os.path.join(src_dir, filename)): readme_filename = filename break if readme_filename is not None: with open(os.path.join(src_dir, readme_filename)) as fh: json_spec['description'] = fh.read() # Inline developerNotes from Readme.developer.md if 'developerNotes' not in json_spec: for filename in 'README.developer.md', 'Readme.developer.md', 'readme.developer.md': if os.path.exists(os.path.join(src_dir, filename)): with open(os.path.join(src_dir, filename)) as fh: json_spec['developerNotes'] = fh.read() break
def inline_documentation_files(json_spec, src_dir)
Modifies the provided json_spec dict (which may be an app, applet, workflow spec) to inline the contents of the readme file into "description" and the developer readme into "developerNotes".
1.878602
1.668696
1.125791
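For example, a source directory containing a readme gets its contents inlined into the spec's "description". The sketch below uses ``dxpy.executable_builder.inline_documentation_files``, the path this function is referenced by elsewhere in this section; the spec contents are illustrative.

.. code-block:: python

    import os
    import tempfile

    import dxpy.executable_builder

    src_dir = tempfile.mkdtemp()
    with open(os.path.join(src_dir, "Readme.md"), "w") as fh:
        fh.write("This applet maps reads to a reference genome.\n")

    spec = {"name": "my_applet"}   # no 'description' yet
    dxpy.executable_builder.inline_documentation_files(spec, src_dir)
    print(spec["description"])     # contents of Readme.md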
assert(prefixed_name.startswith('app-') or prefixed_name.startswith('globalworkflow-')) if prefixed_name.partition('-')[0] == 'app': exception_type = dxpy.app_builder.AppBuilderException describe_method = dxpy.api.app_describe exception_msg = \ 'An app with the given name already exists and you are not a developer of that app' else: exception_type = dxpy.workflow_builder.WorkflowBuilderException describe_method = dxpy.api.global_workflow_describe exception_msg = \ 'A global workflow with the given name already exists and you are not a developer of that workflow' name_already_exists = True is_developer = False version = None executable_id = None FoundExecutable = collections.namedtuple('FoundExecutable', ['name', 'version', 'id']) try: describe_output = describe_method(prefixed_name, input_params={"fields": {"isDeveloperFor": True, "version": True, "id": True}}) is_developer = describe_output['isDeveloperFor'] version = describe_output['version'] executable_id = describe_output['id'] except dxpy.exceptions.DXAPIError as e: if e.name == 'ResourceNotFound': name_already_exists = False elif e.name == 'PermissionDenied': raise exception_type(exception_msg) else: raise e if not name_already_exists: # This app/workflow doesn't exist yet so its creation will succeed # (or at least, not fail on the basis of the ACL). return FoundExecutable(name=None, version=None, id=None) name_without_prefix = prefixed_name.partition('-')[2] if not is_developer: raise exception_type('You are not a developer for {n}'.format(n=name_without_prefix)) return FoundExecutable(name=name_without_prefix, version=version, id=executable_id)
def verify_developer_rights(prefixed_name)
Checks if the current user is a developer of the app or global workflow with the given name. If the app/global workflow exists and the user has developer rights to it, the function returns a named tuple representing the executable that was queried.
2.920305
2.674053
1.092089
if from_spec is None or from_command_line is None: return if set(from_spec) != set(from_command_line): raise builder_exception("--region and the 'regionalOptions' key in the JSON file do not agree")
def assert_consistent_regions(from_spec, from_command_line, builder_exception)
Verifies that the regions passed with the --region CLI argument and the ones specified in regionalOptions are the same (if both the CLI argument and the spec were used).
5.433126
3.936424
1.380219
reg_options_spec = json_spec.get('regionalOptions') json_fn = 'dxapp.json' if exec_type == 'app' else 'dxworkflow.json' if not isinstance(reg_options_spec, dict): raise executable_builder_exeception("The field 'regionalOptions' in " + json_fn + " must be a mapping") if not reg_options_spec: raise executable_builder_exeception( "The field 'regionalOptions' in " + json_fn + " must be a non-empty mapping") regional_options_list = list(reg_options_spec.items()) for region, opts_for_region in regional_options_list: if not isinstance(opts_for_region, dict): raise executable_builder_exeception("The field 'regionalOptions['" + region + "']' in " + json_fn + " must be a mapping") if set(opts_for_region.keys()) != set(regional_options_list[0][1].keys()): if set(opts_for_region.keys()) - set(regional_options_list[0][1].keys()): with_key, without_key = region, regional_options_list[0][0] key_name = next(iter(set(opts_for_region.keys()) - set(regional_options_list[0][1].keys()))) else: with_key, without_key = regional_options_list[0][0], region key_name = next(iter(set(regional_options_list[0][1].keys()) - set(opts_for_region.keys()))) raise executable_builder_exeception( "All regions in regionalOptions must specify the same options; " + "%s was given for %s but not for %s" % (key_name, with_key, without_key) ) if exec_type == 'app': for key in opts_for_region: if key in json_spec.get('runSpec', {}): raise executable_builder_exeception( key + " cannot be given in both runSpec and in regional options for " + region)
def assert_consistent_reg_options(exec_type, json_spec, executable_builder_exeception)
Validates the "regionalOptions" field and verifies all the regions used in "regionalOptions" have the same options.
2.298343
2.208712
1.040581
from_spec = json_spec.get('regionalOptions') if from_spec is not None: assert_consistent_reg_options(exec_type, json_spec, executable_builder_exeception) assert_consistent_regions(from_spec, from_command_line, executable_builder_exeception) enabled_regions = None if from_spec is not None: enabled_regions = from_spec.keys() elif from_command_line is not None: enabled_regions = from_command_line return enabled_regions
def get_enabled_regions(exec_type, json_spec, from_command_line, executable_builder_exeception)
Return a list of regions in which the global executable (app or global workflow) will be enabled, based on the "regionalOptions" in its JSON specification and/or the --region CLI argument used with "dx build". :param exec_type: 'app' or 'globalworkflow' :type exec_type: str. :param json_spec: The contents of dxapp.json or dxworkflow.json :type json_spec: dict or None. :param from_command_line: The regional options specified on the command-line via --region. :type from_command_line: list or None. :param executable_builder_exeception: Exception that will be thrown. :type executable_builder_exeception: AppBuilderException or WorkflowBuilderException.
2.799703
2.742345
1.020916
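Two illustrative calls, following the precedence described above. ``dxpy.executable_builder.get_enabled_regions`` and ``dxpy.app_builder.AppBuilderException`` are the names used elsewhere in this section; the region names are placeholders.

.. code-block:: python

    import dxpy.executable_builder
    from dxpy.app_builder import AppBuilderException

    # regionalOptions in the spec determines the regions when present...
    spec = {"regionalOptions": {"aws:us-east-1": {}, "azure:westus": {}}}
    print(sorted(dxpy.executable_builder.get_enabled_regions(
        'app', spec, None, AppBuilderException)))
    # ['aws:us-east-1', 'azure:westus']

    # ...and the --region list is used when the spec has no regionalOptions.
    print(dxpy.executable_builder.get_enabled_regions(
        'app', {}, ['aws:us-east-1'], AppBuilderException))
    # ['aws:us-east-1']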
describe_input = {} if fields is not None: describe_input['fields'] = fields self._desc = dxpy.api.analysis_describe(self._dxid, describe_input, **kwargs) return self._desc
def describe(self, fields=None, **kwargs)
:param fields: dict where the keys are field names that should be returned, and values should be set to True (by default, all fields are returned) :type fields: dict :returns: Description of the analysis :rtype: dict Returns a hash with key-value pairs containing information about the analysis
4.047696
5.218455
0.77565
dxpy.api.analysis_add_tags(self._dxid, {"tags": tags}, **kwargs)
def add_tags(self, tags, **kwargs)
:param tags: Tags to add to the analysis :type tags: list of strings Adds each of the specified tags to the analysis. Takes no action for tags that are already listed for the analysis.
7.307796
5.738739
1.273415
dxpy.api.analysis_remove_tags(self._dxid, {"tags": tags}, **kwargs)
def remove_tags(self, tags, **kwargs)
:param tags: Tags to remove from the analysis :type tags: list of strings Removes each of the specified tags from the analysis. Takes no action for tags that the analysis does not currently have.
7.772914
6.090889
1.276154
dxpy.api.analysis_set_properties(self._dxid, {"properties": properties}, **kwargs)
def set_properties(self, properties, **kwargs)
:param properties: Property names and values given as key-value pairs of strings :type properties: dict Given key-value pairs in *properties* for property names and values, the properties are set on the analysis for the given property names. Any property with a value of :const:`None` indicates the property will be deleted. .. note:: Any existing properties not mentioned in *properties* are not modified by this method.
7.874678
8.254933
0.953936
''' :param field: Output field name of this analysis :type field: string :param index: If the referenced field is an array, optionally specify an index (starting from 0) to indicate a particular member of the array :type index: int :param metadata: If the referenced field is of a data object class, a string indicating the metadata that should be read, e.g. "name", "properties.propkey", "details.refgenome" :type metadata: string Returns a dict containing a valid reference to an output of this analysis. ''' link = {"$dnanexus_link": {"analysis": self._dxid, "field": field}} if index is not None: link["$dnanexus_link"]["index"] = index if metadata is not None: link["$dnanexus_link"]["metadata"] = metadata return link
def get_output_ref(self, field, index=None, metadata=None)
:param field: Output field name of this analysis :type field: string :param index: If the referenced field is an array, optionally specify an index (starting from 0) to indicate a particular member of the array :type index: int :param metadata: If the referenced field is of a data object class, a string indicating the metadata that should be read, e.g. "name", "properties.propkey", "details.refgenome" :type metadata: string Returns a dict containing a valid reference to an output of this analysis.
4.659899
1.550601
3.00522
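Building the reference requires no API call, so a sketch can use a placeholder analysis ID; the field name is illustrative.

.. code-block:: python

    import dxpy

    analysis = dxpy.DXAnalysis("analysis-" + "x" * 24)   # placeholder ID
    print(analysis.get_output_ref("mappings", index=0))
    # {'$dnanexus_link': {'analysis': 'analysis-xxxxxxxxxxxxxxxxxxxxxxxx',
    #                     'field': 'mappings', 'index': 0}}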
for input_field in app_json.get('inputSpec', []): for suggestion in input_field.get('suggestions', []): if 'project' in suggestion: try: project = dxpy.api.project_describe(suggestion['project'], {"permissions": True}) if 'PUBLIC' not in project['permissions'] and publish: logger.warn('Project {name} NOT PUBLIC!'.format(name=project['name'])) except dxpy.exceptions.DXAPIError as e: if e.code == 404: logger.warn('Suggested project {name} does not exist, or not accessible by user'.format( name=suggestion['project'])) if 'path' in suggestion: try: check_folder_exists(suggestion['project'], suggestion['path'], '') except ResolutionError as e: logger.warn('Folder {path} could not be found in project {project}'.format( path=suggestion['path'], project=suggestion['project'])) if '$dnanexus_link' in suggestion: if suggestion['$dnanexus_link'].startswith(('file-', 'record-')): try: dnanexus_link = dxpy.describe(suggestion['$dnanexus_link']) except dxpy.exceptions.DXAPIError as e: if e.code == 404: raise dxpy.app_builder.AppBuilderException( 'Suggested object {name} could not be found'.format( name=suggestion['$dnanexus_link'])) except Exception as e: raise dxpy.app_builder.AppBuilderException(str(e)) if 'value' in suggestion and isinstance(suggestion["value"], dict): if '$dnanexus_link' in suggestion['value']: # Check if we have JSON or string if isinstance(suggestion['value']['$dnanexus_link'], dict): if 'project' in suggestion['value']['$dnanexus_link']: try: dxpy.api.project_describe(suggestion['value']['$dnanexus_link']['project']) except dxpy.exceptions.DXAPIError as e: if e.code == 404: logger.warn('Suggested project {name} does not exist, or not accessible by user'.format( name=suggestion['value']['$dnanexus_link']['project'])) elif isinstance(suggestion['value']['$dnanexus_link'], basestring): if suggestion['value']['$dnanexus_link'].startswith(('file-', 'record-')): try: dnanexus_link = dxpy.describe(suggestion['value']['$dnanexus_link']) except dxpy.exceptions.DXAPIError as e: if e.code == 404: raise dxpy.app_builder.AppBuilderException( 'Suggested object {name} could not be found'.format( name=suggestion['value']['$dnanexus_link'])) except Exception as e: raise dxpy.app_builder.AppBuilderException(str(e))
def _check_suggestions(app_json, publish=False)
Examines the specified dxapp.json file and warns about any violations of suggestions guidelines. :raises: AppBuilderException for data objects that could not be found
2.031843
1.994905
1.018517
# This function needs the language to be explicitly set, so we can # generate an appropriate temp filename. if lang == 'python2.7': temp_basename = 'inlined_code_from_dxapp_json.py' elif lang == 'bash': temp_basename = 'inlined_code_from_dxapp_json.sh' else: raise ValueError('lang must be one of "python2.7" or "bash"') # Dump the contents out to a temporary file, then call _check_file_syntax. with open(os.path.join(temp_dir, temp_basename), 'w') as ofile: ofile.write(code) _check_file_syntax(os.path.join(temp_dir, temp_basename), temp_dir, override_lang=lang, enforce=enforce)
def _check_syntax(code, lang, temp_dir, enforce=True)
Checks that the code whose text is in CODE parses as LANG. Raises DXSyntaxError if there is a problem and "enforce" is True.
3.605463
3.558195
1.013284
def check_python(filename): # Generate a semi-recognizable name to write the pyc to. Of # course it's possible that different files being scanned could # have the same basename, so this path won't be unique, but the # checks don't run concurrently so this shouldn't cause any # problems. pyc_path = os.path.join(temp_dir, os.path.basename(filename) + ".pyc") try: if USING_PYTHON2: filename = filename.encode(sys.getfilesystemencoding()) py_compile.compile(filename, cfile=pyc_path, doraise=True) finally: try: os.unlink(pyc_path) except OSError: pass def check_bash(filename): if platform.system() == 'Windows': logging.warn( 'Skipping bash syntax check due to unavailability of bash on Windows.') else: subprocess.check_output(["/bin/bash", "-n", filename], stderr=subprocess.STDOUT) if override_lang == 'python2.7': checker_fn = check_python elif override_lang == 'bash': checker_fn = check_bash elif filename.endswith('.py'): checker_fn = check_python elif filename.endswith('.sh'): checker_fn = check_bash else: # Ignore other kinds of files. return # Do a test read of the file to catch errors like the file not # existing or not being readable. open(filename) try: checker_fn(filename) except subprocess.CalledProcessError as e: print(filename + " has a syntax error! Interpreter output:", file=sys.stderr) for line in e.output.strip("\n").split("\n"): print(" " + line.rstrip("\n"), file=sys.stderr) if enforce: raise DXSyntaxError(filename + " has a syntax error") except py_compile.PyCompileError as e: print(filename + " has a syntax error! Interpreter output:", file=sys.stderr) print(" " + e.msg.strip(), file=sys.stderr) if enforce: raise DXSyntaxError(e.msg.strip())
def _check_file_syntax(filename, temp_dir, override_lang=None, enforce=True)
Checks that the code in FILENAME parses, attempting to autodetect the language if necessary. Raises IOError if the file cannot be read. Raises DXSyntaxError if there is a problem and "enforce" is True.
3.343191
3.266884
1.023358
temp_dir = tempfile.mkdtemp(prefix='dx-build_tmp') try: _verify_app_source_dir_impl(src_dir, temp_dir, mode, enforce=enforce) finally: shutil.rmtree(temp_dir)
def _verify_app_source_dir(src_dir, mode, enforce=True)
Performs syntax and lint checks on the app source. Precondition: the dxapp.json file exists and can be parsed.
3.117031
3.131088
0.99551
if not os.path.isdir(src_dir): parser.error("%s is not a directory" % src_dir) if not os.path.exists(os.path.join(src_dir, "dxapp.json")): raise dxpy.app_builder.AppBuilderException("Directory %s does not contain dxapp.json: not a valid DNAnexus app source directory" % src_dir) with open(os.path.join(src_dir, "dxapp.json")) as app_desc: try: return json_load_raise_on_duplicates(app_desc) except Exception as e: raise dxpy.app_builder.AppBuilderException("Could not parse dxapp.json file as JSON: " + str(e.args))
def _parse_app_spec(src_dir)
Returns the parsed contents of dxapp.json. Raises either AppBuilderException or a parser error (exit codes 3 or 2 respectively) if this cannot be done.
2.49126
2.255058
1.104744
if len(sys.argv) > 0: if sys.argv[0].endswith('dx-build-app'): logging.warn('Warning: dx-build-app has been replaced with "dx build --create-app". Please update your scripts.') elif sys.argv[0].endswith('dx-build-applet'): logging.warn('Warning: dx-build-applet has been replaced with "dx build". Please update your scripts.') exit(0)
def main(**kwargs)
Entry point for dx-build-app(let). Don't call this function as a subroutine in your program! It is liable to sys.exit your program when it detects certain error conditions, so you can't recover from those as you could if it raised exceptions. Instead, call dx_build_app.build_and_upload_locally which provides the real implementation for dx-build-app(let) but is easier to use in your program.
3.80626
3.354165
1.134786
''' :param dxid: App ID :type dxid: string :param name: App name :type name: string :param alias: App version or tag :type alias: string :raises: :exc:`~dxpy.exceptions.DXError` if *dxid* and some other input are both given or if neither *dxid* nor *name* are given Discards the currently stored ID and associates the handler with the requested parameters. Note that if *dxid* is given, the other fields should not be given, and if *name* is given, *alias* has default value "default". ''' self._dxid = None self._name = None self._alias = None if dxid is not None: if name is not None or alias is not None: raise DXError("Did not expect name or alias to be given if dxid is given") verify_string_dxid(dxid, self._class) self._dxid = dxid elif name is not None: self._name = name if not isinstance(name, basestring): raise DXError("App name needs to be a string: %r" % (name,)) if alias is not None: if not isinstance(alias, basestring): raise DXError("App alias needs to be a string: %r" % (alias,)) self._alias = alias else: self._alias = 'default'
def set_id(self, dxid=None, name=None, alias=None)
:param dxid: App ID :type dxid: string :param name: App name :type name: string :param alias: App version or tag :type alias: string :raises: :exc:`~dxpy.exceptions.DXError` if *dxid* and some other input are both given or if neither *dxid* nor *name* are given Discards the currently stored ID and associates the handler with the requested parameters. Note that if *dxid* is given, the other fields should not be given, and if *name* is given, *alias* has default value "default".
3.041626
1.585347
1.918587
''' :returns: Object ID of associated app :rtype: string Returns the object ID of the app that the handler is currently associated with. ''' if self._dxid is not None: return self._dxid else: return 'app-' + self._name + '/' + self._alias
def get_id(self)
:returns: Object ID of associated app :rtype: string Returns the object ID of the app that the handler is currently associated with.
8.78933
3.441399
2.553999
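A sketch of the ID-versus-name addressing described in the two records above; no API call is made, and the app name, alias, and ID are illustrative placeholders.

.. code-block:: python

    import dxpy

    app = dxpy.DXApp(name="my_app")
    print(app.get_id())                      # app-my_app/default

    pinned = dxpy.DXApp(name="my_app", alias="1.0.0")
    print(pinned.get_id())                   # app-my_app/1.0.0

    by_id = dxpy.DXApp("app-" + "x" * 24)    # placeholder ID
    print(by_id.get_id())                    # app-xxxxxxxxxxxxxxxxxxxxxxxx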
''' :param fields: Hash where the keys are field names that should be returned, and values should be set to True (default is that all fields are returned) :type fields: dict :returns: Description of the remote app object :rtype: dict Returns a dict with a description of the app. The result includes the key-value pairs as specified in the API documentation for the `/app-xxxx/describe <https://wiki.dnanexus.com/API-Specification-v1.0.0/Apps#API-method%253A-%252Fapp-xxxx%255B%252Fyyyy%255D%252Fdescribe>`_ method. ''' describe_input = {} if fields: describe_input['fields'] = fields if self._dxid is not None: self._desc = dxpy.api.app_describe(self._dxid, input_params=describe_input, **kwargs) else: self._desc = dxpy.api.app_describe('app-' + self._name, alias=self._alias, input_params=describe_input, **kwargs) return self._desc
def describe(self, fields=None, **kwargs)
:param fields: Hash where the keys are field names that should be returned, and values should be set to True (default is that all fields are returned) :type fields: dict :returns: Description of the remote app object :rtype: dict Returns a dict with a description of the app. The result includes the key-value pairs as specified in the API documentation for the `/app-xxxx/describe <https://wiki.dnanexus.com/API-Specification-v1.0.0/Apps#API-method%253A-%252Fapp-xxxx%255B%252Fyyyy%255D%252Fdescribe>`_ method.
4.013118
1.657451
2.421259
''' :param applet: ID of the applet to replace the app's contents with :type applet: string :param details: Metadata to store with the app (optional) :type details: dict or list :param access: Access specification (optional) :type access: dict :param resources: Specifies what is to be put into the app's resources container. Must be a string containing a project ID, or a list containing object IDs. (optional) :type resources: string or list Updates the parameters of an existing app. See the API documentation for the `/app/update <https://wiki.dnanexus.com/API-Specification-v1.0.0/Apps#API-method%253A-%252Fapp-xxxx%255B%252Fyyyy%255D%252Fupdate>`_ method for more info. The current user must be a developer of the app. ''' updates = {} for field in 'applet', 'billing', 'access', 'resources', 'details': if field in kwargs: updates[field] = kwargs[field] del kwargs[field] if self._dxid is not None: resp = dxpy.api.app_update(self._dxid, input_params=updates, **kwargs) else: resp = dxpy.api.app_update('app-' + self._name, alias=self._alias, input_params=updates, **kwargs)
def update(self, **kwargs)
:param applet: ID of the applet to replace the app's contents with :type applet: string :param details: Metadata to store with the app (optional) :type details: dict or list :param access: Access specification (optional) :type access: dict :param resources: Specifies what is to be put into the app's resources container. Must be a string containing a project ID, or a list containing object IDs. (optional) :type resources: string or list Updates the parameters of an existing app. See the API documentation for the `/app/update <https://wiki.dnanexus.com/API-Specification-v1.0.0/Apps#API-method%253A-%252Fapp-xxxx%255B%252Fyyyy%255D%252Fupdate>`_ method for more info. The current user must be a developer of the app.
4.683519
1.637148
2.86078
if self._dxid is not None: return dxpy.api.app_add_tags(self._dxid, input_params={"tags": tags}, **kwargs) else: return dxpy.api.app_add_tags('app-' + self._name, alias=self._alias, input_params={"tags": tags}, **kwargs)
def add_tags(self, tags, **kwargs)
:param tags: Tags to add to the app :type tags: array Adds the specified application name tags (aliases) to this app. The current user must be a developer of the app.
3.787174
3.60871
1.049454
if self._dxid is not None: return dxpy.api.app_remove_tags(self._dxid, input_params={"tags": tags}, **kwargs) else: return dxpy.api.app_remove_tags('app-' + self._name, alias=self._alias, input_params={"tags": tags}, **kwargs)
def remove_tags(self, tags, **kwargs)
:param tags: Tags to remove from the app :type tags: array Removes the specified application name tags (aliases) from this app, so that it is no longer addressable by those aliases. The current user must be a developer of the app.
3.737496
3.512062
1.064188
if self._dxid is not None: return dxpy.api.app_install(self._dxid, **kwargs) else: return dxpy.api.app_install('app-' + self._name, alias=self._alias, **kwargs)
def install(self, **kwargs)
Installs the app in the current user's account.
4.270914
3.568181
1.196944
if self._dxid is not None: return dxpy.api.app_uninstall(self._dxid, **kwargs) else: return dxpy.api.app_uninstall('app-' + self._name, alias=self._alias, **kwargs)
def uninstall(self, **kwargs)
Uninstalls the app from the current user's account.
3.937091
3.532838
1.114427
if self._dxid is not None: return dxpy.api.app_get(self._dxid, **kwargs) else: return dxpy.api.app_get('app-' + self._name, alias=self._alias, **kwargs)
def get(self, **kwargs)
:returns: Full specification of the remote app object :rtype: dict Returns the contents of the app. The result includes the key-value pairs as specified in the API documentation for the `/app-xxxx/get <https://wiki.dnanexus.com/API-Specification-v1.0.0/Apps#API-method%253A-%252Fapp-xxxx%255B%252Fyyyy%255D%252Fget>`_ method.
4.841805
3.794583
1.275978
if self._dxid is not None: return dxpy.api.app_delete(self._dxid, **kwargs) else: return dxpy.api.app_delete('app-' + self._name, alias=self._alias, **kwargs)
def delete(self, **kwargs)
Removes this app object from the platform. The current user must be a developer of the app.
4.061911
3.651905
1.112272
# Rename app_input to preserve API compatibility when calling # DXApp.run(app_input=...). return super(DXApp, self).run(app_input, *args, **kwargs)
def run(self, app_input, *args, **kwargs)
Creates a new job that executes the function "main" of this app with the given input *app_input*. See :meth:`dxpy.bindings.dxapplet.DXExecutable.run` for the available args.
7.535227
7.486384
1.006524
if not os.path.isdir(src_dir): parser.error("{} is not a directory".format(src_dir)) if not os.path.exists(os.path.join(src_dir, json_file_name)): raise WorkflowBuilderException( "Directory {} does not contain dxworkflow.json: not a valid DNAnexus workflow source directory" .format(src_dir)) with open(os.path.join(src_dir, json_file_name)) as desc: try: return json_load_raise_on_duplicates(desc) except Exception as e: raise WorkflowBuilderException("Could not parse {} file as JSON: {}".format(json_file_name, e.args))
def _parse_executable_spec(src_dir, json_file_name, parser)
Returns the parsed contents of a json specification. Raises WorkflowBuilderException (exit code 3) if this cannot be done.
3.0929
2.80221
1.103736
if build_project_id: return build_project_id if 'project' in json_spec: return json_spec['project'] if dxpy.WORKSPACE_ID: return dxpy.WORKSPACE_ID error_msg = "Can't create a workflow without specifying a destination project; " error_msg += "please use the -d/--destination flag to explicitly specify a project" raise WorkflowBuilderException(error_msg)
def _get_destination_project(json_spec, args, build_project_id=None)
Returns the destination project in which the workflow should be created. It can be set in multiple ways, whose order of precedence is: 1. --destination, -d option supplied with `dx build`, 2. 'project' specified in the json file, 3. project set in the dxpy.WORKSPACE_ID environment variable.
3.350486
2.604517
1.286413
dest_folder = folder_name or json_spec.get('folder') or '/' if not dest_folder.endswith('/'): dest_folder = dest_folder + '/' return dest_folder
def _get_destination_folder(json_spec, folder_name=None)
Returns the destination folder in which the workflow should be created. It can be set in the json specification or by the --destination option supplied with `dx build`. The order of precedence is: 1. --destination, -d option, 2. 'folder' specified in the json file, with '/' as the default.
2.747652
3.197948
0.859192
assert(isinstance(categories_to_set, list)) existing_categories = dxpy.api.global_workflow_list_categories(global_workflow_id)['categories'] categories_to_add = set(categories_to_set).difference(set(existing_categories)) categories_to_remove = set(existing_categories).difference(set(categories_to_set)) if categories_to_add: dxpy.api.global_workflow_add_categories(global_workflow_id, input_params={'categories': list(categories_to_add)}) if categories_to_remove: dxpy.api.global_workflow_remove_categories(global_workflow_id, input_params={'categories': list(categories_to_remove)})
def _set_categories_on_workflow(global_workflow_id, categories_to_set)
Note: Categories are set on the workflow series level, i.e. the same set applies to all versions.
1.7035
1.70477
0.999255
requested_name = json_spec['name'] requested_version = json_spec['version'] if requested_name == name and requested_version == version: return True else: try: desc_output = dxpy.api.global_workflow_describe('globalworkflow-' + json_spec['name'], alias=json_spec['version'], input_params={"fields": {"name": True, "version": True}}) return desc_output['name'] == json_spec['name'] and desc_output['version'] == json_spec['version'] except dxpy.exceptions.DXAPIError: return False except: raise
def _version_exists(json_spec, name=None, version=None)
Returns True if a global workflow with the given name and version already exists in the platform and the user has developer rights to the workflow. "name" and "version" can be passed if we already made a "describe" API call on the global workflow and so know that the requested name and version already exist.
3.280507
2.913586
1.125935
if not isinstance(ignore_reuse_stages, list): raise WorkflowBuilderException('"ignoreReuse" must be a list of strings - stage IDs or "*"') ignore_reuse_set = set(ignore_reuse_stages) if '*' in ignore_reuse_set and len(ignore_reuse_set) == 1: return stage_ids = set([stage.get('id') for stage in stages]) for ignored in ignore_reuse_set: if ignored not in stage_ids: raise WorkflowBuilderException( 'Stage with ID {} not found. Add a matching "id" for the stage you wish to set ignoreReuse for'.format(ignored))
def validate_ignore_reuse(stages, ignore_reuse_stages)
Checks if each stage ID specified in ignore_reuse_stages exists in the workflow definition. If ignore_reuse_stages contains only '*', the field is valid.
4.718874
4.304155
1.096353
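A sketch of the validation, assuming these names are importable from ``dxpy.workflow_builder`` (the module referenced elsewhere in this section); the stage IDs are illustrative.

.. code-block:: python

    from dxpy.workflow_builder import validate_ignore_reuse, WorkflowBuilderException

    stages = [{"id": "stage_align"}, {"id": "stage_sort"}]

    validate_ignore_reuse(stages, ["stage_align"])   # OK: the stage ID exists
    validate_ignore_reuse(stages, ["*"])             # OK: '*' alone covers all stages
    try:
        validate_ignore_reuse(stages, ["stage_typo"])
    except WorkflowBuilderException as e:
        print(e)    # Stage with ID stage_typo not found. ...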
if not isinstance(stages, list): raise WorkflowBuilderException("Stages must be specified as a list of dictionaries") validated_stages = [] for index, stage in enumerate(stages): validated_stages.append(_get_validated_stage(stage, index)) return validated_stages
def _get_validated_stages(stages)
Validates stages of the workflow as a list of dictionaries.
2.72323
2.228382
1.222066
validated = {} override_project_id, override_folder, override_workflow_name = \ dxpy.executable_builder.get_parsed_destination(args.destination) validated['project'] = _get_destination_project(json_spec, args, override_project_id) validated['folder'] = _get_destination_folder(json_spec, override_folder) workflow_name = _get_workflow_name(json_spec, override_workflow_name) if not workflow_name: print('Warning: workflow name is not specified') else: validated['name'] = workflow_name return validated
def _validate_json_for_regular_workflow(json_spec, args)
Validates fields used only for building a regular, project-based workflow.
3.572081
3.519372
1.014977
# TODO: verify the billTo can build the workflow # TODO: if the global workflow build fails add an option to interactively change billto # TODO: (or other simple fields) instead of failing altogether # TODO: get a confirmation before building a workflow that may be costly if 'name' not in json_spec: raise WorkflowBuilderException( "dxworkflow.json contains no 'name' field, but it is required to build a global workflow") if not dxpy.executable_builder.GLOBAL_EXEC_NAME_RE.match(json_spec['name']): raise WorkflowBuilderException( "The name of your workflow must match /^[a-zA-Z0-9._-]+$/") if json_spec['name'] != json_spec['name'].lower(): logger.warn('workflow name "{}" should be all lowercase'.format(json_spec['name'])) if 'version' not in json_spec: raise WorkflowBuilderException( "dxworkflow.json contains no 'version' field, but it is required to build a global workflow") if not dxpy.executable_builder.GLOBAL_EXEC_VERSION_RE.match(json_spec['version']): logger.warn('"version" {} should be semver compliant (e.g. of the form X.Y.Z)'.format(json_spec['version'])) if 'details' in json_spec: if not isinstance(json_spec['details'], dict): raise WorkflowBuilderException( 'The field "details" must be a dictionary') if 'regionalOptions' in json_spec: if not (isinstance(json_spec['regionalOptions'], dict) and json_spec['regionalOptions'] and all([isinstance(i, dict) for i in json_spec['regionalOptions'].values()])): raise WorkflowBuilderException( 'The field "regionalOptions" must be a non-empty dictionary whose values are dictionaries') if args.bill_to: json_spec["billTo"] = args.bill_to
def _validate_json_for_global_workflow(json_spec, args)
Validates fields used for building a global workflow. Since building a global workflow is done after all the underlying workflows are built, which may be time-consuming, we validate as much as possible here.
3.260141
3.169051
1.028744
if not json_spec: return if not args: return validated_spec = copy.deepcopy(json_spec) # print ignored keys if present in json_spec unsupported_keys = _get_unsupported_keys(validated_spec.keys(), SUPPORTED_KEYS) if len(unsupported_keys) > 0: logger.warn( "Warning: the following root level fields are not supported and will be ignored: {}" .format(", ".join(unsupported_keys))) if 'stages' in validated_spec: validated_spec['stages'] = _get_validated_stages(validated_spec['stages']) if 'name' in validated_spec: if args.src_dir != validated_spec['name']: logger.warn( 'workflow name "%s" does not match containing directory "%s"' % (validated_spec['name'], args.src_dir)) if 'ignoreReuse' in validated_spec: validate_ignore_reuse(validated_spec['stages'], validated_spec['ignoreReuse']) validated_documentation_fields = _get_validated_json_for_build_or_update(validated_spec, args) validated_spec.update(validated_documentation_fields) # Project-based workflow specific validation if args.mode == 'workflow': validated = _validate_json_for_regular_workflow(json_spec, args) validated_spec.update(validated) # Global workflow specific validation if args.mode == 'globalworkflow': _validate_json_for_global_workflow(validated_spec, args) return validated_spec
def _get_validated_json(json_spec, args)
Validates dxworkflow.json and returns the JSON that can be sent with a /workflow/new or /globalworkflow/new API request.
3.637955
3.375006
1.077911
validated = copy.deepcopy(json_spec) dxpy.executable_builder.inline_documentation_files(validated, args.src_dir) if 'title' not in json_spec: logger.warn("dxworkflow.json is missing a title, please add one in the 'title' field") if 'summary' not in json_spec: logger.warn("dxworkflow.json is missing a summary, please add one in the 'summary' field") else: if json_spec['summary'].endswith('.'): logger.warn("summary {} should be a short phrase not ending in a period".format(json_spec['summary'],)) return validated
def _get_validated_json_for_build_or_update(json_spec, args)
Validates those fields that can be used when either building a new version (of a local, project-based workflow) or updating an existing version (of a global workflow).
4.268427
4.202961
1.015576
executables = [i.get("executable") for i in workflow_spec.get("stages")] for exect in executables: if exect.startswith("applet-") and len(workflow_enabled_regions) > 1: raise WorkflowBuilderException("Building a global workflow with applets in more than one region is not yet supported.") elif exect.startswith("app-"): app_regional_options = dxpy.api.app_describe(exect, input_params={"fields": {"regionalOptions": True}}) app_regions = set(app_regional_options['regionalOptions'].keys()) if not workflow_enabled_regions.issubset(app_regions): additional_workflow_regions = workflow_enabled_regions - app_regions mesg = "The app {} is enabled in regions {} while the global workflow in {}.".format( exect, ", ".join(app_regions), ", ".join(workflow_enabled_regions)) mesg += " The workflow will not be able to run in {}.".format(", ".join(additional_workflow_regions)) mesg += " If you are a developer of the app, you can enable the app in {} to run the workflow in that region(s).".format( ", ".join(additional_workflow_regions)) logger.warn(mesg) elif exect.startswith("workflow-"): # We recurse to check the regions of the executables of the inner workflow inner_workflow_spec = dxpy.api.workflow_describe(exect) _assert_executable_regions_match(workflow_enabled_regions, inner_workflow_spec) elif exect.startswith("globalworkflow-"): raise WorkflowBuilderException("Building a global workflow with nested global workflows is not yet supported")
def _assert_executable_regions_match(workflow_enabled_regions, workflow_spec)
Check if the global workflow regions and the regions of stages (apps) match. If the workflow contains any applets, the workflow can be currently enabled in only one region - the region in which the applets are stored.
3.377924
3.167192
1.066536
workflow_id = dxpy.api.workflow_new(json_spec)["id"] dxpy.api.workflow_close(workflow_id) return workflow_id
def _build_regular_workflow(json_spec)
Precondition: json_spec must be validated
3.364978
3.453675
0.974318
enabled_regions = dxpy.executable_builder.get_enabled_regions('globalworkflow', json_spec, from_command_line, WorkflowBuilderException) if not enabled_regions: enabled_regions = [] if not dxpy.WORKSPACE_ID: msg = "A context project must be selected to enable a workflow in the project's region." msg += " You can use 'dx select' to select a project. Otherwise you can use --region option" msg += " to select a region in which the workflow should be enabled" raise(WorkflowBuilderException(msg)) region = dxpy.api.project_describe(dxpy.WORKSPACE_ID, input_params={"fields": {"region": True}})["region"] enabled_regions.append(region) if not enabled_regions: raise AssertionError("This workflow should be enabled in at least one region") return set(enabled_regions)
def _get_validated_enabled_regions(json_spec, from_command_line)
Returns a set of regions (region names) in which the global workflow should be enabled. Also validates and synchronizes the regions passed via CLI argument and in the regionalOptions field.
4.306735
4.05547
1.061957
# Create one temp project in each region
projects_by_region = {}  # Project IDs by region
for region in enabled_regions:
    try:
        project_input = {"name": "Temporary build project for dx build global workflow",
                         "region": region}
        if args.bill_to:
            project_input["billTo"] = args.bill_to
        temp_project = dxpy.api.project_new(project_input)["id"]
        projects_by_region[region] = temp_project
        logger.debug("Created temporary project {} to build in".format(temp_project))
    except:
        # Clean up any temp projects that might have been created
        if projects_by_region:
            dxpy.executable_builder.delete_temporary_projects(projects_by_region.values())
        err_exit()

return projects_by_region
def _create_temporary_projects(enabled_regions, args)
Creates a temporary project in each region, needed to build the underlying workflows for a global workflow. Returns a dictionary with region names as keys and project IDs as values.

The regions in which projects will be created can be:
i. regions specified in dxworkflow.json "regionalOptions"
ii. regions specified as an argument to "dx build"
iii. the current context project's region, if none of the above are set

If both args and dxworkflow.json specify regions, they must match.
3.836937
3.443338
1.114307
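A minimal sketch of the underlying API calls for a single region; the region name is a hypothetical example and the billTo override is omitted:

import dxpy

# Hypothetical region; billTo is left out.
project_input = {"name": "Temporary build project for dx build global workflow",
                 "region": "aws:us-east-1"}
temp_project_id = dxpy.api.project_new(project_input)["id"]

# ... build the underlying workflow inside temp_project_id ...

# Temporary projects are destroyed once the global workflow has been created.
dxpy.api.project_destroy(temp_project_id)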
projects_by_region = _create_temporary_projects(enabled_regions, args)

workflows_by_region = {}
try:
    for region, project in projects_by_region.items():
        json_spec['project'] = project
        workflow_id = _build_regular_workflow(json_spec)
        logger.debug("Created workflow " + workflow_id + " successfully")
        workflows_by_region[region] = workflow_id
except:
    # Clean up
    if projects_by_region:
        dxpy.executable_builder.delete_temporary_projects(projects_by_region.values())
    raise

return workflows_by_region, projects_by_region
def _build_underlying_workflows(enabled_regions, json_spec, args)
Creates a workflow in a temporary project for each enabled region. Returns a tuple of dictionaries: workflow IDs by region and project IDs by region. The caller is responsible for destroying the projects if this method returns properly.
3.240301
2.743394
1.181128
# First determine in which regions the global workflow needs to be available
enabled_regions = _get_validated_enabled_regions(json_spec, args.region)

# Verify all the stages are also enabled in these regions
# TODO: Add support for dx building multi-region global workflows with applets
_assert_executable_regions_match(enabled_regions, json_spec)

workflows_by_region, projects_by_region = {}, {}  # IDs by region
try:
    # prepare "regionalOptions" field for the globalworkflow/new input
    workflows_by_region, projects_by_region = \
        _build_underlying_workflows(enabled_regions, json_spec, args)
    regional_options = {}
    for region, workflow_id in workflows_by_region.items():
        regional_options[region] = {'workflow': workflow_id}
    json_spec.update({'regionalOptions': regional_options})

    # leave only fields that are actually used to build the workflow
    gwf_provided_keys = GLOBALWF_SUPPORTED_KEYS.intersection(set(json_spec.keys()))
    gwf_final_json = dict((k, v) for k, v in json_spec.items() if k in gwf_provided_keys)

    # we don't want to print the whole documentation to the screen so we'll remove these fields
    print_spec = copy.deepcopy(gwf_final_json)
    if "description" in gwf_final_json:
        del print_spec["description"]
    if "developerNotes" in gwf_final_json:
        del print_spec["developerNotes"]
    logger.info("Will create global workflow with spec: {}".format(json.dumps(print_spec)))

    # Create a new global workflow version on the platform
    global_workflow_id = dxpy.api.global_workflow_new(gwf_final_json)["id"]
    logger.info("Uploaded global workflow {n}/{v} successfully".format(n=gwf_final_json["name"],
                                                                       v=gwf_final_json["version"]))
    logger.info("You can publish this workflow with:")
    logger.info(" dx publish {n}/{v}".format(n=gwf_final_json["name"],
                                             v=gwf_final_json["version"]))
finally:
    # Clean up
    if projects_by_region:
        dxpy.executable_builder.delete_temporary_projects(projects_by_region.values())

# Set any additional fields on the created workflow
try:
    _set_categories_on_workflow(global_workflow_id, gwf_final_json.get("categories", []))
except:
    logger.warn(
        "The workflow {n}/{v} was created but setting categories failed".format(n=gwf_final_json['name'],
                                                                                v=gwf_final_json['version']))
    raise

return global_workflow_id
def _build_global_workflow(json_spec, args)
Creates a workflow in a temporary project for each enabled region and builds a global workflow on the platform based on these workflows.
3.98605
3.838373
1.038474
try:
    if args.mode == 'workflow':
        json_spec = _get_validated_json(json_spec, args)
        workflow_id = _build_regular_workflow(json_spec)
    elif args.mode == 'globalworkflow':
        # Verify if the global workflow already exists and if the user has developer rights to it
        # If the global workflow name doesn't exist, the user is free to build it
        # If the name does exist two things can be done:
        # * either update the requested version, if this version already exists
        # * or create the version if it doesn't exist
        existing_workflow = dxpy.executable_builder.verify_developer_rights('globalworkflow-' + json_spec['name'])
        if existing_workflow and _version_exists(json_spec, existing_workflow.name, existing_workflow.version):
            workflow_id = _update_global_workflow(json_spec, args, existing_workflow.id)
        else:
            json_spec = _get_validated_json(json_spec, args)
            workflow_id = _build_global_workflow(json_spec, args)
    else:
        raise WorkflowBuilderException("Unrecognized workflow type: {}".format(args.mode))
except dxpy.exceptions.DXAPIError as e:
    raise e

return workflow_id
def _build_or_update_workflow(json_spec, args)
Creates or updates a workflow on the platform. Returns the workflow ID, or None if the workflow cannot be created.
3.991158
3.977825
1.003352
if args is None:
    raise Exception("arguments not provided")

try:
    json_spec = _parse_executable_spec(args.src_dir, "dxworkflow.json", parser)
    workflow_id = _build_or_update_workflow(json_spec, args)
    _print_output(workflow_id, args)
except WorkflowBuilderException as e:
    print("Error: %s" % (e.args,), file=sys.stderr)
    sys.exit(3)
def build(args, parser)
Validates workflow source directory and creates a new (global) workflow based on it. Raises: WorkflowBuilderException if the workflow cannot be created.
5.187651
4.703473
1.102941
'''
Copy the source to a destination that does not currently exist. This
involves creating the target file/folder.
'''
# Destination folder path is new => renaming
if len(args.sources) != 1:
    # Can't copy and rename more than one object
    raise DXCLIError('The destination folder does not exist')
last_slash_pos = get_last_pos_of_char('/', dest_path)
if last_slash_pos == 0:
    dest_folder = '/'
else:
    dest_folder = dest_path[:last_slash_pos]
dest_name = dest_path[last_slash_pos + 1:].replace('\/', '/')
try:
    dx_dest.list_folder(folder=dest_folder, only='folders')
except dxpy.DXAPIError as details:
    if details.code == requests.codes['not_found']:
        raise DXCLIError('The destination folder does not exist')
    else:
        raise
except:
    err_exit()

# Clone and rename either the data object or the folder.
# src_result is None if it could not be resolved to an object.
src_proj, src_path, src_results = try_call(resolve_existing_path,
                                           args.sources[0],
                                           allow_mult=True, all_mult=args.all)

if src_proj == dest_proj:
    if is_hashid(args.sources[0]):
        # This is the only case in which the source project is
        # purely assumed, so give a better error message.
        raise DXCLIError(fill('Error: You must specify a source project for ' + args.sources[0]))
    else:
        raise DXCLIError(fill('A source path and the destination path resolved to the ' +
                              'same project or container. Please specify different source ' +
                              'and destination containers, e.g.') +
                         '\n dx cp source-project:source-id-or-path dest-project:dest-path')

if src_results is None:
    try:
        contents = dxpy.api.project_list_folder(src_proj,
                                                {"folder": src_path, "includeHidden": True})
        dxpy.api.project_new_folder(dest_proj, {"folder": dest_path})
        exists = dxpy.api.project_clone(src_proj,
                                        {"folders": contents['folders'],
                                         "objects": [result['id'] for result in contents['objects']],
                                         "project": dest_proj,
                                         "destination": dest_path})['exists']
        if len(exists) > 0:
            print(fill('The following objects already existed in the destination ' +
                       'container and were not copied:') + '\n ' + '\n '.join(exists))
        return
    except:
        err_exit()
else:
    try:
        exists = dxpy.api.project_clone(src_proj,
                                        {"objects": [result['id'] for result in src_results],
                                         "project": dest_proj,
                                         "destination": dest_folder})['exists']
        if len(exists) > 0:
            print(fill('The following objects already existed in the destination ' +
                       'container and were not copied:') + '\n ' + '\n '.join(exists))
        for result in src_results:
            if result['id'] not in exists:
                dxpy.DXHTTPRequest('/' + result['id'] + '/rename',
                                   {"project": dest_proj,
                                    "name": dest_name})
        return
    except:
        err_exit()
def cp_to_noexistent_destination(args, dest_path, dx_dest, dest_proj)
Copy the source to a destination that does not currently exist. This involves creating the target file/folder.
3.692101
3.536364
1.044039
"Returns the file size in readable form." B = num_bytes KB = float(1024) MB = float(KB * 1024) GB = float(MB * 1024) TB = float(GB * 1024) if B < KB: return '{0} {1}'.format(B, 'bytes' if B != 1 else 'byte') elif KB <= B < MB: return '{0:.2f} KiB'.format(B/KB) elif MB <= B < GB: return '{0:.2f} MiB'.format(B/MB) elif GB <= B < TB: return '{0:.2f} GiB'.format(B/GB) elif TB <= B: return '{0:.2f} TiB'.format(B/TB)
def _readable_part_size(num_bytes)
Returns the file size in readable form.
1.548415
1.501581
1.03119
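A few worked examples of the conversion above, assuming the helper is called directly (the thresholds are binary, 1024-based):

_readable_part_size(1)            # '1 byte'
_readable_part_size(512)          # '512 bytes'
_readable_part_size(1536)         # '1.50 KiB'
_readable_part_size(5 * 1024**2)  # '5.00 MiB'
_readable_part_size(1024**4)      # '1.00 TiB'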
if media_type is not None:
    dx_hash["media"] = media_type

resp = dxpy.api.file_new(dx_hash, **kwargs)
self.set_ids(resp["id"], dx_hash["project"])
def _new(self, dx_hash, media_type=None, **kwargs)
:param dx_hash: Standard hash populated in :func:`dxpy.bindings.DXDataObject.new()` containing attributes common to all data object classes.
:type dx_hash: dict
:param media_type: Internet Media Type
:type media_type: string

Creates a new remote file with media type *media_type*, if given.
3.801313
3.95738
0.960563
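In practice this hook is usually reached through the higher-level helpers; a short usage sketch, where the file name and contents are arbitrary placeholders:

import dxpy

# Create a remote file with an explicit Internet Media Type, write to it, and close it.
dxfile = dxpy.new_dxfile(media_type="text/csv", name="example.csv")
dxfile.write(b"a,b,c\n1,2,3\n")
dxfile.close()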
'''
:param dxid: Object ID
:type dxid: string
:param project: Project ID
:type project: string

Discards the currently stored ID and associates the handler with
*dxid*. As a side effect, it also flushes the buffer for the
previous file object if the buffer is nonempty.
'''
if self._dxid is not None:
    self.flush()

DXDataObject.set_ids(self, dxid, project)

# Reset state
self._pos = 0
self._file_length = None
self._cur_part = 1
self._num_uploaded_parts = 0
def set_ids(self, dxid, project=None)
:param dxid: Object ID
:type dxid: string
:param project: Project ID
:type project: string

Discards the currently stored ID and associates the handler with *dxid*. As a side effect, it also flushes the buffer for the previous file object if the buffer is nonempty.
5.627716
2.346346
2.398502
'''
:param offset: Position in the file to seek to
:type offset: integer

Seeks to *offset* bytes from the beginning of the file. This is a
no-op if the file is open for writing.

The position is computed from adding *offset* to a reference point;
the reference point is selected by the *from_what* argument. A
*from_what* value of 0 measures from the beginning of the file, 1
uses the current file position, and 2 uses the end of the file as
the reference point. *from_what* can be omitted and defaults to 0,
using the beginning of the file as the reference point.
'''
if from_what == os.SEEK_SET:
    reference_pos = 0
elif from_what == os.SEEK_CUR:
    reference_pos = self._pos
elif from_what == os.SEEK_END:
    if self._file_length == None:
        desc = self.describe()
        self._file_length = int(desc["size"])
    reference_pos = self._file_length
else:
    raise DXFileError("Invalid value supplied for from_what")

orig_pos = self._pos
self._pos = reference_pos + offset

in_buf = False
orig_buf_pos = self._read_buf.tell()
if offset < orig_pos:
    if orig_buf_pos > orig_pos - offset:
        # offset is less than original position but within the buffer
        in_buf = True
else:
    buf_len = dxpy.utils.string_buffer_length(self._read_buf)
    if buf_len - orig_buf_pos > offset - orig_pos:
        # offset is greater than original position but within the buffer
        in_buf = True

if in_buf:
    # offset is within the buffer (at least one byte following
    # the offset can be read directly out of the buffer)
    self._read_buf.seek(orig_buf_pos - orig_pos + offset)
elif offset == orig_pos:
    # This seek is a no-op (the cursor is just past the end of
    # the read buffer and coincides with the desired seek
    # position). We don't have the data ready, but the request
    # for the data starting here is already in flight.
    #
    # Detecting this case helps to optimize for sequential read
    # access patterns.
    pass
else:
    # offset is outside the buffer-- reset buffer and queues.
    # This is the failsafe behavior
    self._read_buf = BytesIO()
    # TODO: if the offset is within the next response(s), don't throw out the queues
    self._request_iterator, self._response_iterator = None, None
def seek(self, offset, from_what=os.SEEK_SET)
:param offset: Position in the file to seek to
:type offset: integer

Seeks to *offset* bytes from the beginning of the file. This is a no-op if the file is open for writing.

The position is computed from adding *offset* to a reference point; the reference point is selected by the *from_what* argument. A *from_what* value of 0 measures from the beginning of the file, 1 uses the current file position, and 2 uses the end of the file as the reference point. *from_what* can be omitted and defaults to 0, using the beginning of the file as the reference point.
4.540232
3.294499
1.378125
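A short usage sketch for reading; the file ID is a hypothetical placeholder and the file is assumed to be at least 1 KiB long:

import os
import dxpy

# Read the last kilobyte of a remote file (hypothetical file ID).
with dxpy.open_dxfile("file-xxxx") as remote_file:
    remote_file.seek(-1024, os.SEEK_END)
    tail = remote_file.read()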
'''
Flushes the internal write buffer.
'''
if self._write_buf.tell() > 0:
    data = self._write_buf.getvalue()
    self._write_buf = BytesIO()

    if multithread:
        self._async_upload_part_request(data, index=self._cur_part, **kwargs)
    else:
        self.upload_part(data, self._cur_part, **kwargs)

    self._cur_part += 1

if len(self._http_threadpool_futures) > 0:
    dxpy.utils.wait_for_all_futures(self._http_threadpool_futures)
    try:
        for future in self._http_threadpool_futures:
            if future.exception() != None:
                raise future.exception()
    finally:
        self._http_threadpool_futures = set()
def flush(self, multithread=True, **kwargs)
Flushes the internal write buffer.
3.035717
2.821778
1.075817
'''
:param data: Data to be written
:type data: str or mmap object
:param multithread: If True, sends multiple write requests asynchronously
:type multithread: boolean

Writes the data *data* to the file.

.. note::

    Writing to remote files is append-only. Using :meth:`seek`
    does not affect where the next :meth:`write` will occur.
'''
if not USING_PYTHON2:
    assert(isinstance(data, bytes))

self._ensure_write_bufsize(**kwargs)

def write_request(data_for_write_req):
    if multithread:
        self._async_upload_part_request(data_for_write_req, index=self._cur_part, **kwargs)
    else:
        self.upload_part(data_for_write_req, self._cur_part, **kwargs)
    self._cur_part += 1

if self._write_buf.tell() == 0 and self._write_bufsize == len(data):
    # In the special case of a write that is the same size as
    # our write buffer size, and no unflushed data in the
    # buffer, just directly dispatch the write and bypass the
    # write buffer.
    #
    # This saves a buffer copy, which is especially helpful if
    # 'data' is actually mmap'd from a file.
    #
    # TODO: an additional optimization could be made to allow
    # the last request from an mmap'd upload to take this path
    # too (in general it won't because it's not of length
    # _write_bufsize). This is probably inconsequential though.
    write_request(data)
    return

remaining_space = self._write_bufsize - self._write_buf.tell()

if len(data) <= remaining_space:
    self._write_buf.write(data)
else:
    self._write_buf.write(data[:remaining_space])

    temp_data = self._write_buf.getvalue()
    self._write_buf = BytesIO()
    write_request(temp_data)

    # TODO: check if repeat string splitting is bad for
    # performance when len(data) >> _write_bufsize
    self.write(data[remaining_space:], **kwargs)
def _write2(self, data, multithread=True, **kwargs)
:param data: Data to be written
:type data: str or mmap object
:param multithread: If True, sends multiple write requests asynchronously
:type multithread: boolean

Writes the data *data* to the file.

.. note:: Writing to remote files is append-only. Using :meth:`seek` does not affect where the next :meth:`write` will occur.
5.092974
3.845751
1.324312
'''
:param data: Data to be written
:type data: str or mmap object
:param multithread: If True, sends multiple write requests asynchronously
:type multithread: boolean

Writes the data *data* to the file.

.. note::

    Writing to remote files is append-only. Using :meth:`seek`
    does not affect where the next :meth:`write` will occur.
'''
if USING_PYTHON2:
    self._write2(data, multithread=multithread, **kwargs)
else:
    # In python3, the underlying system methods use the 'bytes' type, not 'string'
    #
    # This is, hopefully, a temporary hack. It is not a good idea for two reasons:
    # 1) Performance, we need to make a pass on the data, and need to allocate
    #    another buffer of similar size
    # 2) The types are wrong. The "bytes" type should be visible to the caller
    #    of the write method, instead of being hidden.
    #
    # Should we throw an exception if the file is opened in binary mode,
    # and the data is unicode/text?
    if isinstance(data, str):
        bt = data.encode("utf-8")
    elif isinstance(data, bytearray):
        bt = bytes(data)
    elif isinstance(data, bytes):
        bt = data
    elif isinstance(data, mmap.mmap):
        bt = bytes(data)
    else:
        raise DXFileError("Invalid type {} for write data argument".format(type(data)))
    assert(isinstance(bt, bytes))
    self._write2(bt, multithread=multithread, **kwargs)
def write(self, data, multithread=True, **kwargs)
:param data: Data to be written
:type data: str or mmap object
:param multithread: If True, sends multiple write requests asynchronously
:type multithread: boolean

Writes the data *data* to the file.

.. note:: Writing to remote files is append-only. Using :meth:`seek` does not affect where the next :meth:`write` will occur.
5.28866
3.588709
1.473694
'''
:param block: If True, this function blocks until the remote file has closed.
:type block: boolean

Attempts to close the file.

.. note:: The remote file cannot be closed until all parts have been
   fully uploaded. An exception will be thrown if this is not the case.
'''
self.flush(**kwargs)

# Also populates emptyLastPartAllowed
self._ensure_write_bufsize(**kwargs)
if self._num_uploaded_parts == 0 and self._empty_last_part_allowed:
    # We haven't uploaded any parts in this session.
    # In case no parts have been uploaded at all and region
    # settings allow last empty part upload, try to upload
    # an empty part (otherwise files with 0 parts cannot be closed).
    try:
        if USING_PYTHON2:
            self.upload_part('', 1, **kwargs)
        else:
            self.upload_part(b'', 1, **kwargs)
    except dxpy.exceptions.InvalidState:
        pass

if 'report_progress_fn' in kwargs:
    del kwargs['report_progress_fn']

dxpy.api.file_close(self._dxid, **kwargs)

if block:
    self._wait_on_close(**kwargs)
def close(self, block=False, **kwargs)
:param block: If True, this function blocks until the remote file has closed.
:type block: boolean

Attempts to close the file.

.. note:: The remote file cannot be closed until all parts have been fully uploaded. An exception will be thrown if this is not the case.
5.662802
4.007582
1.413022
if not USING_PYTHON2:
    # In python3, the underlying system methods use the 'bytes' type, not 'string'
    assert(isinstance(data, bytes))

req_input = {}
if index is not None:
    req_input["index"] = int(index)

md5 = hashlib.md5()
if hasattr(data, 'seek') and hasattr(data, 'tell'):
    # data is a buffer; record initial position (so we can rewind back)
    rewind_input_buffer_offset = data.tell()
    while True:
        bytes_read = data.read(MD5_READ_CHUNK_SIZE)
        if bytes_read:
            md5.update(bytes_read)
        else:
            break
    # rewind the buffer to original position
    data.seek(rewind_input_buffer_offset)
else:
    md5.update(data)

req_input["md5"] = md5.hexdigest()
req_input["size"] = len(data)

def get_upload_url_and_headers():
    # This function is called from within a retry loop, so to avoid amplifying the number of retries
    # geometrically, we decrease the allowed number of retries for the nested API call every time.
    if 'max_retries' not in kwargs:
        kwargs['max_retries'] = dxpy.DEFAULT_RETRIES
    elif kwargs['max_retries'] > 0:
        kwargs['max_retries'] -= 1

    if "timeout" not in kwargs:
        kwargs["timeout"] = FILE_REQUEST_TIMEOUT

    resp = dxpy.api.file_upload(self._dxid, req_input, **kwargs)
    url = resp["url"]
    return url, _validate_headers(resp.get("headers", {}))

# The file upload API requires us to get a pre-authenticated upload URL (and headers for it) every time we
# attempt an upload. Because DXHTTPRequest will retry requests under retryable conditions, we give it a callback
# to ask us for a new upload URL every time it attempts a request (instead of giving them directly).
dxpy.DXHTTPRequest(get_upload_url_and_headers,
                   data,
                   jsonify_data=False,
                   prepend_srv=False,
                   always_retry=True,
                   timeout=FILE_REQUEST_TIMEOUT,
                   auth=None,
                   method='PUT')

self._num_uploaded_parts += 1

if display_progress:
    warn(".")

if report_progress_fn is not None:
    report_progress_fn(self, len(data))
def upload_part(self, data, index=None, display_progress=False, report_progress_fn=None, **kwargs)
:param data: Data to be uploaded in this part
:type data: str or mmap object, bytes on python3
:param index: Index of part to be uploaded; must be in [1, 10000]
:type index: integer
:param display_progress: Whether to print "." to stderr when done
:type display_progress: boolean
:param report_progress_fn: Optional: a function to call that takes in two arguments (self, # bytes transmitted)
:type report_progress_fn: function or None
:raises: :exc:`dxpy.exceptions.DXFileError` if *index* is given and is not in the correct range, :exc:`requests.exceptions.HTTPError` if upload fails

Uploads the data in *data* as part number *index* for the associated file. If no value for *index* is given, *index* defaults to 1. This probably only makes sense if this is the only part to be uploaded.
4.582256
4.470378
1.025027
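A sketch of uploading a part explicitly rather than through write(); the file name is a placeholder, and only a single part is uploaded because non-final parts generally must meet the region's minimum part size:

import dxpy

# Upload a single part explicitly, then close the file.
dxfile = dxpy.new_dxfile(name="parts_example.txt")
dxfile.upload_part(b"hello, platform\n", index=1)
dxfile.close(block=True)  # block until the remote file reaches the 'closed' state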
cmd = ['dx', 'ssh', '--suppress-running-check', job_id, '-o', 'StrictHostKeyChecking no']
cmd += ['-f', '-L', '{0}:localhost:{1}'.format(local_port, remote_port), '-N']
subprocess.check_call(cmd)
def setup_ssh_tunnel(job_id, local_port, remote_port)
Set up an SSH tunnel to the given job ID. This forwards the given local_port to the given remote_port on the job's worker and then exits, keeping the tunnel in place until the job is terminated.
4.571255
4.994894
0.915186
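For example, forwarding a local port to a Jupyter server running inside a job; the job ID and port numbers here are hypothetical placeholders:

# Forward localhost:2001 to port 8888 inside the job (hypothetical job ID).
setup_ssh_tunnel("job-xxxx", local_port=2001, remote_port=8888)
# The notebook is then reachable at http://localhost:2001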
sys.stdout.write('Waiting for server in {0} to initialize ...'.format(job_id))
sys.stdout.flush()
desc = dxpy.describe(job_id)

# Keep checking until the server has begun or it has failed.
while(SERVER_READY_TAG not in desc['tags'] and desc['state'] != 'failed'):
    time.sleep(SLEEP_PERIOD)
    sys.stdout.write('.')
    sys.stdout.flush()
    desc = dxpy.describe(job_id)

# If the server job failed, provide friendly advice.
if desc['state'] == 'failed':
    msg = RED('Error:') + ' Server failed to run.\n'
    msg += 'You may want to check the job logs by running:'
    msg += BOLD('dx watch {0}'.format(job_id))
    err_exit(msg)
def poll_for_server_running(job_id)
Poll for the job to start running and post the SERVER_READY_TAG.
4.254395
4.023041
1.057507
if platform == "linux" or platform == "linux2": cmd = ['xdg-open', cmd] elif platform == "darwin": cmd = ['open', cmd] elif platform == "win32": cmd = ['start', cmd] subprocess.check_call(cmd)
def multi_platform_open(cmd)
Take the given command and use the OS to automatically open the appropriate resource. For instance, if a URL is provided, this will have the OS automatically open the URL in the default web browser.
1.814966
2.018499
0.899166
notebook_apps = dxpy.find_apps(name=NOTEBOOK_APP, all_versions=True)
versions = [str(dxpy.describe(app['id'])['version']) for app in notebook_apps]
return versions
def get_notebook_app_versions()
Get the valid version numbers of the notebook app.
3.91452
3.667923
1.067231
# Check that ssh is set up. Currently notebooks require ssh for tunnelling.
ssh_config_check()

if args.only_check_config:
    return

# If the user requested a specific version of the notebook server,
# get the executable id.
if args.version is not None:
    executable = get_app_from_path('app-{0}/{1}'.format(NOTEBOOK_APP, args.version))
    if executable is not None and 'id' in executable:
        executable = executable['id']
    else:
        msg = RED('Warning:') + ' Invalid notebook version: {0}\nValid versions are: '.format(args.version)
        msg += BOLD('{0}'.format(str(get_notebook_app_versions())))
        err_exit(msg)
else:
    executable = 'app-{0}'.format(NOTEBOOK_APP)

# Compose the command to launch the notebook
cmd = ['dx', 'run', executable, '-inotebook_type={0}'.format(args.notebook_type)]
cmd += ['-iinput_files={0}'.format(f) for f in args.notebook_files]
cmd += ['-itimeout={0}'.format(args.timeout), '-y', '--brief', '--allow-ssh', '--instance-type', args.instance_type]
if args.spark:
    cmd += ['-iinstall_spark=true']
if args.snapshot:
    cmd += ['-isnapshot={0}'.format(args.snapshot)]
job_id = subprocess.check_output(cmd).strip()

poll_for_server_running(job_id)

if args.notebook_type in {'jupyter', 'jupyter_lab', 'jupyter_notebook'}:
    remote_port = 8888

setup_ssh_tunnel(job_id, args.port, remote_port)

if args.open_server:
    multi_platform_open('http://localhost:{0}'.format(args.port))
    print('A web browser should have opened to connect you to your notebook.')

print('If no browser appears, or if you need to reopen a browser at any point, you should be able to point your browser to http://localhost:{0}'.format(args.port))
def run_notebook(args, ssh_config_check)
Launch the notebook server.
4.979423
4.911057
1.013921
NWORDS = _train(known_words)
candidates = _known([word], NWORDS) or _known(_edits1(word), NWORDS) or _known_edits2(word, NWORDS) or [word]
return max(candidates, key=NWORDS.get)
def correct(word, known_words)
:param word: Word to correct
:type word: string
:param known_words: List of known words
:type known_words: iterable of strings

Given **word**, suggests a correction from **known_words**. If no reasonably close correction is found, returns **word**.
4.618794
4.985095
0.926521
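A usage sketch, assuming the helper is given a small vocabulary such as a list of known command names (the expected outputs shown in the comments follow from the edit-distance logic above):

known_commands = ["build", "describe", "download", "publish", "run", "upload"]

correct("pubish", known_commands)   # -> 'publish' (one edit away)
correct("zzzzzz", known_commands)   # -> 'zzzzzz' (no close match; input returned unchanged)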
params = job_config.input_reader_params
shard_count = job_config.shard_count
query_spec = cls._get_query_spec(params)

if not property_range.should_shard_by_property_range(query_spec.filters):
    return super(ModelDatastoreInputReader, cls).split_input(job_config)

p_range = property_range.PropertyRange(query_spec.filters,
                                       query_spec.model_class_path)
p_ranges = p_range.split(shard_count)

# User specified a namespace.
if query_spec.ns:
    ns_range = namespace_range.NamespaceRange(namespace_start=query_spec.ns,
                                              namespace_end=query_spec.ns,
                                              _app=query_spec.app)
    ns_ranges = [copy.copy(ns_range) for _ in p_ranges]
else:
    ns_keys = namespace_range.get_namespace_keys(
        query_spec.app, cls.MAX_NAMESPACES_FOR_KEY_SHARD+1)
    if not ns_keys:
        return
    # User doesn't specify ns but the number of ns is small.
    # We still split by property range.
    if len(ns_keys) <= cls.MAX_NAMESPACES_FOR_KEY_SHARD:
        ns_ranges = [namespace_range.NamespaceRange(_app=query_spec.app)
                     for _ in p_ranges]
    # Lots of namespaces. Split by ns.
    else:
        ns_ranges = namespace_range.NamespaceRange.split(n=shard_count,
                                                         contiguous=False,
                                                         can_query=lambda: True,
                                                         _app=query_spec.app)
        p_ranges = [copy.copy(p_range) for _ in ns_ranges]

assert len(p_ranges) == len(ns_ranges)

iters = [
    db_iters.RangeIteratorFactory.create_property_range_iterator(
        p, ns, query_spec) for p, ns in zip(p_ranges, ns_ranges)]
return [cls(i) for i in iters]
def split_input(cls, job_config)
Inherit docs.
3.353891
3.291516
1.01895
super(ModelDatastoreInputReader, cls).validate(job_config)
params = job_config.input_reader_params
entity_kind = params[cls.ENTITY_KIND_PARAM]

# Fail fast if Model cannot be located.
try:
    model_class = util.for_name(entity_kind)
except ImportError as e:
    raise errors.BadReaderParamsError("Bad entity kind: %s" % e)

if cls.FILTERS_PARAM in params:
    filters = params[cls.FILTERS_PARAM]
    if issubclass(model_class, db.Model):
        cls._validate_filters(filters, model_class)
    else:
        cls._validate_filters_ndb(filters, model_class)
    property_range.PropertyRange(filters, entity_kind)
def validate(cls, job_config)
Inherit docs.
3.973861
3.799511
1.045887
if not filters:
    return

properties = model_class.properties()

for f in filters:
    prop, _, val = f
    if prop not in properties:
        raise errors.BadReaderParamsError(
            "Property %s is not defined for entity type %s",
            prop, model_class.kind())

    # Validate the value of each filter. We need to know filters have
    # valid value to carry out splits.
    try:
        properties[prop].validate(val)
    except db.BadValueError as e:
        raise errors.BadReaderParamsError(e)
def _validate_filters(cls, filters, model_class)
Validate user supplied filters.

Validate filters are on existing properties and filter values have valid semantics.

Args:
    filters: user supplied filters. Each filter should be a list or tuple of format (<property_name_as_str>, <query_operator_as_str>, <value_of_certain_type>). Value type is up to the property's type.
    model_class: the db.Model class for the entity type to apply filters on.

Raises:
    BadReaderParamsError: if any filter is invalid in any way.
5.317312
4.145453
1.282685
if not filters:
    return

properties = model_class._properties

for f in filters:
    prop, _, val = f
    if prop not in properties:
        raise errors.BadReaderParamsError(
            "Property %s is not defined for entity type %s",
            prop, model_class._get_kind())

    # Validate the value of each filter. We need to know filters have
    # valid value to carry out splits.
    try:
        properties[prop]._do_validate(val)
    except db.BadValueError as e:
        raise errors.BadReaderParamsError(e)
def _validate_filters_ndb(cls, filters, model_class)
Validate ndb.Model filters.
5.462588
5.261225
1.038273
if ndb is not None and isinstance(value, ndb.Model):
    return None
if getattr(value, "_populate_internal_entity", None):
    return value._populate_internal_entity()
return value
def _normalize_entity(value)
Return an entity from an entity or model instance.
5.27122
4.26832
1.234964
if ndb is not None and isinstance(value, (ndb.Model, ndb.Key)):
    return None
if getattr(value, "key", None):
    return value.key()
elif isinstance(value, basestring):
    return datastore.Key(value)
else:
    return value
def _normalize_key(value)
Return a key from an entity, model instance, key, or key string.
3.911032
3.139462
1.245765
if self.should_flush():
    self.flush()
self.items.append(item)
def append(self, item)
Add new item to the list. If needed, append will first flush existing items and clear existing items. Args: item: an item to add to the list.
6.11967
5.202465
1.176302
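The buffering pattern here can be illustrated with a minimal, self-contained sketch; the class name and limits below are illustrative, not the actual mapreduce classes:

class BufferedList(object):
    """Collects items and flushes them in batches, mirroring the append/flush pattern above."""

    def __init__(self, flush_function, max_items=100):
        self.items = []
        self.max_items = max_items
        self.flush_function = flush_function

    def should_flush(self):
        # Flush before the buffer grows past its configured limit.
        return len(self.items) >= self.max_items

    def append(self, item):
        if self.should_flush():
            self.flush()
        self.items.append(item)

    def flush(self):
        if not self.items:
            return
        self.flush_function(self.items)
        self.items = []


# Usage: batch writes into groups of at most 3.
batches = []
buf = BufferedList(flush_function=batches.append, max_items=3)
for i in range(7):
    buf.append(i)
buf.flush()
print(batches)  # [[0, 1, 2], [3, 4, 5], [6]]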
if not self.items:
    return

retry = 0
options = {"deadline": DATASTORE_DEADLINE}
while retry <= self.__timeout_retries:
    try:
        self.__flush_function(self.items, options)
        self.clear()
        break
    except db.Timeout as e:
        logging.warning(e)
        logging.warning("Flushing '%s' timed out. Will retry for the %s time.",
                        self, retry)
        retry += 1
        options["deadline"] *= 2
    except apiproxy_errors.RequestTooLargeError:
        self._log_largest_items()
        raise
else:
    raise
def flush(self)
Force a flush.
5.525031
5.356001
1.031559
actual_entity = _normalize_entity(entity)
if actual_entity is None:
    return self.ndb_put(entity)
self.puts.append(actual_entity)
def put(self, entity)
Registers entity to put to datastore. Args: entity: an entity or model instance to put.
6.23486
5.654063
1.102722
assert ndb is not None and isinstance(entity, ndb.Model)
self.ndb_puts.append(entity)
def ndb_put(self, entity)
Like put(), but for NDB entities.
4.621603
4.223648
1.094221
key = _normalize_key(entity)
if key is None:
    return self.ndb_delete(entity)
self.deletes.append(key)
def delete(self, entity)
Registers entity to delete from datastore. Args: entity: an entity, model instance, or key to delete.
6.577676
6.572161
1.000839
if ndb is not None and isinstance(entity_or_key, ndb.Model):
    key = entity_or_key.key
else:
    key = entity_or_key
self.ndb_deletes.append(key)
def ndb_delete(self, entity_or_key)
Like delete(), but for NDB entities/keys.
2.47789
2.313424
1.071092
self.puts.flush()
self.deletes.flush()
self.ndb_puts.flush()
self.ndb_deletes.flush()
def flush(self)
Flush (apply) all changes to the datastore.
4.622487
3.745275
1.234218
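Taken together, these methods behave as a simple mutation pool; a hedged usage sketch, assuming an instance of the surrounding pool class named pool and an existing db.Model or ndb.Model entity:

# 'pool' is assumed to be an instance of the surrounding mutation-pool class,
# and 'entity' an existing model instance.
pool.put(entity)            # buffered; NDB models are routed to ndb_put()
pool.delete(entity.key())   # buffered delete by key
pool.flush()                # apply all buffered puts and deletes to the datastore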
datastore.Put(items, config=self._create_config(options))
def _flush_puts(self, items, options)
Flush all puts to datastore.
24.033228
18.089766
1.328554
datastore.Delete(items, config=self._create_config(options))
def _flush_deletes(self, items, options)
Flush all deletes to datastore.
24.318562
17.312263
1.404701
assert ndb is not None
ndb.put_multi(items, config=self._create_config(options))
def _flush_ndb_puts(self, items, options)
Flush all NDB puts to datastore.
6.170493
5.902741
1.045361
assert ndb is not None
ndb.delete_multi(items, config=self._create_config(options))
def _flush_ndb_deletes(self, items, options)
Flush all NDB deletes to datastore.
6.554608
6.297352
1.040851
return datastore.CreateConfig(deadline=options["deadline"], force_writes=self.force_writes)
def _create_config(self, options)
Creates datastore Config. Returns: A datastore_rpc.Configuration instance.
18.998636
18.681145
1.016995