Dataset columns:
_id: string (length 2-7)
title: string (length 1-88)
partition: string (3 classes)
text: string (length 75-19.8k)
language: string (1 class)
meta_information: dict
q12000
_image_to_data
train
def _image_to_data(img):
    """
    Does the work of encoding an image into Base64
    """
    # If the image is already encoded in Base64, we have nothing to do here
    if "src" not in img.attrs or img["src"].startswith("data:"):
        return
    elif re.match("https?://", img["src"]):
        img_data = _load_url(img["src"]).read()
    else:
        img_data = _load_file(img["src"]).read()
    img_type = imghdr.what("", img_data)
    img_b64 = base64.b64encode(img_data)
    # Fall back to a generic image type, but keep the encoded data either way
    # (the original fallback dropped the Base64 payload entirely)
    src_data = "data:image/none;base64,{}".format(img_b64)
    if img_type:
        src_data = "data:image/{};base64,{}".format(img_type, img_b64)
    img["src"] = src_data
python
{ "resource": "" }
q12001
_bake_css
train
def _bake_css(link):
    """
    Takes a link element and turns it into an inline style link if applicable
    """
    # Group the conditions so the "href" check guards all of them (the original
    # precedence allowed href-less links through), and compare strings with ==
    # rather than "is".
    if "href" in link.attrs and (re.search(r"\.css$", link["href"]) or
                                 ("rel" in link.attrs and link["rel"] == "stylesheet") or
                                 ("type" in link.attrs and link["type"] == "text/css")):
        if re.match("https?://", link["href"]):
            css_data = _load_url(link["href"]).read()
        else:
            css_data = _load_file(link["href"]).read()
        link.clear()
        if USING_PYTHON2:
            link.string = css_data
        else:
            link.string = str(css_data)
        link.name = "style"
        del link["rel"]
        del link["href"]
python
{ "resource": "" }
q12002
_bake_script
train
def _bake_script(script):
    """
    Takes a script element and bakes it in only if it contains a remote resource
    """
    if "src" in script.attrs:
        if re.match("https?://", script["src"]):
            script_data = _load_url(script["src"]).read()
        else:
            script_data = _load_file(script["src"]).read()
        script.clear()
        if USING_PYTHON2:
            script.string = "\n" + script_data + "\n"
        else:
            script.string = "\n" + str(script_data) + "\n"
        del script["src"]
        del script["type"]
python
{ "resource": "" }
q12003
_load_file
train
def _load_file(path):
    """
    Loads a file from the local filesystem
    """
    if not os.path.exists(path):
        parser.error("{} was not found!".format(path))
    if USING_PYTHON2:
        mode = "r"
    else:
        mode = "rb"
    try:
        f = open(path, mode)
        return f
    except IOError as ex:
        parser.error("{path} could not be read due to an I/O error! ({ex})".format(path=path, ex=ex))
python
{ "resource": "" }
q12004
_load_url
train
def _load_url(url):
    """
    Loads a URL resource from a remote server
    """
    try:
        response = requests.get(url)
        return BytesIO(response.content)
    except IOError as ex:
        parser.error("{url} could not be loaded remotely! ({ex})".format(url=url, ex=ex))
python
{ "resource": "" }
q12005
_get_bs4_string
train
def _get_bs4_string(soup):
    """
    Outputs a BeautifulSoup object as a string that should hopefully be minimally modified
    """
    if len(soup.find_all("script")) == 0:
        soup_str = soup.prettify(formatter=None).strip()
    else:
        soup_str = str(soup.html)
        soup_str = re.sub("&amp;", "&", soup_str)
        soup_str = re.sub("&lt;", "<", soup_str)
        soup_str = re.sub("&gt;", ">", soup_str)
    return soup_str
python
{ "resource": "" }
q12006
bake
train
def bake(src):
    """
    Runs the encoder on the given source file
    """
    src = os.path.realpath(src)
    path = os.path.dirname(src)
    filename = os.path.basename(src)
    html = _load_file(src).read()
    if imghdr.what("", html):
        html = "<html><body><img src='{}'/></body></html>".format(cgi.escape(filename))
    # Change to the file's directory so image files with relative paths can be loaded correctly
    cwd = os.getcwd()
    os.chdir(path)
    bs_html = bs4.BeautifulSoup(html, "html.parser")
    images = bs_html.find_all("img")
    for image in images:
        _image_to_data(image)
    for link in bs_html.find_all("link"):
        _bake_css(link)
    for script in bs_html.find_all("script"):
        _bake_script(script)
    os.chdir(cwd)
    return bs_html
python
{ "resource": "" }
q12007
upload_html
train
def upload_html(destination, html, name=None):
    """
    Uploads the HTML to a file on the server
    """
    [project, path, n] = parse_destination(destination)
    try:
        dxfile = dxpy.upload_string(html, media_type="text/html", project=project, folder=path,
                                    hidden=True, name=name or None)
        return dxfile.get_id()
    except dxpy.DXAPIError as ex:
        parser.error("Could not upload HTML report to DNAnexus server! ({ex})".format(ex=ex))
python
{ "resource": "" }
q12008
create_record
train
def create_record(destination, file_ids, width=None, height=None):
    """
    Creates a master record for the HTML report; this doesn't contain the actual HTML,
    but reports are required to be records rather than files and we can link more than
    one HTML file to a report
    """
    [project, path, name] = parse_destination(destination)
    files = [dxpy.dxlink(file_id) for file_id in file_ids]
    details = {"files": files}
    if width:
        details["width"] = width
    if height:
        details["height"] = height
    try:
        dxrecord = dxpy.new_dxrecord(project=project, folder=path, types=["Report", "HTMLReport"],
                                     details=details, name=name)
        dxrecord.close()
        return dxrecord.get_id()
    except dxpy.DXAPIError as ex:
        parser.error("Could not create an HTML report record on DNAnexus servers! ({ex})".format(ex=ex))
python
{ "resource": "" }
q12009
save
train
def save(filename, html):
    """
    Creates a baked HTML file on the local system
    """
    try:
        out_file = open(filename, "w")
        out_file.write(html)
        out_file.close()
    except IOError as ex:
        parser.error("Could not write baked HTML to local file {name}. ({ex})".format(name=filename, ex=ex))
python
{ "resource": "" }
q12010
get_size_str
train
def get_size_str(size):
    """
    Formats a byte size as a string.

    The returned string is no more than 9 characters long.
    """
    if size == 0:
        magnitude = 0
        level = 0
    else:
        magnitude = math.floor(math.log(size, 10))
        level = int(min(math.floor(magnitude // 3), 4))
    return ('%d' if level == 0 else '%.2f') % (float(size) / 2**(level*10)) + ' ' + SIZE_LEVEL[level]
python
{ "resource": "" }
q12011
get_ls_l_desc
train
def get_ls_l_desc(desc, include_folder=False, include_project=False):
    """
    desc must have at least all the fields given by get_ls_l_desc_fields.
    """
    # If you make this method consume an additional field, you must add it to
    # get_ls_l_desc_fields above.
    if 'state' in desc:
        state_len = len(desc['state'])
        if desc['state'] != 'closed':
            state_str = YELLOW() + desc['state'] + ENDC()
        else:
            state_str = GREEN() + desc['state'] + ENDC()
    else:
        state_str = ''
        state_len = 0

    name_str = ''
    if include_folder:
        name_str += desc['folder'] + ('/' if desc['folder'] != '/' else '')

    name_str += desc['name']

    if desc['class'] in ['applet', 'workflow']:
        name_str = BOLD() + GREEN() + name_str + ENDC()

    size_str = ''
    if 'size' in desc and desc['class'] == 'file':
        size_str = get_size_str(desc['size'])
    elif 'length' in desc:
        size_str = str(desc['length']) + ' rows'
    size_padding = ' ' * max(0, 9 - len(size_str))

    return (state_str +
            DELIMITER(' '*(8 - state_len)) + render_short_timestamp(desc['modified']) +
            DELIMITER(' ') + size_str +
            DELIMITER(size_padding + ' ') + name_str +
            DELIMITER(' (') + ((desc['project'] + DELIMITER(':')) if include_project else '') +
            desc['id'] + DELIMITER(')'))
python
{ "resource": "" }
q12012
_recursive_cleanup
train
def _recursive_cleanup(foo):
    """
    Aggressively cleans up things that look empty.
    """
    if isinstance(foo, dict):
        for (key, val) in list(foo.items()):
            if isinstance(val, dict):
                _recursive_cleanup(val)
            if val == "" or val == [] or val == {}:
                del foo[key]
python
{ "resource": "" }
q12013
dump_executable
train
def dump_executable(executable, destination_directory, omit_resources=False, describe_output={}):
    """
    Reconstitutes an app, applet, or a workflow into a directory that would
    create a functionally identical executable if "dx build" were run on it.
    destination_directory will be the root source directory for the executable.

    :param executable: executable, i.e. app, applet, or workflow, to be dumped
    :type executable: DXExecutable (either of: DXApp, DXApplet, DXWorkflow, DXGlobalWorkflow)
    :param destination_directory: an existing, empty, and writable directory
    :type destination_directory: str
    :param omit_resources: if True, executable's resources will not be downloaded
    :type omit_resources: boolean
    :param describe_output: output of a describe API call on the executable
    :type describe_output: dictionary
    """
    try:
        old_cwd = os.getcwd()
        os.chdir(destination_directory)
        if isinstance(executable, dxpy.DXWorkflow):
            _dump_workflow(executable, describe_output)
        elif isinstance(executable, dxpy.DXGlobalWorkflow):
            # Add inputs, outputs, stages. These fields contain region-specific values
            # e.g. files or applets, that's why:
            # * if the workflow is global, we will unpack the underlying workflow
            #   from the region of the current project context
            # * if this is a regular, project-based workflow, we will just use
            #   its description (the describe_output that we already have)
            # Underlying workflows are workflows stored in resource containers
            # of the global workflow (one per each region the global workflow is
            # enabled in).
            # TODO: add a link to documentation.
            current_project = dxpy.WORKSPACE_ID
            if not current_project:
                raise DXError(
                    'A project needs to be selected to "dx get" a global workflow. You can use "dx select" to select a project')
            region = dxpy.api.project_describe(current_project,
                                               input_params={"fields": {"region": True}})["region"]
            describe_output = executable.append_underlying_workflow_desc(describe_output, region)
            _dump_workflow(executable, describe_output)
        else:
            _dump_app_or_applet(executable, omit_resources, describe_output)
    except:
        err_exit()
    finally:
        os.chdir(old_cwd)
python
{ "resource": "" }
q12014
_gen_helper_dict
train
def _gen_helper_dict(filtered_inputs):
    '''
    Create a dict of values for the downloaded files. This is similar to the
    variables created when running a bash app.
    '''
    file_key_descs, _ignore = file_load_utils.analyze_bash_vars(
        file_load_utils.get_input_json_file(), None)

    flattened_dict = {}

    def add_if_no_collision(key, value, dict_):
        if key not in dict_:
            dict_[key] = value

    for input_ in filtered_inputs:
        if input_ not in file_key_descs:
            continue
        input_var_dict = file_key_descs[input_]
        add_if_no_collision(input_ + '_path', input_var_dict["path"], flattened_dict)
        add_if_no_collision(input_ + '_name', input_var_dict["basename"], flattened_dict)
        add_if_no_collision(input_ + '_prefix', input_var_dict["prefix"], flattened_dict)

    return flattened_dict
python
{ "resource": "" }
q12015
_get_num_parallel_threads
train
def _get_num_parallel_threads(max_threads, num_cores, mem_available_mb):
    ''' Ensure at least ~1.2 GB memory per thread, see PTFM-18767 '''
    return min(max_threads, num_cores, max(int(mem_available_mb/1200), 1))
python
{ "resource": "" }
q12016
main
train
def main(**kwargs):
    """
    Draw a couple of simple graphs and optionally generate an HTML file to upload them
    """
    draw_lines()
    draw_histogram()
    draw_bar_chart()
    destination = "-r /report"
    if use_html:
        generate_html()
        command = "dx-build-report-html {h} {d}".format(h=html_filename, d=destination)
    else:
        command = "dx-build-report-html {l} {b} {h} {d}".format(l=lines_filename, b=bars_filename,
                                                                h=histogram_filename, d=destination)
    sub_output = json.loads(subprocess.check_output(command, shell=True))
    output = {}
    output["report"] = dxpy.dxlink(sub_output["recordId"])
    return output
python
{ "resource": "" }
q12017
draw_lines
train
def draw_lines():
    """
    Draws a line between a set of random values
    """
    r = numpy.random.randn(200)
    fig = pyplot.figure()
    ax = fig.add_subplot(111)
    ax.plot(r)
    ax.grid(True)
    pyplot.savefig(lines_filename)
python
{ "resource": "" }
q12018
generate_html
train
def generate_html():
    """
    Generate an HTML file incorporating the images produced by this script
    """
    html_file = open(html_filename, "w")
    html_file.write("<html><body>")
    html_file.write("<h1>Here are some graphs for you!</h1>")
    for image in [lines_filename, bars_filename, histogram_filename]:
        html_file.write("<div><h2>{0}</h2><img src='{0}' /></div>".format(image))
    html_file.write("</body></html>")
    html_file.close()
python
{ "resource": "" }
q12019
_safe_unicode
train
def _safe_unicode(o):
    """
    Returns an equivalent unicode object, trying harder to avoid
    dependencies on the Python default encoding.
    """
    def clean(s):
        return u''.join([c if c in ASCII_PRINTABLE else '?' for c in s])

    if USING_PYTHON2:
        try:
            return unicode(o)
        except:
            try:
                s = str(o)
                try:
                    return s.decode("utf-8")
                except:
                    return clean(s[:2048]) + u" [Raw error message: " + unicode(s.encode("hex"), 'utf-8') + u"]"
            except:
                return u"(Unable to decode Python exception message)"
    else:
        return str(o)
python
{ "resource": "" }
q12020
_format_exception_message
train
def _format_exception_message(e):
    """
    Formats the specified exception.
    """
    # Prevent duplication of "AppError" in places that print "AppError"
    # and then this formatted string
    if isinstance(e, dxpy.AppError):
        return _safe_unicode(e)
    if USING_PYTHON2:
        return unicode(e.__class__.__name__, 'utf-8') + ": " + _safe_unicode(e)
    else:
        return e.__class__.__name__ + ": " + _safe_unicode(e)
python
{ "resource": "" }
q12021
run
train
def run(function_name=None, function_input=None): """Triggers the execution environment entry point processor. Use this function in the program entry point code: .. code-block:: python import dxpy @dxpy.entry_point('main') def hello(i): pass dxpy.run() This method may be used to invoke the program either in a production environment (inside the execution environment) or for local debugging (in the debug harness), as follows: If the environment variable *DX_JOB_ID* is set, the processor retrieves the job with that ID from the API server. The job's *function* field indicates the function name to be invoked. That function name is looked up in the table of all methods decorated with *@dxpy.entry_point('name')* in the module from which :func:`run()` was called, and the matching method is invoked (with the job's input supplied as parameters). This is the mode of operation used in the DNAnexus execution environment. .. warning:: The parameters *function_name* and *function_input* are disregarded in this mode of operation. If the environment variable *DX_JOB_ID* is not set, the function name may be given in *function_name*; if not set, it is set by the environment variable *DX_TEST_FUNCTION*. The function input may be given in *function_input*; if not set, it is set by the local file *job_input.json* which is expected to be present. The absence of *DX_JOB_ID* signals to :func:`run()` that execution is happening in the debug harness. In this mode of operation, all calls to :func:`dxpy.bindings.dxjob.new_dxjob()` (and higher level handler methods which use it) are intercepted, and :func:`run()` is invoked instead with appropriate inputs. """ global RUN_COUNT RUN_COUNT += 1 dx_working_dir = os.getcwd() if dxpy.JOB_ID is not None: logging.basicConfig() try: logging.getLogger().addHandler(dxpy.DXLogHandler()) except dxpy.exceptions.DXError: print("TODO: FIXME: the EE client should die if logging is not available") job = dxpy.describe(dxpy.JOB_ID) else: if function_name is None: function_name = os.environ.get('DX_TEST_FUNCTION', 'main') if function_input is None: with open("job_input.json", "r") as fh: function_input = json.load(fh) job = {'function': function_name, 'input': function_input} with open("job_error_reserved_space", "w") as fh: fh.write("This file contains reserved space for writing job errors in case the filesystem becomes full.\n" + " "*1024*64) print("Invoking", job.get('function'), "with", job.get('input')) try: result = ENTRY_POINT_TABLE[job['function']](**job['input']) except dxpy.AppError as e: save_error(e, dx_working_dir, error_type="AppError") raise except Exception as e: save_error(e, dx_working_dir) raise if result is not None: # TODO: protect against client removing its original working directory os.chdir(dx_working_dir) if USING_PYTHON2: # On python-2 we need to use binary mode with open("job_output.json", "wb") as fh: json.dump(result, fh, indent=2, cls=DXJSONEncoder) fh.write(b"\n") else: with open("job_output.json", "w") as fh: json.dump(result, fh, indent=2, cls=DXJSONEncoder) fh.write("\n") return result
python
{ "resource": "" }
q12022
entry_point
train
def entry_point(entry_point_name):
    """Use this to decorate a DNAnexus execution environment entry point.

    Example:

    .. code-block:: python

       @dxpy.entry_point('main')
       def hello(i):
           pass

    """
    def wrap(f):
        ENTRY_POINT_TABLE[entry_point_name] = f
        @wraps(f)
        def wrapped_f(*args, **kwargs):
            return f(*args, **kwargs)
        return wrapped_f
    return wrap
python
{ "resource": "" }
q12023
user_info
train
def user_info(authserver_host=None, authserver_port=None):
    """Returns the result of the user_info call against the specified auth server.

    .. deprecated:: 0.108.0
       Use :func:`whoami` instead where possible.
    """
    authserver = get_auth_server_name(authserver_host, authserver_port)
    return DXHTTPRequest(authserver + "/system/getUserInfo", {}, prepend_srv=False)
python
{ "resource": "" }
q12024
get_job_input_filenames
train
def get_job_input_filenames(job_input_file): """Extract list of files, returns a set of directories to create, and a set of files, with sources and destinations. The paths created are relative to the input directory. Note: we go through file names inside arrays, and create a separate subdirectory for each. This avoids clobbering files when duplicate filenames appear in an array. """ def get_input_hash(): with open(job_input_file) as fh: job_input = json.load(fh) return job_input job_input = get_input_hash() files = collections.defaultdict(list) # dictionary, with empty lists as default elements dirs = [] # directories to create under <idir> # Local function for adding a file to the list of files to be created # for example: # iname == "seq1" # subdir == "015" # value == { "$dnanexus_link": { # "project": "project-BKJfY1j0b06Z4y8PX8bQ094f", # "id": "file-BKQGkgQ0b06xG5560GGQ001B" # } # will create a record describing that the file should # be downloaded into seq1/015/<filename> def add_file(iname, subdir, value): if not dxpy.is_dxlink(value): return handler = dxpy.get_handler(value) if not isinstance(handler, dxpy.DXFile): return filename = make_unix_filename(handler.name) trg_dir = iname if subdir is not None: trg_dir = os.path.join(trg_dir, subdir) files[iname].append({'trg_fname': os.path.join(trg_dir, filename), 'handler': handler, 'src_file_id': handler.id}) dirs.append(trg_dir) # An array of inputs, for a single key. A directory # will be created per array entry. For example, if the input key is # FOO, and the inputs are {A, B, C}.vcf then, the directory structure # will be: # <idir>/FOO/00/A.vcf # <idir>/FOO/01/B.vcf # <idir>/FOO/02/C.vcf def add_file_array(input_name, links): num_files = len(links) if num_files == 0: return num_digits = len(str(num_files - 1)) dirs.append(input_name) for i, link in enumerate(links): subdir = str(i).zfill(num_digits) add_file(input_name, subdir, link) for input_name, value in list(job_input.items()): if isinstance(value, list): # This is a file array add_file_array(input_name, value) else: add_file(input_name, None, value) ## create a dictionary of the all non-file elements rest_hash = {key: val for key, val in list(job_input.items()) if key not in files} return dirs, files, rest_hash
python
{ "resource": "" }
q12025
analyze_bash_vars
train
def analyze_bash_vars(job_input_file, job_homedir): ''' This function examines the input file, and calculates variables to instantiate in the shell environment. It is called right before starting the execution of an app in a worker. For each input key, we want to have $var $var_filename $var_prefix remove last dot (+gz), and/or remove patterns $var_path $HOME/in/var/$var_filename For example, $HOME/in/genes/A.txt B.txt export genes=('{"$dnanexus_link": "file-xxxx"}' '{"$dnanexus_link": "file-yyyy"}') export genes_filename=("A.txt" "B.txt") export genes_prefix=("A" "B") export genes_path=("$HOME/in/genes/A.txt" "$HOME/in/genes/B.txt") If there are patterns defined in the input spec, then the prefix respects them. Here are several examples, where the patterns are: *.bam, *.bwa-index.tar.gz, foo*.sam, z*ra.sam file name prefix matches foo.zed.bam foo.zed *.bam xxx.bwa-index.tar.gz xxx *.bwa-index.tar.gz food.sam food foo*.sam zebra.sam zebra z*ra.sam xx.c xx xx.c.gz xx The only patterns we recognize are of the form x*.y. For example: legal *.sam, *.c.py, foo*.sam, a*b*c.baz ignored uu.txt x???.tar mon[a-z].py ''' _, file_entries, rest_hash = get_job_input_filenames(job_input_file) patterns_dict = get_input_spec_patterns() # Note: there may be multiple matches, choose the shortest prefix. def get_prefix(basename, key): best_prefix = None patterns = patterns_dict.get(key) if patterns is not None: for pattern in patterns: if fnmatch.fnmatch(basename, pattern): _, _, right_piece = pattern.rpartition("*") best_prefix = choose_shorter_string(best_prefix, basename[:-len(right_piece)]) if best_prefix is not None: return best_prefix else: # no matching rule parts = os.path.splitext(basename) if parts[1] == ".gz": parts = os.path.splitext(parts[0]) return parts[0] def factory(): return {'handler': [], 'basename': [], 'prefix': [], 'path': []} file_key_descs = collections.defaultdict(factory) rel_home_dir = get_input_dir(job_homedir) for key, entries in list(file_entries.items()): for entry in entries: filename = entry['trg_fname'] basename = os.path.basename(filename) prefix = get_prefix(basename, key) k_desc = file_key_descs[key] k_desc['handler'].append(entry['handler']) k_desc['basename'].append(basename) k_desc['prefix'].append(prefix) k_desc['path'].append(os.path.join(rel_home_dir, filename)) return file_key_descs, rest_hash
python
{ "resource": "" }
q12026
wait_for_a_future
train
def wait_for_a_future(futures, print_traceback=False):
    """
    Return the next future that completes.  If a KeyboardInterrupt is
    received, then the entire process is exited immediately.  See
    wait_for_all_futures for more notes.
    """
    while True:
        try:
            future = next(concurrent.futures.as_completed(futures, timeout=THREAD_TIMEOUT_MAX))
            break
        except concurrent.futures.TimeoutError:
            pass
        except KeyboardInterrupt:
            if print_traceback:
                traceback.print_stack()
            else:
                print('')
            os._exit(os.EX_IOERR)

    return future
python
{ "resource": "" }
q12027
_dict_raise_on_duplicates
train
def _dict_raise_on_duplicates(ordered_pairs):
    """
    Reject duplicate keys.
    """
    d = {}
    for k, v in ordered_pairs:
        if k in d:
            raise ValueError("duplicate key: %r" % (k,))
        else:
            d[k] = v
    return d
python
{ "resource": "" }
q12028
assert_consistent_reg_options
train
def assert_consistent_reg_options(exec_type, json_spec, executable_builder_exeception):
    """
    Validates the "regionalOptions" field and verifies all the regions
    used in "regionalOptions" have the same options.
    """
    reg_options_spec = json_spec.get('regionalOptions')
    json_fn = 'dxapp.json' if exec_type == 'app' else 'dxworkflow.json'
    if not isinstance(reg_options_spec, dict):
        # Include the file name in the message (it was missing in the original)
        raise executable_builder_exeception(
            "The field 'regionalOptions' in " + json_fn + " must be a mapping")
    if not reg_options_spec:
        raise executable_builder_exeception(
            "The field 'regionalOptions' in " + json_fn + " must be a non-empty mapping")
    regional_options_list = list(reg_options_spec.items())
    for region, opts_for_region in regional_options_list:
        if not isinstance(opts_for_region, dict):
            raise executable_builder_exeception(
                "The field 'regionalOptions['" + region + "']' in " + json_fn + " must be a mapping")
        if set(opts_for_region.keys()) != set(regional_options_list[0][1].keys()):
            if set(opts_for_region.keys()) - set(regional_options_list[0][1].keys()):
                with_key, without_key = region, regional_options_list[0][0]
                key_name = next(iter(set(opts_for_region.keys()) - set(regional_options_list[0][1].keys())))
            else:
                with_key, without_key = regional_options_list[0][0], region
                key_name = next(iter(set(regional_options_list[0][1].keys()) - set(opts_for_region.keys())))
            raise executable_builder_exeception(
                "All regions in regionalOptions must specify the same options; " +
                "%s was given for %s but not for %s" % (key_name, with_key, without_key)
            )
        if exec_type == 'app':
            for key in opts_for_region:
                if key in json_spec.get('runSpec', {}):
                    raise executable_builder_exeception(
                        key + " cannot be given in both runSpec and in regional options for " + region)
python
{ "resource": "" }
q12029
_check_suggestions
train
def _check_suggestions(app_json, publish=False): """ Examines the specified dxapp.json file and warns about any violations of suggestions guidelines. :raises: AppBuilderException for data objects that could not be found """ for input_field in app_json.get('inputSpec', []): for suggestion in input_field.get('suggestions', []): if 'project' in suggestion: try: project = dxpy.api.project_describe(suggestion['project'], {"permissions": True}) if 'PUBLIC' not in project['permissions'] and publish: logger.warn('Project {name} NOT PUBLIC!'.format(name=project['name'])) except dxpy.exceptions.DXAPIError as e: if e.code == 404: logger.warn('Suggested project {name} does not exist, or not accessible by user'.format( name=suggestion['project'])) if 'path' in suggestion: try: check_folder_exists(suggestion['project'], suggestion['path'], '') except ResolutionError as e: logger.warn('Folder {path} could not be found in project {project}'.format( path=suggestion['path'], project=suggestion['project'])) if '$dnanexus_link' in suggestion: if suggestion['$dnanexus_link'].startswith(('file-', 'record-')): try: dnanexus_link = dxpy.describe(suggestion['$dnanexus_link']) except dxpy.exceptions.DXAPIError as e: if e.code == 404: raise dxpy.app_builder.AppBuilderException( 'Suggested object {name} could not be found'.format( name=suggestion['$dnanexus_link'])) except Exception as e: raise dxpy.app_builder.AppBuilderException(str(e)) if 'value' in suggestion and isinstance(suggestion["value"], dict): if '$dnanexus_link' in suggestion['value']: # Check if we have JSON or string if isinstance(suggestion['value']['$dnanexus_link'], dict): if 'project' in suggestion['value']['$dnanexus_link']: try: dxpy.api.project_describe(suggestion['value']['$dnanexus_link']['project']) except dxpy.exceptions.DXAPIError as e: if e.code == 404: logger.warn('Suggested project {name} does not exist, or not accessible by user'.format( name=suggestion['value']['$dnanexus_link']['project'])) elif isinstance(suggestion['value']['$dnanexus_link'], basestring): if suggestion['value']['$dnanexus_link'].startswith(('file-', 'record-')): try: dnanexus_link = dxpy.describe(suggestion['value']['$dnanexus_link']) except dxpy.exceptions.DXAPIError as e: if e.code == 404: raise dxpy.app_builder.AppBuilderException( 'Suggested object {name} could not be found'.format( name=suggestion['value']['$dnanexus_link'])) except Exception as e: raise dxpy.app_builder.AppBuilderException(str(e))
python
{ "resource": "" }
q12030
_check_syntax
train
def _check_syntax(code, lang, temp_dir, enforce=True):
    """
    Checks that the code whose text is in CODE parses as LANG.

    Raises DXSyntaxError if there is a problem and "enforce" is True.
    """
    # This function needs the language to be explicitly set, so we can
    # generate an appropriate temp filename.
    if lang == 'python2.7':
        temp_basename = 'inlined_code_from_dxapp_json.py'
    elif lang == 'bash':
        temp_basename = 'inlined_code_from_dxapp_json.sh'
    else:
        raise ValueError('lang must be one of "python2.7" or "bash"')
    # Dump the contents out to a temporary file, then call _check_file_syntax.
    with open(os.path.join(temp_dir, temp_basename), 'w') as ofile:
        ofile.write(code)
    _check_file_syntax(os.path.join(temp_dir, temp_basename), temp_dir, override_lang=lang, enforce=enforce)
python
{ "resource": "" }
q12031
_check_file_syntax
train
def _check_file_syntax(filename, temp_dir, override_lang=None, enforce=True): """ Checks that the code in FILENAME parses, attempting to autodetect the language if necessary. Raises IOError if the file cannot be read. Raises DXSyntaxError if there is a problem and "enforce" is True. """ def check_python(filename): # Generate a semi-recognizable name to write the pyc to. Of # course it's possible that different files being scanned could # have the same basename, so this path won't be unique, but the # checks don't run concurrently so this shouldn't cause any # problems. pyc_path = os.path.join(temp_dir, os.path.basename(filename) + ".pyc") try: if USING_PYTHON2: filename = filename.encode(sys.getfilesystemencoding()) py_compile.compile(filename, cfile=pyc_path, doraise=True) finally: try: os.unlink(pyc_path) except OSError: pass def check_bash(filename): if platform.system() == 'Windows': logging.warn( 'Skipping bash syntax check due to unavailability of bash on Windows.') else: subprocess.check_output(["/bin/bash", "-n", filename], stderr=subprocess.STDOUT) if override_lang == 'python2.7': checker_fn = check_python elif override_lang == 'bash': checker_fn = check_bash elif filename.endswith('.py'): checker_fn = check_python elif filename.endswith('.sh'): checker_fn = check_bash else: # Ignore other kinds of files. return # Do a test read of the file to catch errors like the file not # existing or not being readable. open(filename) try: checker_fn(filename) except subprocess.CalledProcessError as e: print(filename + " has a syntax error! Interpreter output:", file=sys.stderr) for line in e.output.strip("\n").split("\n"): print(" " + line.rstrip("\n"), file=sys.stderr) if enforce: raise DXSyntaxError(filename + " has a syntax error") except py_compile.PyCompileError as e: print(filename + " has a syntax error! Interpreter output:", file=sys.stderr) print(" " + e.msg.strip(), file=sys.stderr) if enforce: raise DXSyntaxError(e.msg.strip())
python
{ "resource": "" }
q12032
_parse_app_spec
train
def _parse_app_spec(src_dir):
    """Returns the parsed contents of dxapp.json.

    Raises either AppBuilderException or a parser error (exit codes 3 or
    2 respectively) if this cannot be done.
    """
    if not os.path.isdir(src_dir):
        parser.error("%s is not a directory" % src_dir)
    if not os.path.exists(os.path.join(src_dir, "dxapp.json")):
        raise dxpy.app_builder.AppBuilderException("Directory %s does not contain dxapp.json: not a valid DNAnexus app source directory" % src_dir)
    with open(os.path.join(src_dir, "dxapp.json")) as app_desc:
        try:
            return json_load_raise_on_duplicates(app_desc)
        except Exception as e:
            raise dxpy.app_builder.AppBuilderException("Could not parse dxapp.json file as JSON: " + str(e.args))
python
{ "resource": "" }
q12033
DXApp.install
train
def install(self, **kwargs):
    """
    Installs the app in the current user's account.
    """
    if self._dxid is not None:
        return dxpy.api.app_install(self._dxid, **kwargs)
    else:
        return dxpy.api.app_install('app-' + self._name, alias=self._alias, **kwargs)
python
{ "resource": "" }
q12034
DXApp.uninstall
train
def uninstall(self, **kwargs):
    """
    Uninstalls the app from the current user's account.
    """
    if self._dxid is not None:
        return dxpy.api.app_uninstall(self._dxid, **kwargs)
    else:
        return dxpy.api.app_uninstall('app-' + self._name, alias=self._alias, **kwargs)
python
{ "resource": "" }
q12035
DXApp.delete
train
def delete(self, **kwargs):
    """
    Removes this app object from the platform.
    The current user must be a developer of the app.
    """
    if self._dxid is not None:
        return dxpy.api.app_delete(self._dxid, **kwargs)
    else:
        return dxpy.api.app_delete('app-' + self._name, alias=self._alias, **kwargs)
python
{ "resource": "" }
q12036
_version_exists
train
def _version_exists(json_spec, name=None, version=None):
    """
    Returns True if a global workflow with the given name and version
    already exists in the platform and the user has developer rights
    to the workflow.

    "name" and "version" can be passed if we already made a "describe"
    API call on the global workflow and so know the requested name and
    version already exists.
    """
    requested_name = json_spec['name']
    requested_version = json_spec['version']

    if requested_name == name and requested_version == version:
        return True
    else:
        try:
            desc_output = dxpy.api.global_workflow_describe('globalworkflow-' + json_spec['name'],
                                                            alias=json_spec['version'],
                                                            input_params={"fields": {"name": True,
                                                                                     "version": True}})
            return desc_output['name'] == json_spec['name'] and desc_output['version'] == json_spec['version']
        except dxpy.exceptions.DXAPIError:
            return False
        except:
            raise
python
{ "resource": "" }
q12037
_get_validated_stages
train
def _get_validated_stages(stages):
    """
    Validates stages of the workflow as a list of dictionaries.
    """
    if not isinstance(stages, list):
        raise WorkflowBuilderException("Stages must be specified as a list of dictionaries")
    validated_stages = []
    for index, stage in enumerate(stages):
        validated_stages.append(_get_validated_stage(stage, index))
    return validated_stages
python
{ "resource": "" }
q12038
_validate_json_for_regular_workflow
train
def _validate_json_for_regular_workflow(json_spec, args):
    """
    Validates fields used only for building a regular, project-based workflow.
    """
    validated = {}
    override_project_id, override_folder, override_workflow_name = \
        dxpy.executable_builder.get_parsed_destination(args.destination)
    validated['project'] = _get_destination_project(json_spec, args, override_project_id)
    validated['folder'] = _get_destination_folder(json_spec, override_folder)

    workflow_name = _get_workflow_name(json_spec, override_workflow_name)
    if not workflow_name:
        print('Warning: workflow name is not specified')
    else:
        validated['name'] = workflow_name

    return validated
python
{ "resource": "" }
q12039
_validate_json_for_global_workflow
train
def _validate_json_for_global_workflow(json_spec, args): """ Validates fields used for building a global workflow. Since building a global workflow is done after all the underlying workflows are built, which may be time-consuming, we validate as much as possible here. """ # TODO: verify the billTo can build the workflow # TODO: if the global workflow build fails add an option to interactively change billto # TODO: (or other simple fields) instead of failing altogether # TODO: get a confirmation before building a workflow that may be costly if 'name' not in json_spec: raise WorkflowBuilderException( "dxworkflow.json contains no 'name' field, but it is required to build a global workflow") if not dxpy.executable_builder.GLOBAL_EXEC_NAME_RE.match(json_spec['name']): raise WorkflowBuilderException( "The name of your workflow must match /^[a-zA-Z0-9._-]+$/") if json_spec['name'] != json_spec['name'].lower(): logger.warn('workflow name "{}" should be all lowercase'.format(json_spec['name'])) if 'version' not in json_spec: raise WorkflowBuilderException( "dxworkflow.json contains no 'version' field, but it is required to build a global workflow") if not dxpy.executable_builder.GLOBAL_EXEC_VERSION_RE.match(json_spec['version']): logger.warn('"version" {} should be semver compliant (e.g. of the form X.Y.Z)'.format(json_spec['version'])) if 'details' in json_spec: if not isinstance(json_spec['details'], dict): raise WorkflowBuilderException( 'The field "details" must be a dictionary') if 'regionalOptions' in json_spec: if not (isinstance(json_spec['regionalOptions'], dict) and json_spec['regionalOptions'] and all([isinstance(i, dict) for i in json_spec['regionalOptions'].values()])): raise WorkflowBuilderException( 'The field "regionalOptions" must be a non-empty dictionary whose values are dictionaries') if args.bill_to: json_spec["billTo"] = args.bill_to
python
{ "resource": "" }
q12040
_create_temporary_projects
train
def _create_temporary_projects(enabled_regions, args):
    """
    Creates a temporary project needed to build an underlying workflow
    for a global workflow. Returns a dictionary with region names as keys
    and project IDs as values.

    The regions in which projects will be created can be:
    i. regions specified in dxworkflow.json "regionalOptions"
    ii. regions specified as an argument to "dx build"
    iii. current context project, if none of the above are set
    If both args and dxworkflow.json specify regions, they must match.
    """
    # Create one temp project in each region
    projects_by_region = {}  # Project IDs by region
    for region in enabled_regions:
        try:
            project_input = {"name": "Temporary build project for dx build global workflow",
                             "region": region}
            if args.bill_to:
                project_input["billTo"] = args.bill_to
            temp_project = dxpy.api.project_new(project_input)["id"]
            projects_by_region[region] = temp_project
            logger.debug("Created temporary project {} to build in".format(temp_project))
        except:
            # Clean up any temp projects that might have been created
            if projects_by_region:
                dxpy.executable_builder.delete_temporary_projects(projects_by_region.values())
            err_exit()
    return projects_by_region
python
{ "resource": "" }
q12041
_build_global_workflow
train
def _build_global_workflow(json_spec, args): """ Creates a workflow in a temporary project for each enabled region and builds a global workflow on the platform based on these workflows. """ # First determine in which regions the global workflow needs to be available enabled_regions = _get_validated_enabled_regions(json_spec, args.region) # Verify all the stages are also enabled in these regions # TODO: Add support for dx building multi-region global workflows with applets _assert_executable_regions_match(enabled_regions, json_spec) workflows_by_region, projects_by_region = {}, {} # IDs by region try: # prepare "regionalOptions" field for the globalworkflow/new input workflows_by_region, projects_by_region = \ _build_underlying_workflows(enabled_regions, json_spec, args) regional_options = {} for region, workflow_id in workflows_by_region.items(): regional_options[region] = {'workflow': workflow_id} json_spec.update({'regionalOptions': regional_options}) # leave only fields that are actually used to build the workflow gwf_provided_keys = GLOBALWF_SUPPORTED_KEYS.intersection(set(json_spec.keys())) gwf_final_json = dict((k, v) for k, v in json_spec.items() if k in gwf_provided_keys) # we don't want to print the whole documentation to the screen so we'll remove these fields print_spec = copy.deepcopy(gwf_final_json) if "description" in gwf_final_json: del print_spec["description"] if "developerNotes" in gwf_final_json: del print_spec["developerNotes"] logger.info("Will create global workflow with spec: {}".format(json.dumps(print_spec))) # Create a new global workflow version on the platform global_workflow_id = dxpy.api.global_workflow_new(gwf_final_json)["id"] logger.info("Uploaded global workflow {n}/{v} successfully".format(n=gwf_final_json["name"], v=gwf_final_json["version"])) logger.info("You can publish this workflow with:") logger.info(" dx publish {n}/{v}".format(n=gwf_final_json["name"], v=gwf_final_json["version"])) finally: # Clean up if projects_by_region: dxpy.executable_builder.delete_temporary_projects(projects_by_region.values()) # Set any additional fields on the created workflow try: _set_categories_on_workflow(global_workflow_id, gwf_final_json.get("categories", [])) except: logger.warn( "The workflow {n}/{v} was created but setting categories failed".format(n=gwf_final_json['name'], v=gwf_final_json['version'])) raise return global_workflow_id
python
{ "resource": "" }
q12042
_build_or_update_workflow
train
def _build_or_update_workflow(json_spec, args):
    """
    Creates or updates a workflow on the platform.
    Returns the workflow ID, or None if the workflow cannot be created.
    """
    try:
        if args.mode == 'workflow':
            json_spec = _get_validated_json(json_spec, args)
            workflow_id = _build_regular_workflow(json_spec)
        elif args.mode == 'globalworkflow':
            # Verify if the global workflow already exists and if the user has developer rights to it.
            # If the global workflow name doesn't exist, the user is free to build it.
            # If the name does exist two things can be done:
            # * either update the requested version, if this version already exists
            # * or create the version if it doesn't exist
            existing_workflow = dxpy.executable_builder.verify_developer_rights('globalworkflow-' + json_spec['name'])
            if existing_workflow and _version_exists(json_spec, existing_workflow.name, existing_workflow.version):
                workflow_id = _update_global_workflow(json_spec, args, existing_workflow.id)
            else:
                json_spec = _get_validated_json(json_spec, args)
                workflow_id = _build_global_workflow(json_spec, args)
        else:
            raise WorkflowBuilderException("Unrecognized workflow type: {}".format(args.mode))
    except dxpy.exceptions.DXAPIError as e:
        raise e
    return workflow_id
python
{ "resource": "" }
q12043
_readable_part_size
train
def _readable_part_size(num_bytes):
    "Returns the file size in readable form."
    B = num_bytes
    KB = float(1024)
    MB = float(KB * 1024)
    GB = float(MB * 1024)
    TB = float(GB * 1024)

    if B < KB:
        return '{0} {1}'.format(B, 'bytes' if B != 1 else 'byte')
    elif KB <= B < MB:
        return '{0:.2f} KiB'.format(B/KB)
    elif MB <= B < GB:
        return '{0:.2f} MiB'.format(B/MB)
    elif GB <= B < TB:
        return '{0:.2f} GiB'.format(B/GB)
    elif TB <= B:
        return '{0:.2f} TiB'.format(B/TB)
python
{ "resource": "" }
q12044
DXFile.flush
train
def flush(self, multithread=True, **kwargs):
    '''
    Flushes the internal write buffer.
    '''
    if self._write_buf.tell() > 0:
        data = self._write_buf.getvalue()
        self._write_buf = BytesIO()

        if multithread:
            self._async_upload_part_request(data, index=self._cur_part, **kwargs)
        else:
            self.upload_part(data, self._cur_part, **kwargs)

        self._cur_part += 1

    if len(self._http_threadpool_futures) > 0:
        dxpy.utils.wait_for_all_futures(self._http_threadpool_futures)
        try:
            for future in self._http_threadpool_futures:
                if future.exception() != None:
                    raise future.exception()
        finally:
            self._http_threadpool_futures = set()
python
{ "resource": "" }
q12045
setup_ssh_tunnel
train
def setup_ssh_tunnel(job_id, local_port, remote_port):
    """
    Setup an ssh tunnel to the given job-id.  This will establish
    the port over the given local_port to the given remote_port
    and then exit, keeping the tunnel in place until the job is terminated.
    """
    cmd = ['dx', 'ssh', '--suppress-running-check', job_id, '-o', 'StrictHostKeyChecking no']
    cmd += ['-f', '-L', '{0}:localhost:{1}'.format(local_port, remote_port), '-N']
    subprocess.check_call(cmd)
python
{ "resource": "" }
q12046
poll_for_server_running
train
def poll_for_server_running(job_id):
    """
    Poll for the job to start running and post the SERVER_READY_TAG.
    """
    sys.stdout.write('Waiting for server in {0} to initialize ...'.format(job_id))
    sys.stdout.flush()
    desc = dxpy.describe(job_id)

    # Keep checking until the server has begun or it has failed.
    while(SERVER_READY_TAG not in desc['tags'] and desc['state'] != 'failed'):
        time.sleep(SLEEP_PERIOD)
        sys.stdout.write('.')
        sys.stdout.flush()
        desc = dxpy.describe(job_id)

    # If the server job failed, provide friendly advice.
    if desc['state'] == 'failed':
        msg = RED('Error:') + ' Server failed to run.\n'
        msg += 'You may want to check the job logs by running:'
        msg += BOLD('dx watch {0}'.format(job_id))
        err_exit(msg)
python
{ "resource": "" }
q12047
multi_platform_open
train
def multi_platform_open(cmd):
    """
    Take the given command and use the OS to automatically open the appropriate
    resource.  For instance, if a URL is provided, this will have the OS
    automatically open the URL in the default web browser.
    """
    if platform == "linux" or platform == "linux2":
        cmd = ['xdg-open', cmd]
    elif platform == "darwin":
        cmd = ['open', cmd]
    elif platform == "win32":
        cmd = ['start', cmd]
    subprocess.check_call(cmd)
python
{ "resource": "" }
q12048
get_notebook_app_versions
train
def get_notebook_app_versions():
    """
    Get the valid version numbers of the notebook app.
    """
    notebook_apps = dxpy.find_apps(name=NOTEBOOK_APP, all_versions=True)
    versions = [str(dxpy.describe(app['id'])['version']) for app in notebook_apps]
    return versions
python
{ "resource": "" }
q12049
run_notebook
train
def run_notebook(args, ssh_config_check): """ Launch the notebook server. """ # Check that ssh is setup. Currently notebooks require ssh for tunelling. ssh_config_check() if args.only_check_config: return # If the user requested a specific version of the notebook server, # get the executable id. if args.version is not None: executable = get_app_from_path('app-{0}/{1}'.format(NOTEBOOK_APP, args.version)) if executable is not None and 'id' in executable: executable = executable['id'] else: msg = RED('Warning:') + ' Invalid notebook version: {0}\nValid versions are: '.format(args.version) msg += BOLD('{0}'.format(str(get_notebook_app_versions()))) err_exit(msg) else: executable = 'app-{0}'.format(NOTEBOOK_APP) # Compose the command to launch the notebook cmd = ['dx', 'run', executable, '-inotebook_type={0}'.format(args.notebook_type)] cmd += ['-iinput_files={0}'.format(f) for f in args.notebook_files] cmd += ['-itimeout={0}'.format(args.timeout), '-y', '--brief', '--allow-ssh', '--instance-type', args.instance_type] if args.spark: cmd += ['-iinstall_spark=true'] if args.snapshot: cmd += ['-isnapshot={0}'.format(args.snapshot)] job_id = subprocess.check_output(cmd).strip() poll_for_server_running(job_id) if args.notebook_type in {'jupyter', 'jupyter_lab', 'jupyter_notebook'}: remote_port = 8888 setup_ssh_tunnel(job_id, args.port, remote_port) if args.open_server: multi_platform_open('http://localhost:{0}'.format(args.port)) print('A web browser should have opened to connect you to your notebook.') print('If no browser appears, or if you need to reopen a browser at any point, you should be able to point your browser to http://localhost:{0}'.format(args.port))
python
{ "resource": "" }
q12050
ModelDatastoreInputReader._validate_filters
train
def _validate_filters(cls, filters, model_class):
    """Validate user supplied filters.

    Validate filters are on existing properties and filter values
    have valid semantics.

    Args:
      filters: user supplied filters. Each filter should be a list or tuple of
        format (<property_name_as_str>, <query_operator_as_str>,
        <value_of_certain_type>). Value type is up to the property's type.
      model_class: the db.Model class for the entity type to apply filters on.

    Raises:
      BadReaderParamsError: if any filter is invalid in any way.
    """
    if not filters:
        return

    properties = model_class.properties()

    for f in filters:
        prop, _, val = f
        if prop not in properties:
            raise errors.BadReaderParamsError(
                "Property %s is not defined for entity type %s",
                prop, model_class.kind())

        # Validate the value of each filter. We need to know filters have
        # valid value to carry out splits.
        try:
            properties[prop].validate(val)
        except db.BadValueError, e:
            raise errors.BadReaderParamsError(e)
python
{ "resource": "" }
q12051
_normalize_entity
train
def _normalize_entity(value):
    """Return an entity from an entity or model instance."""
    if ndb is not None and isinstance(value, ndb.Model):
        return None
    if getattr(value, "_populate_internal_entity", None):
        return value._populate_internal_entity()
    return value
python
{ "resource": "" }
q12052
_normalize_key
train
def _normalize_key(value):
    """Return a key from an entity, model instance, key, or key string."""
    if ndb is not None and isinstance(value, (ndb.Model, ndb.Key)):
        return None
    if getattr(value, "key", None):
        return value.key()
    elif isinstance(value, basestring):
        return datastore.Key(value)
    else:
        return value
python
{ "resource": "" }
q12053
_ItemList.append
train
def append(self, item):
    """Add new item to the list.

    If needed, append will first flush existing items and clear existing items.

    Args:
      item: an item to add to the list.
    """
    if self.should_flush():
        self.flush()
    self.items.append(item)
python
{ "resource": "" }
q12054
_ItemList.flush
train
def flush(self):
    """Force a flush."""
    if not self.items:
        return

    retry = 0
    options = {"deadline": DATASTORE_DEADLINE}
    while retry <= self.__timeout_retries:
        try:
            self.__flush_function(self.items, options)
            self.clear()
            break
        except db.Timeout, e:
            logging.warning(e)
            logging.warning("Flushing '%s' timed out. Will retry for the %s time.", self, retry)
            retry += 1
            options["deadline"] *= 2
        except apiproxy_errors.RequestTooLargeError:
            self._log_largest_items()
            raise
    else:
        raise
python
{ "resource": "" }
q12055
_MutationPool.put
train
def put(self, entity):
    """Registers entity to put to datastore.

    Args:
      entity: an entity or model instance to put.
    """
    actual_entity = _normalize_entity(entity)
    if actual_entity is None:
        return self.ndb_put(entity)
    self.puts.append(actual_entity)
python
{ "resource": "" }
q12056
_MutationPool.delete
train
def delete(self, entity):
    """Registers entity to delete from datastore.

    Args:
      entity: an entity, model instance, or key to delete.
    """
    key = _normalize_key(entity)
    if key is None:
        return self.ndb_delete(entity)
    self.deletes.append(key)
python
{ "resource": "" }
q12057
_MutationPool._flush_puts
train
def _flush_puts(self, items, options):
    """Flush all puts to datastore."""
    datastore.Put(items, config=self._create_config(options))
python
{ "resource": "" }
q12058
_MutationPool._flush_ndb_puts
train
def _flush_ndb_puts(self, items, options):
    """Flush all NDB puts to datastore."""
    assert ndb is not None
    ndb.put_multi(items, config=self._create_config(options))
python
{ "resource": "" }
q12059
_MutationPool._create_config
train
def _create_config(self, options):
    """Creates datastore Config.

    Returns:
      A datastore_rpc.Configuration instance.
    """
    return datastore.CreateConfig(deadline=options["deadline"],
                                  force_writes=self.force_writes)
python
{ "resource": "" }
q12060
AbstractKeyRangeIterator.to_json
train
def to_json(self):
    """Serializes all states into json form.

    Returns:
      all states in json-compatible map.
    """
    cursor = self._get_cursor()
    cursor_object = False
    if cursor and isinstance(cursor, datastore_query.Cursor):
        cursor = cursor.to_websafe_string()
        cursor_object = True
    return {"key_range": self._key_range.to_json(),
            "query_spec": self._query_spec.to_json(),
            "cursor": cursor,
            "cursor_object": cursor_object}
python
{ "resource": "" }
q12061
AbstractKeyRangeIterator.from_json
train
def from_json(cls, json):
    """Reverse of to_json."""
    obj = cls(key_range.KeyRange.from_json(json["key_range"]),
              model.QuerySpec.from_json(json["query_spec"]))
    cursor = json["cursor"]
    # lint bug. Class method can access protected fields.
    # pylint: disable=protected-access
    if cursor and json["cursor_object"]:
        obj._cursor = datastore_query.Cursor.from_websafe_string(cursor)
    else:
        obj._cursor = cursor
    return obj
python
{ "resource": "" }
q12062
find_mapreduce_yaml
train
def find_mapreduce_yaml(status_file=__file__):
    """Traverse directory trees to find mapreduce.yaml file.

    Begins with the location of status.py and then moves on to check the
    working directory.

    Args:
      status_file: location of status.py, overridable for testing purposes.

    Returns:
      the path of mapreduce.yaml file or None if not found.
    """
    checked = set()
    yaml = _find_mapreduce_yaml(os.path.dirname(status_file), checked)
    if not yaml:
        yaml = _find_mapreduce_yaml(os.getcwd(), checked)
    return yaml
python
{ "resource": "" }
q12063
_find_mapreduce_yaml
train
def _find_mapreduce_yaml(start, checked):
    """Traverse the directory tree identified by start
    until a directory already in checked is encountered or the path
    of mapreduce.yaml is found.

    Checked is present both to make loop termination easy to reason
    about and so that the same directories do not get rechecked.

    Args:
      start: the path to start in and work upward from
      checked: the set of already examined directories

    Returns:
      the path of mapreduce.yaml file or None if not found.
    """
    dir = start
    while dir not in checked:
        checked.add(dir)
        for mr_yaml_name in MR_YAML_NAMES:
            yaml_path = os.path.join(dir, mr_yaml_name)
            if os.path.exists(yaml_path):
                return yaml_path
        dir = os.path.dirname(dir)
    return None
python
{ "resource": "" }
q12064
parse_mapreduce_yaml
train
def parse_mapreduce_yaml(contents):
    """Parses mapreduce.yaml file contents.

    Args:
      contents: mapreduce.yaml file contents.

    Returns:
      MapReduceYaml object with all the data from original file.

    Raises:
      errors.BadYamlError: when contents is not a valid mapreduce.yaml file.
    """
    try:
        builder = yaml_object.ObjectBuilder(MapReduceYaml)
        handler = yaml_builder.BuilderHandler(builder)
        listener = yaml_listener.EventListener(handler)
        listener.Parse(contents)

        mr_info = handler.GetResults()
    except (ValueError, yaml_errors.EventError), e:
        raise errors.BadYamlError(e)

    if len(mr_info) < 1:
        raise errors.BadYamlError("No configs found in mapreduce.yaml")
    if len(mr_info) > 1:
        raise errors.MultipleDocumentsInMrYaml("Found %d YAML documents" % len(mr_info))

    jobs = mr_info[0]

    job_names = set(j.name for j in jobs.mapreduce)
    if len(jobs.mapreduce) != len(job_names):
        raise errors.BadYamlError(
            "Overlapping mapreduce names; names must be unique")

    return jobs
python
{ "resource": "" }
q12065
get_mapreduce_yaml
train
def get_mapreduce_yaml(parse=parse_mapreduce_yaml):
    """Locates mapreduce.yaml, loads and parses its info.

    Args:
      parse: Used for testing.

    Returns:
      MapReduceYaml object.

    Raises:
      errors.BadYamlError: when contents is not a valid mapreduce.yaml file or
      the file is missing.
    """
    mr_yaml_path = find_mapreduce_yaml()
    if not mr_yaml_path:
        raise errors.MissingYamlError()
    mr_yaml_file = open(mr_yaml_path)
    try:
        return parse(mr_yaml_file.read())
    finally:
        mr_yaml_file.close()
python
{ "resource": "" }
q12066
MapReduceYaml.to_dict
train
def to_dict(mapreduce_yaml):
    """Converts a MapReduceYaml file into a JSON-encodable dictionary.

    For use in user-visible UI and internal methods for interfacing with
    user code (like param validation), as a list.

    Args:
      mapreduce_yaml: The Python representation of the mapreduce.yaml document.

    Returns:
      A list of configuration dictionaries.
    """
    all_configs = []
    for config in mapreduce_yaml.mapreduce:
        out = {
            "name": config.name,
            "mapper_input_reader": config.mapper.input_reader,
            "mapper_handler": config.mapper.handler,
        }
        if config.mapper.params_validator:
            out["mapper_params_validator"] = config.mapper.params_validator
        if config.mapper.params:
            param_defaults = {}
            for param in config.mapper.params:
                param_defaults[param.name] = param.default or param.value
            out["mapper_params"] = param_defaults
        if config.params:
            param_defaults = {}
            for param in config.params:
                param_defaults[param.name] = param.default or param.value
            out["params"] = param_defaults
        if config.mapper.output_writer:
            out["mapper_output_writer"] = config.mapper.output_writer
        all_configs.append(out)

    return all_configs
python
{ "resource": "" }
q12067
_sort_records_map
train
def _sort_records_map(records): """Map function sorting records. Converts records to KeyValue protos, sorts them by key and writes them into new GCS file. Creates _OutputFile entity to record resulting file name. Args: records: list of records which are serialized KeyValue protos. """ ctx = context.get() l = len(records) key_records = [None] * l logging.debug("Parsing") for i in range(l): proto = kv_pb.KeyValue() proto.ParseFromString(records[i]) key_records[i] = (proto.key(), records[i]) logging.debug("Sorting") key_records.sort(cmp=_compare_keys) logging.debug("Writing") mapper_spec = ctx.mapreduce_spec.mapper params = input_readers._get_params(mapper_spec) bucket_name = params.get("bucket_name") filename = (ctx.mapreduce_spec.name + "/" + ctx.mapreduce_id + "/output-" + ctx.shard_id + "-" + str(int(time.time()))) full_filename = "/%s/%s" % (bucket_name, filename) filehandle = cloudstorage.open(full_filename, mode="w") with output_writers.GCSRecordsPool(filehandle, ctx=ctx) as pool: for key_record in key_records: pool.append(key_record[1]) logging.debug("Finalizing") filehandle.close() entity = _OutputFile(key_name=full_filename, parent=_OutputFile.get_root_key(ctx.mapreduce_id)) entity.put()
python
{ "resource": "" }
q12068
_merge_map
train
def _merge_map(key, values, partial):
    """A map function used in merge phase.

    Stores (key, values) into KeyValues proto and yields its serialization.

    Args:
      key: values key.
      values: values themselves.
      partial: True if more values for this key will follow. False otherwise.

    Yields:
      The proto.
    """
    proto = kv_pb.KeyValues()
    proto.set_key(key)
    proto.value_list().extend(values)
    yield proto.Encode()
python
{ "resource": "" }
q12069
_hashing_map
train
def _hashing_map(binary_record):
    """A map function used in hash phase.

    Reads KeyValue from binary record.

    Args:
      binary_record: The binary record.

    Yields:
      The (key, value).
    """
    proto = kv_pb.KeyValue()
    proto.ParseFromString(binary_record)
    yield (proto.key(), proto.value())
python
{ "resource": "" }
q12070
_MergingReader.split_input
train
def split_input(cls, mapper_spec):
    """Split input into multiple shards."""
    filelists = mapper_spec.params[cls.FILES_PARAM]
    max_values_count = mapper_spec.params.get(cls.MAX_VALUES_COUNT_PARAM, -1)
    max_values_size = mapper_spec.params.get(cls.MAX_VALUES_SIZE_PARAM, -1)
    return [cls([0] * len(files), max_values_count, max_values_size)
            for files in filelists]
python
{ "resource": "" }
q12071
_MergingReader.validate
train
def validate(cls, mapper_spec):
    """Validate reader parameters in mapper_spec."""
    if mapper_spec.input_reader_class() != cls:
        raise errors.BadReaderParamsError("Input reader class mismatch")
    params = mapper_spec.params
    if cls.FILES_PARAM not in params:
        raise errors.BadReaderParamsError("Missing files parameter.")
python
{ "resource": "" }
q12072
_HashingGCSOutputWriter.validate
train
def validate(cls, mapper_spec):
    """Validates mapper specification.

    Args:
      mapper_spec: an instance of model.MapperSpec to validate.

    Raises:
      BadWriterParamsError: when Output writer class mismatch.
    """
    if mapper_spec.output_writer_class() != cls:
        raise errors.BadWriterParamsError("Output writer class mismatch")
    params = output_writers._get_params(mapper_spec)
    # Bucket Name is required
    if cls.BUCKET_NAME_PARAM not in params:
        raise errors.BadWriterParamsError(
            "%s is required for the _HashingGCSOutputWriter" % cls.BUCKET_NAME_PARAM)
python
{ "resource": "" }
q12073
_HashingGCSOutputWriter.to_json
train
def to_json(self):
  """Returns writer state to serialize in json.

  Returns:
    A json-izable version of the OutputWriter state.
  """
  # Use the member variable (since we don't have access to the context) to
  # flush each pool to minimize the size of each filehandle before we
  # serialize it.
  for pool in self._pools:
    if pool is not None:
      pool.flush(True)
  return {"filehandles": pickle.dumps(self._filehandles)}
python
{ "resource": "" }
q12074
JobConfig._get_mapper_params
train
def _get_mapper_params(self):
  """Converts self to model.MapperSpec.params."""
  reader_params = self.input_reader_cls.params_to_json(
      self.input_reader_params)
  # TODO(user): Do the same for writer params.
  return {"input_reader": reader_params,
          "output_writer": self.output_writer_params}
python
{ "resource": "" }
q12075
JobConfig._get_mapper_spec
train
def _get_mapper_spec(self):
  """Converts self to model.MapperSpec."""
  # pylint: disable=g-import-not-at-top
  from mapreduce import model

  return model.MapperSpec(
      handler_spec=util._obj_to_path(self.mapper),
      input_reader_spec=util._obj_to_path(self.input_reader_cls),
      params=self._get_mapper_params(),
      shard_count=self.shard_count,
      output_writer_spec=util._obj_to_path(self.output_writer_cls))
python
{ "resource": "" }
q12076
JobConfig._get_mr_params
train
def _get_mr_params(self):
  """Converts self to model.MapreduceSpec.params."""
  return {"force_writes": self._force_writes,
          "done_callback": self.done_callback_url,
          "user_params": self.user_params,
          "shard_max_attempts": self.shard_max_attempts,
          "task_max_attempts": self._task_max_attempts,
          "task_max_data_processing_attempts":
              self._task_max_data_processing_attempts,
          "queue_name": self.queue_name,
          "base_path": self._base_path,
          "app_id": self._app,
          "api_version": self._api_version}
python
{ "resource": "" }
q12077
JobConfig._get_default_mr_params
train
def _get_default_mr_params(cls):
  """Gets default values for old API."""
  cfg = cls(_lenient=True)
  mr_params = cfg._get_mr_params()
  mr_params["api_version"] = 0
  return mr_params
python
{ "resource": "" }
q12078
JobConfig._to_map_job_config
train
def _to_map_job_config(cls,
                       mr_spec,
                       # TODO(user): Remove this parameter after it can be
                       # read from mr_spec.
                       queue_name):
  """Converts model.MapreduceSpec back to JobConfig.

  This method allows our internal methods to use JobConfig directly.
  It also allows us to expose JobConfig as an API during execution,
  even though it is not saved into the datastore.

  Args:
    mr_spec: model.MapreduceSpec.
    queue_name: queue name.

  Returns:
    The JobConfig object for this job.
  """
  mapper_spec = mr_spec.mapper
  # 0 means all the old APIs before api_version is introduced.
  api_version = mr_spec.params.get("api_version", 0)
  old_api = api_version == 0

  # Deserialize params from json if input_reader/output_writer are new API.
  input_reader_cls = mapper_spec.input_reader_class()
  input_reader_params = input_readers._get_params(mapper_spec)
  if issubclass(input_reader_cls, input_reader.InputReader):
    input_reader_params = input_reader_cls.params_from_json(
        input_reader_params)

  output_writer_cls = mapper_spec.output_writer_class()
  output_writer_params = output_writers._get_params(mapper_spec)
  # TODO(user): Call json (de)serialization for writer.
  # if (output_writer_cls and
  #     issubclass(output_writer_cls, output_writer.OutputWriter)):
  #   output_writer_params = output_writer_cls.params_from_json(
  #       output_writer_params)

  # We can not always convert MapreduceSpec generated by older API
  # to JobConfig. Thus, mr framework should use/expose the returned JobConfig
  # object with caution when a job is started with an old API.
  # In this case, this method only tries not to blow up and assemble a
  # JobConfig object as accurate as possible.
  return cls(_lenient=old_api,
             job_name=mr_spec.name,
             job_id=mr_spec.mapreduce_id,
             # handler_spec from older API may not have map_job.Mapper type.
             mapper=util.for_name(mapper_spec.handler_spec),
             input_reader_cls=input_reader_cls,
             input_reader_params=input_reader_params,
             output_writer_cls=output_writer_cls,
             output_writer_params=output_writer_params,
             shard_count=mapper_spec.shard_count,
             queue_name=queue_name,
             user_params=mr_spec.params.get("user_params"),
             shard_max_attempts=mr_spec.params.get("shard_max_attempts"),
             done_callback_url=mr_spec.params.get("done_callback"),
             _force_writes=mr_spec.params.get("force_writes"),
             _base_path=mr_spec.params["base_path"],
             _task_max_attempts=mr_spec.params.get("task_max_attempts"),
             _task_max_data_processing_attempts=(
                 mr_spec.params.get("task_max_data_processing_attempts")),
             _hooks_cls=util.for_name(mr_spec.hooks_class_name),
             _app=mr_spec.params.get("app_id"),
             _api_version=api_version)
python
{ "resource": "" }
q12079
RecordsWriter.__write_record
train
def __write_record(self, record_type, data):
  """Write single physical record."""
  length = len(data)

  crc = crc32c.crc_update(crc32c.CRC_INIT, [record_type])
  crc = crc32c.crc_update(crc, data)
  crc = crc32c.crc_finalize(crc)

  self.__writer.write(
      struct.pack(_HEADER_FORMAT, _mask_crc(crc), length, record_type))
  self.__writer.write(data)
  self.__position += _HEADER_LENGTH + length
python
{ "resource": "" }
q12080
RecordsWriter.write
train
def write(self, data):
  """Write single record.

  Args:
    data: record data to write as string, byte array or byte sequence.
  """
  block_remaining = _BLOCK_SIZE - self.__position % _BLOCK_SIZE

  if block_remaining < _HEADER_LENGTH:
    # Header won't fit into remainder.
    self.__writer.write('\x00' * block_remaining)
    self.__position += block_remaining
    block_remaining = _BLOCK_SIZE

  if block_remaining < len(data) + _HEADER_LENGTH:
    first_chunk = data[:block_remaining - _HEADER_LENGTH]
    self.__write_record(_RECORD_TYPE_FIRST, first_chunk)
    data = data[len(first_chunk):]

    while True:
      block_remaining = _BLOCK_SIZE - self.__position % _BLOCK_SIZE
      if block_remaining >= len(data) + _HEADER_LENGTH:
        self.__write_record(_RECORD_TYPE_LAST, data)
        break
      else:
        chunk = data[:block_remaining - _HEADER_LENGTH]
        self.__write_record(_RECORD_TYPE_MIDDLE, chunk)
        data = data[len(chunk):]
  else:
    self.__write_record(_RECORD_TYPE_FULL, data)
python
{ "resource": "" }
q12081
RecordsWriter._pad_block
train
def _pad_block(self):
  """Pad the current block with zero bytes.

  The reader simply treats these bytes as a corrupted record and skips to
  the next block.

  This method is idempotent.
  """
  pad_length = _BLOCK_SIZE - self.__position % _BLOCK_SIZE
  if pad_length and pad_length != _BLOCK_SIZE:
    self.__writer.write('\x00' * pad_length)
    self.__position += pad_length
python
{ "resource": "" }
q12082
RecordsReader.__try_read_record
train
def __try_read_record(self):
  """Try reading a record.

  Returns:
    (data, record_type) tuple.

  Raises:
    EOFError: when end of file was reached.
    InvalidRecordError: when valid record could not be read.
  """
  block_remaining = _BLOCK_SIZE - self.__reader.tell() % _BLOCK_SIZE
  if block_remaining < _HEADER_LENGTH:
    return ('', _RECORD_TYPE_NONE)

  header = self.__reader.read(_HEADER_LENGTH)
  if len(header) != _HEADER_LENGTH:
    raise EOFError('Read %s bytes instead of %s' %
                   (len(header), _HEADER_LENGTH))

  (masked_crc, length, record_type) = struct.unpack(_HEADER_FORMAT, header)
  crc = _unmask_crc(masked_crc)

  if length + _HEADER_LENGTH > block_remaining:
    # A record can't be bigger than one block.
    raise errors.InvalidRecordError('Length is too big')

  data = self.__reader.read(length)
  if len(data) != length:
    raise EOFError('Not enough data read. Expected: %s but got %s' %
                   (length, len(data)))

  if record_type == _RECORD_TYPE_NONE:
    return ('', record_type)

  actual_crc = crc32c.crc_update(crc32c.CRC_INIT, [record_type])
  actual_crc = crc32c.crc_update(actual_crc, data)
  actual_crc = crc32c.crc_finalize(actual_crc)
  if actual_crc != crc:
    raise errors.InvalidRecordError('Data crc does not match')
  return (data, record_type)
python
{ "resource": "" }
q12083
RecordsReader.__sync
train
def __sync(self):
  """Skip reader to the block boundary."""
  pad_length = _BLOCK_SIZE - self.__reader.tell() % _BLOCK_SIZE
  if pad_length and pad_length != _BLOCK_SIZE:
    data = self.__reader.read(pad_length)
    if len(data) != pad_length:
      raise EOFError('Read %d bytes instead of %d' %
                     (len(data), pad_length))
python
{ "resource": "" }
q12084
RecordsReader.read
train
def read(self):
  """Reads record from current position in reader.

  Returns:
    original bytes stored in a single record.
  """
  data = None
  while True:
    last_offset = self.tell()
    try:
      (chunk, record_type) = self.__try_read_record()
      if record_type == _RECORD_TYPE_NONE:
        self.__sync()
      elif record_type == _RECORD_TYPE_FULL:
        if data is not None:
          logging.warning(
              "Ordering corruption: Got FULL record while already "
              "in a chunk at offset %d", last_offset)
        return chunk
      elif record_type == _RECORD_TYPE_FIRST:
        if data is not None:
          logging.warning(
              "Ordering corruption: Got FIRST record while already "
              "in a chunk at offset %d", last_offset)
        data = chunk
      elif record_type == _RECORD_TYPE_MIDDLE:
        if data is None:
          logging.warning(
              "Ordering corruption: Got MIDDLE record before FIRST "
              "record at offset %d", last_offset)
        else:
          data += chunk
      elif record_type == _RECORD_TYPE_LAST:
        if data is None:
          logging.warning(
              "Ordering corruption: Got LAST record but no chunk is in "
              "progress at offset %d", last_offset)
        else:
          result = data + chunk
          data = None
          return result
      else:
        raise errors.InvalidRecordError(
            "Unsupported record type: %s" % record_type)
    except errors.InvalidRecordError, e:
      logging.warning("Invalid record encountered at %s (%s). Syncing to "
                      "the next block", last_offset, e)
      data = None
      self.__sync()
python
{ "resource": "" }
q12085
MapperPipeline.run
train
def run(self,
        job_name,
        handler_spec,
        input_reader_spec,
        output_writer_spec=None,
        params=None,
        shards=None,
        base_path=None):
  """Start a mapreduce job.

  Args:
    job_name: mapreduce name. Only for display purposes.
    handler_spec: fully qualified name of your map function/class.
    input_reader_spec: fully qualified name of the input reader class.
    output_writer_spec: fully qualified name of the output writer class.
    params: a dictionary of parameters for input reader and output
      writer initialization.
    shards: number of shards. This provides a guide to mapreduce. The real
      number of shards is determined by how the input is split.
    base_path: base path of the mapreduce library handlers. Defaults to
      parameters.config.BASE_PATH.
  """
  if shards is None:
    shards = parameters.config.SHARD_COUNT
  if base_path is None:
    base_path = parameters.config.BASE_PATH
  mapreduce_id = control.start_map(
      job_name,
      handler_spec,
      input_reader_spec,
      params or {},
      mapreduce_parameters={
          "done_callback": self.get_callback_url(),
          "done_callback_method": "GET",
          "pipeline_id": self.pipeline_id,
          "base_path": base_path,
      },
      shard_count=shards,
      output_writer_spec=output_writer_spec,
      queue_name=self.queue_name,
  )
  self.fill(self.outputs.job_id, mapreduce_id)
  self.set_status(console_url="%s/detail?mapreduce_id=%s" % (
      base_path, mapreduce_id))
python
{ "resource": "" }
q12086
MapperPipeline.callback
train
def callback(self):
  """Callback after this async pipeline finishes."""
  if self.was_aborted:
    return

  mapreduce_id = self.outputs.job_id.value
  mapreduce_state = model.MapreduceState.get_by_job_id(mapreduce_id)
  if mapreduce_state.result_status != model.MapreduceState.RESULT_SUCCESS:
    self.retry("Job %s had status %s" % (
        mapreduce_id, mapreduce_state.result_status))
    return

  mapper_spec = mapreduce_state.mapreduce_spec.mapper
  outputs = []
  output_writer_class = mapper_spec.output_writer_class()
  if (output_writer_class and
      mapreduce_state.result_status == model.MapreduceState.RESULT_SUCCESS):
    outputs = output_writer_class.get_filenames(mapreduce_state)

  self.fill(self.outputs.result_status, mapreduce_state.result_status)
  self.fill(self.outputs.counters, mapreduce_state.counters_map.to_dict())
  self.complete(outputs)
python
{ "resource": "" }
q12087
start_map
train
def start_map(name,
              handler_spec,
              reader_spec,
              mapper_parameters,
              shard_count=None,
              output_writer_spec=None,
              mapreduce_parameters=None,
              base_path=None,
              queue_name=None,
              eta=None,
              countdown=None,
              hooks_class_name=None,
              _app=None,
              in_xg_transaction=False):
  """Start a new, mapper-only mapreduce.

  Deprecated! Use map_job.start instead.

  If a value can be specified both from an explicit argument and from
  a dictionary, the value from the explicit argument wins.

  Args:
    name: mapreduce name. Used only for display purposes.
    handler_spec: fully qualified name of mapper handler function/class
      to call.
    reader_spec: fully qualified name of mapper reader to use.
    mapper_parameters: dictionary of parameters to pass to mapper. These are
      mapper-specific and also used for reader/writer initialization.
      Should have format {"input_reader": {}, "output_writer": {}}. Old
      deprecated style does not have sub dictionaries.
    shard_count: number of shards to create.
    output_writer_spec: fully qualified name of the output writer to use.
    mapreduce_parameters: dictionary of mapreduce parameters relevant to the
      whole job.
    base_path: base path of mapreduce library handler specified in app.yaml.
      "/mapreduce" by default.
    queue_name: taskqueue queue name to be used for mapreduce tasks.
      See util.get_queue_name.
    eta: absolute time when the MR should execute. May not be specified
      if 'countdown' is also supplied. This may be timezone-aware or
      timezone-naive.
    countdown: time in seconds into the future that this MR should execute.
      Defaults to zero.
    hooks_class_name: fully qualified name of a hooks.Hooks subclass.
    _app: app id for the job (internal use).
    in_xg_transaction: controls what transaction scope to use to start this MR
      job. If True, there has to be an already opened cross-group transaction
      scope. MR will use one entity group from it. If False, MR will create an
      independent transaction to start the job regardless of any existing
      transaction scopes.

  Returns:
    mapreduce id as string.
  """
  if shard_count is None:
    shard_count = parameters.config.SHARD_COUNT
  if mapper_parameters:
    mapper_parameters = dict(mapper_parameters)

  # Make sure this old API fills all parameters with default values.
  mr_params = map_job.JobConfig._get_default_mr_params()
  if mapreduce_parameters:
    mr_params.update(mapreduce_parameters)

  # Override default values if user specified them as arguments.
  if base_path:
    mr_params["base_path"] = base_path
  mr_params["queue_name"] = util.get_queue_name(queue_name)

  mapper_spec = model.MapperSpec(handler_spec,
                                 reader_spec,
                                 mapper_parameters,
                                 shard_count,
                                 output_writer_spec=output_writer_spec)

  if in_xg_transaction and not db.is_in_transaction():
    logging.warning("Expects an opened xg transaction to start mapreduce "
                    "when transactional is True.")

  return handlers.StartJobHandler._start_map(
      name,
      mapper_spec,
      mr_params,
      # TODO(user): Now that "queue_name" is part of mr_params.
      # Remove all the other ways to get queue_name after one release.
      queue_name=mr_params["queue_name"],
      eta=eta,
      countdown=countdown,
      hooks_class_name=hooks_class_name,
      _app=_app,
      in_xg_transaction=in_xg_transaction)
python
{ "resource": "" }
q12088
Job.get_status
train
def get_status(self):
  """Get status enum.

  Returns:
    One of the status enum values.
  """
  self.__update_state()
  if self._state.active:
    return self.RUNNING
  else:
    return self._state.result_status
python
{ "resource": "" }
q12089
Job.get_counter
train
def get_counter(self, counter_name, default=0):
  """Get the value of the named counter from this job.

  When a job is running, counter values won't be very accurate.

  Args:
    counter_name: name of the counter in string.
    default: default value if the counter doesn't exist.

  Returns:
    Value in int of the named counter.
  """
  self.__update_state()
  return self._state.counters_map.get(counter_name, default)
python
{ "resource": "" }
q12090
Job.get_outputs
train
def get_outputs(self):
  """Get outputs of this job.

  Should only be called if the status is SUCCESS.

  Yields:
    Iterators, one for each shard. Each iterator is from the argument of
    map_job.output_writer.commit_output.
  """
  assert self.SUCCESS == self.get_status()
  ss = model.ShardState.find_all_by_mapreduce_state(self._state)
  for s in ss:
    yield iter(s.writer_state.get("outs", []))
python
{ "resource": "" }
q12091
Job.submit
train
def submit(cls, job_config, in_xg_transaction=False):
  """Submit the job to run.

  Args:
    job_config: an instance of map_job.MapJobConfig.
    in_xg_transaction: controls what transaction scope to use to start this MR
      job. If True, there has to be an already opened cross-group transaction
      scope. MR will use one entity group from it. If False, MR will create an
      independent transaction to start the job regardless of any existing
      transaction scopes.

  Returns:
    a Job instance representing the submitted job.
  """
  cls.__validate_job_config(job_config)
  mapper_spec = job_config._get_mapper_spec()

  # Create mr spec.
  mapreduce_params = job_config._get_mr_params()
  mapreduce_spec = model.MapreduceSpec(
      job_config.job_name,
      job_config.job_id,
      mapper_spec.to_json(),
      mapreduce_params,
      util._obj_to_path(job_config._hooks_cls))

  # Save states and enqueue task.
  if in_xg_transaction:
    propagation = db.MANDATORY
  else:
    propagation = db.INDEPENDENT

  state = None

  @db.transactional(propagation=propagation)
  def _txn():
    state = cls.__create_and_save_state(job_config, mapreduce_spec)
    cls.__add_kickoff_task(job_config, mapreduce_spec)
    return state

  state = _txn()
  return cls(state)
python
{ "resource": "" }
q12092
Job.__update_state
train
def __update_state(self):
  """Fetches most up to date state from db."""
  # Only if the job was not in a terminal state.
  if self._state.active:
    self._state = self.__get_state_by_id(self.job_config.job_id)
python
{ "resource": "" }
q12093
Job.__get_state_by_id
train
def __get_state_by_id(cls, job_id):
  """Get job state by id.

  Args:
    job_id: job id.

  Returns:
    model.MapreduceState for the job.

  Raises:
    ValueError: if the job state is missing.
  """
  state = model.MapreduceState.get_by_job_id(job_id)
  if state is None:
    raise ValueError("Job state for job %s is missing." % job_id)
  return state
python
{ "resource": "" }
q12094
Job.__create_and_save_state
train
def __create_and_save_state(cls, job_config, mapreduce_spec):
  """Save map job state to datastore.

  Save state to datastore so that UI can see it immediately.

  Args:
    job_config: map_job.JobConfig.
    mapreduce_spec: model.MapreduceSpec.

  Returns:
    model.MapreduceState for this job.
  """
  state = model.MapreduceState.create_new(job_config.job_id)
  state.mapreduce_spec = mapreduce_spec
  state.active = True
  state.active_shards = 0
  state.app_id = job_config._app
  config = datastore_rpc.Configuration(force_writes=job_config._force_writes)
  state.put(config=config)
  return state
python
{ "resource": "" }
q12095
Job.__add_kickoff_task
train
def __add_kickoff_task(cls, job_config, mapreduce_spec):
  """Add kickoff task to taskqueue.

  Args:
    job_config: map_job.JobConfig.
    mapreduce_spec: model.MapreduceSpec.
  """
  params = {"mapreduce_id": job_config.job_id}
  # Task is not named so that it can be added within a transaction.
  kickoff_task = taskqueue.Task(
      # TODO(user): Perhaps make this url a computed field of job_config.
      url=job_config._base_path + "/kickoffjob_callback/" + job_config.job_id,
      headers=util._get_task_headers(job_config.job_id),
      params=params)
  if job_config._hooks_cls:
    hooks = job_config._hooks_cls(mapreduce_spec)
    try:
      hooks.enqueue_kickoff_task(kickoff_task, job_config.queue_name)
      return
    except NotImplementedError:
      pass
  kickoff_task.add(job_config.queue_name, transactional=True)
python
{ "resource": "" }
q12096
JsonMixin.to_json_str
train
def to_json_str(self):
  """Convert data to json string representation.

  Returns:
    json representation as string.
  """
  _json = self.to_json()
  try:
    return json.dumps(_json, sort_keys=True, cls=JsonEncoder)
  except:
    logging.exception("Could not serialize JSON: %r", _json)
    raise
python
{ "resource": "" }
q12097
JsonMixin.from_json_str
train
def from_json_str(cls, json_str):
  """Convert json string representation into class instance.

  Args:
    json_str: json representation as string.

  Returns:
    New instance of the class with data loaded from json string.
  """
  return cls.from_json(json.loads(json_str, cls=JsonDecoder))
python
{ "resource": "" }
q12098
JsonProperty.get_value_for_datastore
train
def get_value_for_datastore(self, model_instance):
  """Gets value for datastore.

  Args:
    model_instance: instance of the model class.

  Returns:
    datastore-compatible value.
  """
  value = super(JsonProperty, self).get_value_for_datastore(model_instance)
  if not value:
    return None
  json_value = value
  if not isinstance(value, dict):
    json_value = value.to_json()
  if not json_value:
    return None
  return datastore_types.Text(json.dumps(
      json_value, sort_keys=True, cls=JsonEncoder))
python
{ "resource": "" }
q12099
JsonProperty.make_value_from_datastore
train
def make_value_from_datastore(self, value):
  """Convert value from datastore representation.

  Args:
    value: datastore value.

  Returns:
    value to store in the model.
  """
  if value is None:
    return None
  _json = json.loads(value, cls=JsonDecoder)
  if self.data_type == dict:
    return _json
  return self.data_type.from_json(_json)
python
{ "resource": "" }