code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
m = ( "\n" "######################################################################################\n" # noqa "# #\n" # noqa "# Your version of Python appears to be out of date and lack important security #\n" # noqa "# features. Please update to Python >= 2.7.9 or `pip install requests[security]`. #\n" # noqa "# #\n" # noqa "# InsecurePlatformWarning: A true SSLContext object is not available. This #\n" # noqa "# prevents urllib3 from configuring SSL appropriately and may cause certain #\n" # noqa "# SSL connections to fail. For more information, see #\n" # noqa "# https://urllib3.readthedocs.org/en/latest/security.html#insecureplatformwarning. #\n" # noqa "# #\n" # noqa "######################################################################################\n" ) # noqa if is_insecure_platform(): click.echo(m, err=True) return True else: cli_log.debug("Python SSLContext passed") return False
def warn_if_insecure_platform()
Produces a nice message if an SSLContext object is not available. Returns True if the platform is insecure, False if the platform is secure.
3.468157
3.261243
1.063446
r = requests.get(url, stream=True) if r.status_code != 200: cli_log.error("Failed to download file: %s" % r.json()["message"]) local_full_path = get_download_dest(input_path, r.url) original_filename = os.path.split(local_full_path)[-1] with open(local_full_path, "wb") as f: click.echo("Downloading {}".format(original_filename), err=True) for chunk in r.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks f.write(chunk) f.flush() pprint("Successfully downloaded %s to %s" % (original_filename, local_full_path), True)
def download_file_helper(url, input_path)
Manages the chunked downloading of a file given a URL
2.392796
2.393686
0.999628
for ext in SUPPORTED_EXTENSIONS: if f.endswith(ext): return True log.error("Failed upload: Not an allowed file extension: %s", f) raise SystemExit
def check_for_allowed_file(f)
Checks a file extension against the list of supported sequence file extensions
5.155034
5.281613
0.976034
home_dir = os.path.expanduser("~") abs_path = os.path.abspath(fp) return abs_path.replace(home_dir, "~")
def collapse_user(fp)
Converts a path back to ~/ from expanduser()
2.887978
2.4721
1.168229
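A minimal usage sketch for collapse_user(), assuming a POSIX home directory; the path below is made up: import os home = os.path.expanduser("~") print(collapse_user(os.path.join(home, "data", "run1.fastq")))  # e.g. "/home/alice/data/run1.fastq" -> "~/data/run1.fastq"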
@wraps(fn) def telemetry_wrapper(*args, **kwargs): # By default, do not instantiate a client, # and inherit the telemetry settings passed client = None if len(args) > 0 and isinstance(args[0], click.Context): ctx = args[0] # First try to get off API obj if ctx.obj and ctx.obj.get("API") is not None: client = ctx.obj["API"]._raven_client # Else try to see if telemetry param is set elif ctx.params.get("telemetry", False): client = get_raven_client() # Finally check for the ctx.obj['TELEMETRY'] bool elif ctx.obj and ctx.obj.get("TELEMETRY", False): client = get_raven_client() try: return fn(*args, **kwargs) except Exception as e: if client: client.captureException() client.context.clear() sys.stdout = StringIO() raise e return telemetry_wrapper
def telemetry(fn)
Decorator for CLI and other functions that need special Sentry client handling. This function is only required for functions that may exit *before* we set up the ._raven_client object on the Api instance *or* that specifically catch and re-raise exceptions or call sys.exit directly. Note that this also overwrites verbose Raven logs on exit ("Sentry is waiting to send..."), see https://github.com/getsentry/raven-python/issues/904 for more details.
3.892829
3.581196
1.087019
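A hedged sketch of how the decorator might be applied to a Click command so the wrapper sees the click.Context as its first argument (the command name and body are hypothetical): import click @click.command() @click.pass_context @telemetry def report(ctx): # any exception raised here is captured by the Sentry client, if one was configured click.echo("running report")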
@wraps(fn) def pretty_errors_wrapper(*args, **kwargs): try: fn(*args, **kwargs) except (OneCodexException, UploadException) as e: sys.stderr.write("\nERROR: {}".format(e)) sys.stderr.write("\nPlease feel free to contact us for help at help@onecodex.com\n\n") sys.exit(1) return pretty_errors_wrapper
def pretty_errors(fn)
Decorator for the CLI for catching errors and then calling sys.exit(1). For now, this is intended for use with the CLI and scripts where we only use OneCodexExceptions (incl. ValidationError) and UploadException.
3.341371
2.489307
1.34229
try: atexit.unregister(func, *args, **kwargs) except AttributeError: # This code runs in Python 2.7 *only* # Only replace with a noop, don't delete during iteration for i in range(len(atexit._exithandlers)): if atexit._exithandlers[i] == (func, args, kwargs): atexit._exithandlers[i] = (lambda: None, [], {}) break
def atexit_unregister(func, *args, **kwargs)
Python 2/3 compatible method for unregistering an exit function. Python 2 has no atexit.unregister function :/
3.387188
3.376664
1.003117
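A small usage sketch (the handler name is hypothetical); with no extra args this path works on both Python 2 and 3: import atexit def _cleanup(): pass atexit.register(_cleanup) # ... later, if the handler should no longer run at exit: atexit_unregister(_cleanup)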
from skbio.tree import TreeNode # build all the nodes nodes = {} for tax_id in self.taxonomy.index: node = TreeNode(name=tax_id, length=1) node.tax_name = self.taxonomy["name"][tax_id] node.rank = self.taxonomy["rank"][tax_id] node.parent_tax_id = self.taxonomy["parent_tax_id"][tax_id] nodes[tax_id] = node # generate all the links for tax_id in self.taxonomy.index: try: parent = nodes[nodes[tax_id].parent_tax_id] except KeyError: if tax_id != "1": warnings.warn( "tax_id={} has parent_tax_id={} which is not in tree" "".format(tax_id, nodes[tax_id].parent_tax_id) ) continue parent.append(nodes[tax_id]) return nodes["1"]
def tree_build(self)
Build a tree from the taxonomy data present in this `ClassificationsDataFrame` or `SampleCollection`. Returns ------- `skbio.tree.TreeNode`, the root node of a tree that contains all the taxa in the current analysis and their parents leading back to the root node.
2.532822
2.492864
1.016029
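A hedged usage sketch, assuming `samples` is a SampleCollection with taxonomy data; tree_prune_rank() is defined further below: tree = samples.tree_build() genus_tree = samples.tree_prune_rank(tree, rank="genus")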
tax_ids_to_keep = [] for tax_id in tax_ids: tax_ids_to_keep.append(tax_id) tax_ids_to_keep.extend([x.name for x in tree.find(tax_id).ancestors()]) tree = tree.copy() tree.remove_deleted(lambda n: n.name not in tax_ids_to_keep) return tree
def tree_prune_tax_ids(self, tree, tax_ids)
Prunes a tree back to contain only the tax_ids in the list and their parents. Parameters ---------- tree : `skbio.tree.TreeNode` The root node of the tree to perform this operation on. tax_ids : `list` A `list` of taxonomic IDs to keep in the tree. Returns ------- `skbio.tree.TreeNode`, the root of a tree containing the given taxonomic IDs and their parents, leading back to the root node.
2.862686
3.265022
0.876774
if rank is None: return tree.copy() tree = tree.copy() for node in tree.postorder(): if node.rank == rank: node._above_rank = True elif any([getattr(n, "_above_rank", False) for n in node.children]): node._above_rank = True else: node._above_rank = False tree.remove_deleted(lambda n: not getattr(n, "_above_rank", False)) return tree
def tree_prune_rank(self, tree, rank="species")
Takes a TreeNode tree and prunes off any tips not at the specified rank, continuing up the tree until all of the remaining tips are at the specified rank. Parameters ---------- tree : `skbio.tree.TreeNode` The root node of the tree to perform this operation on. rank : {'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional Analysis will be restricted to abundances of taxa at the specified level. Returns ------- `skbio.tree.TreeNode`, the root of the tree where all tips are at the given rank, and all tips have a path back to the root node. Examples -------- tree_prune_rank(tree, 'species') will remove all subspecies/strain nodes and return a tree containing all species-level nodes and higher.
3.28399
3.866724
0.849295
f = open_strings_file(file_path, "w") for element in file_elements: f.write(unicode(element)) f.write(u"\n") f.close()
def write_file_elements_to_strings_file(file_path, file_elements)
Write elements to the strings file Args: file_path (str): The path to the strings file. file_elements (list): List of elements to write to the file.
2.877485
3.221453
0.893226
logging_level = logging.WARNING if args is not None and args.verbose: logging_level = logging.INFO config = {"level": logging_level, "format": "jtlocalize:%(message)s"} if args is not None and args.log_path != "": config["filename"] = args.log_path logging.basicConfig(**config)
def setup_logging(args=None)
Set up the logging module. Args: args (optional): The arguments returned by the argparse module.
3.63731
4.457083
0.816074
localization_dictionary = {} f = open_strings_file(file_path, "r+") header_comment_key_value_tuples = extract_header_comment_key_value_tuples_from_file(f) if len(header_comment_key_value_tuples) == 0: logging.warning("Couldn't find any strings in file '%s'. Check encoding and format." % file_path) for header_comment, comments, key, value in header_comment_key_value_tuples: localization_entry = LocalizationEntry(comments, key, value) localization_dictionary[ localization_entry.__getattribute__(localization_entry_attribute_name_for_key)] = localization_entry f.close() return localization_dictionary
def __generate_localization_dictionary_from_file(file_path, localization_entry_attribute_name_for_key)
Generates a dictionary mapping between keys (defined by the given attribute name) and localization entries. Args: file_path (str): The strings file path. localization_entry_attribute_name_for_key: The name of the attribute of LocalizationEntry to use as key. Returns: dict: A dictionary mapping between keys (defined by the given attribute name) and localization entries.
2.978773
3.031299
0.982672
file_data = file_descriptor.read() findall_result = re.findall(HEADER_COMMENT_KEY_VALUE_TUPLES_REGEX, file_data, re.MULTILINE | re.DOTALL) returned_list = [] for header_comment, _ignored, raw_comments, key, value in findall_result: comments = re.findall("/\* (.*?) \*/", raw_comments) if len(comments) == 0: comments = [u""] returned_list.append((header_comment, comments, key, value)) return returned_list
def extract_header_comment_key_value_tuples_from_file(file_descriptor)
Extracts tuples representing comments and localization entries from strings file. Args: file_descriptor (file): The file to read the tuples from Returns: list : List of tuples representing the headers and localization entries.
3.320256
3.604576
0.921123
result_pairs = re.findall(JTL_REGEX, open(file_path).read()) for result_key, result_comment in result_pairs: results_dict[result_key] = result_comment return results_dict
def extract_jtl_string_pairs_from_text_file(results_dict, file_path)
Extracts all string pairs matching the JTL pattern from a given text file. This can be used as an "extract_func" argument in the extract_string_pairs_in_directory method. Args: results_dict (dict): The dict to add the string pairs to. file_path (str): The path of the file from which to extract the string pairs.
3.201223
3.614239
0.885725
result = {} for root, dirnames, filenames in os.walk(directory_path): for file_name in filenames: if filter_func(file_name): file_path = os.path.join(root, file_name) try: extract_func(result, file_path) except Exception as e: print "Error in file " + file_name print e return result
def extract_string_pairs_in_directory(directory_path, extract_func, filter_func)
Retrieves all string pairs in the directory Args: directory_path (str): The path of the directory containing the files to extract string pairs from. extract_func (function): Function for extracting the localization keys and comments from the files. The extract function receives 2 parameters: - dict that the keys (a key in the dict) and comments (a value in the dict) are added to. - str representing the file path filter_func (function): Function for filtering files in the directory. The filter function receives the file name and returns a bool representing the filter result: True if the file name passed the filter, False otherwise. Returns: dict: A mapping between each string pair's first value (usually the localization key) and second value (usually the comment).
2.004736
2.035091
0.985084
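A usage sketch with hypothetical paths, pairing this with extract_jtl_string_pairs_from_text_file() above to collect JTL pairs from Objective-C sources: pairs = extract_string_pairs_in_directory( "/path/to/project", extract_func=extract_jtl_string_pairs_from_text_file, filter_func=lambda file_name: file_name.endswith(".m"), )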
escaped_key = re.sub(r'([^\\])"', '\\1\\"', entry_key) file_descriptor.write(u'/* %s */\n' % entry_comment) file_descriptor.write(u'"%s" = "%s";\n' % (escaped_key, escaped_key))
def write_entry_to_file(file_descriptor, entry_comment, entry_key)
Writes a localization entry to the file Args: file_descriptor (file, instance): The file to write the entry to. entry_comment (str): The entry's comment. entry_key (str): The entry's key.
2.772626
2.890321
0.95928
output_file = open_strings_file(file_path, "a") write_section_header_to_file(output_file, section_name) for entry_key, entry_comment in sorted(localization_key_to_comment.iteritems(), key=operator.itemgetter(1)): output_file.write(u'\n') write_entry_to_file(output_file, entry_comment, entry_key) output_file.close()
def append_dictionary_to_file(localization_key_to_comment, file_path, section_name)
Appends dictionary of localization keys and comments to a file Args: localization_key_to_comment (dict): A mapping between localization keys and comments. file_path (str): The path of the file to append to. section_name (str): The name of the section.
2.482225
2.768138
0.896713
output_file_descriptor = open_strings_file(file_name, "w") for entry_key, entry_comment in sorted(localization_key_to_comment.iteritems(), key=operator.itemgetter(1)): write_entry_to_file(output_file_descriptor, entry_comment, entry_key) output_file_descriptor.write(u'\n') output_file_descriptor.close()
def write_dict_to_new_file(file_name, localization_key_to_comment)
Writes a dictionary of localization keys and comments to a new file. Args: localization_key_to_comment (dict): A mapping between localization keys and comments. file_name (str): The path of the file to write to.
2.74567
2.959373
0.927788
result = [] for root, dir_names, file_names in os.walk(base_dir): for filename in file_names: candidate = os.path.join(root, filename) if should_include_file_in_search(candidate, extensions, exclude_dirs): result.append(candidate) return result
def find_files(base_dir, extensions, exclude_dirs=list())
Find all files matching the given extensions. Args: base_dir (str): Path of base directory to search in. extensions (list): A list of file extensions to search for. exclude_dirs (list): A list of directories to exclude from search. Returns: list of paths that match the search
2.251952
2.608161
0.863425
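A usage sketch with hypothetical paths; note that exclude_dirs entries are matched as path prefixes by should_include_file_in_search() below: sources = find_files( "/path/to/project", [".m", ".h"], exclude_dirs=["/path/to/project/Pods"], )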
return (exclude_dirs is None or not any(file_name.startswith(d) for d in exclude_dirs)) and \ any(file_name.endswith(e) for e in extensions)
def should_include_file_in_search(file_name, extensions, exclude_dirs)
Whether or not a file name matches the search criteria given by the arguments. Args: file_name (str): A file path to check. extensions (list): A list of file extensions the file should match. exclude_dirs (list): A list of directories to exclude from search. Returns: A boolean of whether or not the file matches the search criteria.
2.315329
3.375475
0.685927
if rank == "auto": # if we're an accessor for a ClassificationsDataFrame, use its _rank property if self.__class__.__name__ == "OneCodexAccessor": return self._rank if self._field == "abundance": return "species" else: return "genus" else: return rank
def _get_auto_rank(self, rank)
Tries to figure out what rank we should use for analyses
10.174273
9.023121
1.127578
return ( getattr(self, "_normalized", False) or getattr(self, "_field", None) == "abundance" or bool((self._results.sum(axis=1).round(4) == 1.0).all()) )
def _guess_normalized(self)
Returns true if the collated counts in `self._results` appear to be normalized. Notes ----- It's possible that the _results df has already been normalized, which can cause some methods to fail. This method lets us guess whether that's true and act accordingly.
6.573182
4.820864
1.363486
from onecodex.dataframes import ClassificationsDataFrame rank = self._get_auto_rank(rank) df = self._results.copy() # subset by taxa if rank: if rank == "kingdom": warnings.warn( "Did you mean to specify rank=kingdom? Use rank=superkingdom to see Bacteria, " "Archaea and Eukaryota." ) tax_ids_to_keep = [] for tax_id in df.keys(): if self.taxonomy["rank"][tax_id] == rank: tax_ids_to_keep.append(tax_id) if len(tax_ids_to_keep) == 0: raise OneCodexException("No taxa kept--is rank ({}) correct?".format(rank)) df = df.loc[:, tax_ids_to_keep] # normalize if normalize is False and self._guess_normalized(): raise OneCodexException("Data has already been normalized and this can not be undone.") if normalize is True or ( normalize == "auto" and rank is not None and self._field != "abundance" ): df = df.div(df.sum(axis=1), axis=0) # remove columns (tax_ids) with no values that are > 0 if remove_zeros: df = df.loc[:, (df != 0).any(axis=0)] # restrict to taxa appearing in one or more samples at the given threshold if threshold: df = df.loc[:, df.max() >= threshold] # restrict to N most abundant taxa if top_n: idx = df.sum(axis=0).sort_values(ascending=False).head(top_n).index df = df.loc[:, idx] # additional data to copy into the ClassificationsDataFrame ocx_data = { "ocx_metadata": self.metadata.copy(), "ocx_rank": rank, "ocx_field": self._field, "ocx_taxonomy": self.taxonomy.copy(), "ocx_normalized": normalize, } # generate long-format table if table_format == "long": long_df = {"classification_id": [], "tax_id": [], self._field: []} for t_id in df: for c_id, count in df[t_id].iteritems(): long_df["classification_id"].append(c_id) long_df["tax_id"].append(t_id) long_df[self._field].append(count) results_df = ClassificationsDataFrame(long_df, **ocx_data) elif table_format == "wide": results_df = ClassificationsDataFrame(df, **ocx_data) else: raise OneCodexException("table_format must be one of: long, wide") return results_df
def to_df( self, rank="auto", top_n=None, threshold=None, remove_zeros=True, normalize="auto", table_format="wide", )
Takes the ClassificationsDataFrame associated with these samples, or SampleCollection, does some filtering, and returns a ClassificationsDataFrame copy. Parameters ---------- rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional Analysis will be restricted to abundances of taxa at the specified level. top_n : `integer`, optional Return only the top N most abundant taxa. threshold : `float`, optional Return only taxa more abundant than this threshold in one or more samples. remove_zeros : `bool`, optional Do not return taxa that have zero abundance in every sample. normalize : {'auto', True, False} Convert read counts to relative abundances (each sample sums to 1.0). table_format : {'long', 'wide'} If wide, rows are classifications, cols are taxa, elements are counts. If long, rows are observations with three cols each: classification_id, tax_id, and count. Returns ------- `ClassificationsDataFrame`
3.000726
2.948483
1.017719
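A hedged usage sketch, assuming `samples` is a SampleCollection of successful classifications: # genus-level relative abundances, keeping only the 10 most abundant taxa df = samples.to_df(rank="genus", top_n=10) # long-format raw counts without normalization long_df = samples.to_df(rank="species", normalize=False, table_format="long")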
from weasyprint import HTML, CSS nb = copy.deepcopy(nb) output, resources = super(OneCodexPDFExporter, self).from_notebook_node( nb, resources=resources, **kw ) buf = BytesIO() HTML(string=output).write_pdf( buf, stylesheets=[CSS(os.path.join(ASSETS_PATH, CSS_TEMPLATE_FILE))] ) buf.seek(0) return buf.read(), resources
def from_notebook_node(self, nb, resources=None, **kw)
Takes output of OneCodexHTMLExporter and runs Weasyprint to get a PDF.
3.583723
2.6554
1.349598
output, resources = super(OneCodexDocumentExporter, self).from_notebook_node( nb, resources=resources, **kw ) from onecodex import Api from onecodex.lib.upload import upload_document_fileobj ocx = Api() default_filename = "Analysis Report - {dt:%B} {dt.day}, {dt:%Y}".format( dt=datetime.datetime.now() ) file_name = resources["metadata"].get("one_codex_doc_portal_filename", default_filename) try: document_id = upload_document_fileobj( BytesIO(output), file_name, ocx._client.session, ocx.Documents._resource ) except UploadException as exc: resp = json.dumps({"status": 500, "message": str(exc)}) return resp, resources except Exception: resp = json.dumps( { "status": 500, "message": "Upload failed. Please contact help@onecodex.com for assistance.", } ) return resp, resources resp = json.dumps({"status": 200, "document_id": document_id}) return resp, resources
def from_notebook_node(self, nb, resources=None, **kw)
Takes PDF output from PDFExporter and uploads to One Codex Documents portal.
3.554503
3.163188
1.123709
localization_directory = os.path.join(localization_bundle_path, DEFAULT_LANGUAGE_DIRECTORY_NAME) if not os.path.exists(localization_directory): os.makedirs(localization_directory) localization_file = os.path.join(localization_directory, LOCALIZATION_FILENAME) # Creating the same directory tree structure in the tmp directory tmp_localization_directory = os.path.join(tmp_directory, DEFAULT_LANGUAGE_DIRECTORY_NAME) tmp_localization_file = os.path.join(tmp_localization_directory, LOCALIZATION_FILENAME) if os.path.isdir(tmp_localization_directory): shutil.rmtree(tmp_localization_directory) os.mkdir(tmp_localization_directory) logging.info("Running genstrings") source_files = extract_source_files(project_base_dir, exclude_dirs) genstrings_cmd = 'genstrings -s JTLocalizedString -o %s %s' % (tmp_localization_directory, " ".join( ['"%s"' % (source_file,) for source_file in source_files])) genstrings_process = subprocess.Popen(genstrings_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, shell=True) genstrings_out, genstrings_err = genstrings_process.communicate() remove_empty_comments_from_file(tmp_localization_file) add_genstrings_comments_to_file(tmp_localization_file, genstrings_err) genstrings_rc = genstrings_process.returncode if genstrings_rc != 0: logging.fatal("genstrings returned %d, aborting run!", genstrings_rc) sys.exit(genstrings_rc) create_localized_strings_from_ib_files(project_base_dir, exclude_dirs, tmp_localization_file, special_ui_components_prefix) if include_strings_file: target = open_strings_file(tmp_localization_file, "a") source = open_strings_file(include_strings_file, "r") target.write(source.read()) source.close() target.close() handle_duplications(tmp_localization_file) if os.path.isfile(localization_file): logging.info("Merging old localizable with new one...") merge_strings_files(localization_file, tmp_localization_file) else: logging.info("No Localizable yet, moving the created file...") shutil.move(tmp_localization_file, localization_file)
def generate_strings(project_base_dir, localization_bundle_path, tmp_directory, exclude_dirs, include_strings_file, special_ui_components_prefix)
Calls the builtin 'genstrings' command with JTLocalizedString as the string to search for, adds strings extracted from UI elements internationalized with 'JTL', and removes duplications.
2.456392
2.411512
1.018611
if callable(filter_func): return self.__class__([obj for obj in self if filter_func(obj) is True], **self._kwargs) else: raise OneCodexException( "Expected callable for filter, got: {}".format(type(filter_func).__name__) )
def filter(self, filter_func)
Return a new SampleCollection containing only samples meeting the filter criteria. Will pass any kwargs (e.g., field or skip_missing) used when instantiating the current class on to the new SampleCollection that is returned. Parameters ---------- filter_func : `callable` A function that will be evaluated on every object in the collection. The function must return a `bool`. If True, the object will be kept. If False, it will be removed from the SampleCollection that is returned. Returns ------- `onecodex.models.SampleCollection` containing only objects `filter_func` returned True on. Examples -------- Generate a new collection of Samples that have a specific filename extension: new_collection = samples.filter(lambda s: s.filename.endswith('.fastq.gz'))
3.913597
5.080237
0.770357
skip_missing = skip_missing if skip_missing else self._kwargs["skip_missing"] new_classifications = [] for a in self._res_list: if a.__class__.__name__ == "Samples": c = a.primary_classification elif a.__class__.__name__ == "Classifications": c = a else: raise OneCodexException( "Objects in SampleCollection must be one of: Classifications, Samples" ) if skip_missing and not c.success: warnings.warn("Classification {} not successful. Skipping.".format(c.id)) continue new_classifications.append(c) self._cached["classifications"] = new_classifications
def _classification_fetch(self, skip_missing=None)
Turns a list of objects associated with a classification result into a list of Classifications objects. Parameters ---------- skip_missing : `bool` If an analysis was not successful, exclude it, warn, and keep going Returns ------- None, but stores a result in self._cached.
3.925309
3.78436
1.037245
import pandas as pd DEFAULT_FIELDS = None metadata = [] for c in self._classifications: m = c.sample.metadata if DEFAULT_FIELDS is None: DEFAULT_FIELDS = list(m._resource._schema["properties"].keys()) DEFAULT_FIELDS.remove("$uri") DEFAULT_FIELDS.remove("sample") metadatum = {f: getattr(m, f) for f in DEFAULT_FIELDS} metadatum["classification_id"] = c.id metadatum["sample_id"] = m.sample.id metadatum["metadata_id"] = m.id metadatum["created_at"] = m.sample.created_at metadatum["filename"] = c.sample.filename metadatum.update(m.custom) metadata.append(metadatum) if metadata: metadata = pd.DataFrame(metadata).set_index("classification_id") else: metadata = pd.DataFrame( columns=["classification_id", "sample_id", "metadata_id", "created_at"] ) self._cached["metadata"] = metadata
def _collate_metadata(self)
Turns a list of objects associated with a classification result into a DataFrame of metadata. Returns ------- None, but stores a result in self._cached.
2.943778
2.792351
1.054229
import pandas as pd field = field if field else self._kwargs["field"] if field not in ("auto", "abundance", "readcount", "readcount_w_children"): raise OneCodexException("Specified field ({}) not valid.".format(field)) # we'll fill these dicts that eventually turn into DataFrames df = {"classification_id": [c.id for c in self._classifications]} tax_info = {"tax_id": [], "name": [], "rank": [], "parent_tax_id": []} if field == "auto": field = "readcount_w_children" self._cached["field"] = field for c_idx, c in enumerate(self._classifications): # pulling results from mainline is the slowest part of the function result = c.results()["table"] # d contains info about a taxon in result, including name, id, counts, rank, etc. for d in result: d_tax_id = d["tax_id"] if d_tax_id not in tax_info["tax_id"]: for k in ("tax_id", "name", "rank", "parent_tax_id"): tax_info[k].append(d[k]) # first time we've seen this taxon, so make a vector for it df[d_tax_id] = [0] * len(self._classifications) df[d_tax_id][c_idx] = d[field] # format as a Pandas DataFrame df = pd.DataFrame(df).set_index("classification_id").fillna(0) df.columns.name = "tax_id" tax_info = pd.DataFrame(tax_info).set_index("tax_id") self._cached["results"] = df self._cached["taxonomy"] = tax_info
def _collate_results(self, field=None)
For a list of objects associated with a classification result, return the results as a DataFrame and dict of taxa info. Parameters ---------- field : {'readcount_w_children', 'readcount', 'abundance'} Which field to use for the abundance/count of a particular taxon in a sample. - 'readcount_w_children': total reads of this taxon and all its descendants - 'readcount': total reads of this taxon - 'abundance': genome size-normalized relative abundances, from shotgun sequencing Returns ------- None, but stores a result in self._cached.
3.984714
3.493109
1.140736
otu_format = "Biological Observation Matrix 1.0.0" # Note: This is exact format URL is required by https://github.com/biocore/biom-format otu_url = "http://biom-format.org" otu = OrderedDict( { "id": biom_id, "format": otu_format, "format_url": otu_url, "type": "OTU table", "generated_by": "One Codex API V1", "date": datetime.now().isoformat(), "rows": [], "columns": [], "matrix_type": "sparse", "matrix_element_type": "int", } ) rows = defaultdict(dict) tax_ids_to_names = {} for classification in self._classifications: col_id = len(otu["columns"]) # 0 index # Re-encoding the JSON is a bit of a hack, but # we need a ._to_dict() method that properly # resolves references and don't have one at the moment columns_entry = { "id": str(classification.id), "sample_id": str(classification.sample.id), "sample_filename": classification.sample.filename, "metadata": json.loads( classification.sample.metadata._to_json(include_references=False) ), } otu["columns"].append(columns_entry) sample_df = classification.table() for row in sample_df.iterrows(): tax_id = row[1]["tax_id"] tax_ids_to_names[tax_id] = row[1]["name"] rows[tax_id][col_id] = int(row[1]["readcount"]) num_rows = len(rows) num_cols = len(otu["columns"]) otu["shape"] = [num_rows, num_cols] otu["data"] = [] for present_taxa in sorted(rows): # add the row entry row_id = len(otu["rows"]) otu["rows"].append( {"id": present_taxa, "metadata": {"taxonomy": tax_ids_to_names[present_taxa]}} ) for sample_with_hit in rows[present_taxa]: counts = rows[present_taxa][sample_with_hit] otu["data"].append([row_id, sample_with_hit, counts]) return otu
def to_otu(self, biom_id=None)
Converts a list of objects associated with a classification result into a `dict` resembling an OTU table. Parameters ---------- biom_id : `string`, optional Optionally specify an `id` field for the generated v1 BIOM file. Returns ------- otu_table : `OrderedDict` A BIOM OTU table, returned as a Python OrderedDict (can be dumped to JSON)
3.210745
3.149074
1.019584
output_file_elements = [] old_translated_file_dict = generate_localization_key_to_entry_dictionary_from_file(old_translated_file) new_translated_file_dict = generate_localization_key_to_entry_dictionary_from_file(new_translated_file) f = open_strings_file(updated_localizable_file, "r") for header_comment, comments, key, value in extract_header_comment_key_value_tuples_from_file(f): translation_value = None if len(header_comment) > 0: output_file_elements.append(Comment(header_comment)) if value in new_translated_file_dict: translation_value = new_translated_file_dict[value].value elif key in old_translated_file_dict: translation_value = old_translated_file_dict[key].value elif key in new_translated_file_dict: translation_value = new_translated_file_dict[key].value if translation_value is not None: output_file_elements.append(LocalizationEntry(comments, key, translation_value)) f.close() write_file_elements_to_strings_file(merged_translated_file, output_file_elements)
def localization_merge_back(updated_localizable_file, old_translated_file, new_translated_file, merged_translated_file)
Generates a file merging the old translations and the new ones. Args: updated_localizable_file (str): The path to the updated localization strings file, meaning the strings that require translation. old_translated_file (str): The path to the strings file containing the previously translated strings. new_translated_file (str): The path to the strings file containing the newly translated strings. merged_translated_file (str): The path to the output file with the merged translations.
2.22937
2.283986
0.976087
# must be one of Nominal, Ordinal, Time per altair if category_type not in ("N", "O", "T"): raise OneCodexException("If specifying category_type, must be N, O, or T") # adapted from https://altair-viz.github.io/gallery/boxplot_max_min.html lower_box = "q1({}):Q".format(quantity) lower_whisker = "min({}):Q".format(quantity) upper_box = "q3({}):Q".format(quantity) upper_whisker = "max({}):Q".format(quantity) if category_type == "T": x_format = "hoursminutes({}):{}".format(category, category_type) else: x_format = "{}:{}".format(category, category_type) lower_plot = ( alt.Chart(df) .mark_rule() .encode(y=alt.Y(lower_whisker, axis=alt.Axis(title=ylabel)), y2=lower_box, x=x_format) ) middle_plot = alt.Chart(df).mark_bar(size=35).encode(y=lower_box, y2=upper_box, x=x_format) upper_plot = alt.Chart(df).mark_rule().encode(y=upper_whisker, y2=upper_box, x=x_format) middle_tick = ( alt.Chart(df) .mark_tick(color="black", size=35) .encode( y="median({}):Q".format(quantity), x=alt.X(x_format, axis=alt.Axis(title=xlabel), scale=alt.Scale(rangeStep=45)), tooltip="median({}):Q".format(quantity), ) ) chart = lower_plot + middle_plot + upper_plot + middle_tick if title: chart = chart.properties(title=title) return chart
def boxplot(df, category, quantity, category_type="N", title=None, xlabel=None, ylabel=None)
Plot a simple boxplot using Altair. Parameters ---------- df : `pandas.DataFrame` Contains columns matching 'category' and 'quantity' labels, at a minimum. category : `string` The name of the column in df used to group values on the horizontal axis. quantity : `string` The name of the column in df of values to plot on the vertical axis. Must be numerical. category_type : {'N', 'O', 'T'}, optional Nominal, ordinal, or time values can be used as categories. Quantitative (Q) values look weird. title : `string`, optional Text label at the top of the plot. xlabel : `string`, optional Text label along the horizontal axis. ylabel : `string`, optional Text label along the vertical axis. Returns ------- `altair.Chart`
2.573413
2.575373
0.999239
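A minimal sketch with made-up data; the column names are arbitrary: import pandas as pd df = pd.DataFrame({"group": ["a", "a", "a", "b", "b", "b"], "value": [1.0, 2.5, 1.7, 0.8, 1.9, 2.2]}) chart = boxplot(df, "group", "value", title="Example", xlabel="Group", ylabel="Value")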
plot_data = { "x": [], "y": [], "o": [], # order these points should be connected in "b": [], # one number per branch } for idx, (i, d) in enumerate(zip(tree["icoord"], tree["dcoord"])): plot_data["x"].extend(map(lambda x: -x, d)) plot_data["y"].extend(map(lambda x: -x, i)) plot_data["o"].extend([0, 1, 2, 3]) plot_data["b"].extend([idx] * 4) plot_data = pd.DataFrame(plot_data) chart = ( alt.Chart(plot_data, width=100, height=15 * len(tree["leaves"]) - 7.5) .mark_line(point=False, opacity=0.5) .encode( x=alt.X("x", axis=None), y=alt.Y("y", axis=None, scale=alt.Scale(zero=True, nice=False)), order="o", color=alt.Color( "b:N", scale=alt.Scale(domain=list(range(idx + 1)), range=["black"] * (idx + 1)), legend=None, ), ) ) return chart
def dendrogram(tree)
Plot a simple square dendrogram using Altair. Parameters ---------- tree : `dict` returned by `scipy.cluster.hierarchy.dendrogram` Contains, at a minimum, 'icoord', 'dcoord', and 'leaves' keys. Scipy does all the work of determining where the lines in the tree should go. All we have to do is draw them. Returns ------- `altair.Chart`
2.632316
2.478007
1.062271
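A hedged sketch feeding this a tree dict built by scipy (random data; no_plot=True so scipy only computes coordinates): import numpy as np from scipy.cluster import hierarchy linkage = hierarchy.linkage(np.random.rand(6, 4), method="average") tree = hierarchy.dendrogram(linkage, no_plot=True) chart = dendrogram(tree)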
errors_to_log = [line for line in genstrings_err.splitlines() if "used with multiple comments" not in line] if len(errors_to_log) > 0: logging.warning("genstrings warnings:\n%s", "\n".join(errors_to_log)) loc_file = open_strings_file(localization_file, "a") regex_matches = re.findall(r'Warning: Key "(.*?)" used with multiple comments ("[^"]*" (& "[^"]*")+)', genstrings_err) logging.info("Adding multiple comments from genstrings output") for regex_match in regex_matches: if len(regex_match) == 3: key = regex_match[0] comments = [comment.strip()[1:-1] for comment in regex_match[1].split("&")] logging.info("Found key with %d comments: %s", len(comments), key) loc_key = LocalizationEntry(comments, key, key) loc_file.write(unicode(loc_key)) loc_file.write(u"\n") loc_file.close()
def add_genstrings_comments_to_file(localization_file, genstrings_err)
Adds the comments produced by the genstrings script for duplicate keys. Args: localization_file (str): The path to the strings file. genstrings_err (str): The stderr output of the genstrings command.
3.479562
3.48398
0.998732
if path and file_obj: raise OneCodexException("Please specify only one of: path, file_obj") if path is None and file_obj is None: path = os.path.join(os.getcwd(), self.filename) if path and os.path.exists(path): raise OneCodexException("{} already exists! Will not overwrite.".format(path)) try: url_data = self._resource.download_uri() resp = requests.get(url_data["download_uri"], stream=True) with (open(path, "wb") if path else file_obj) as f_out: if progressbar: with click.progressbar(length=self.size, label=self.filename) as bar: for data in resp.iter_content(chunk_size=1024): bar.update(len(data)) f_out.write(data) else: for data in resp.iter_content(chunk_size=1024): f_out.write(data) except KeyboardInterrupt: if path: os.remove(path) raise except requests.exceptions.HTTPError as exc: if exc.response.status_code == 401: raise OneCodexException("You must be logged in to download files.") elif exc.response.status_code == 402: raise OneCodexException( "You must either have a premium platform account or be in " "a notebook environment to download files." ) elif exc.response.status_code == 403: raise OneCodexException("You are not authorized to download this file.") else: raise OneCodexException( "Download failed with an HTTP status code {}.".format(exc.response.status_code) ) return path
def download(self, path=None, file_obj=None, progressbar=False)
Downloads files from One Codex. Parameters ---------- path : `string`, optional Full path to save the file to. If omitted, defaults to the original filename in the current working directory. file_obj : file-like object, optional Rather than save the file to a path, write it to this file-like object. progressbar : `bool` Display a progress bar using Click for the download? Returns ------- `string` The path the file was downloaded to, if applicable. Otherwise, None. Notes ----- If no arguments specified, defaults to download the file as the original filename in the current working directory. If `file_obj` given, will write data into the passed file-like object. If `path` given, will download the file to the path provided, but will not overwrite any existing files.
2.225535
2.198076
1.012493
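Hedged usage sketches, assuming `f` is an API object exposing this method (e.g. a file attached to a Samples or Documents instance): # save under the original filename in the current directory, with a progress bar path = f.download(progressbar=True) # or stream into an existing file-like object instead of a path with open("local_copy.fastq.gz", "wb") as out: f.download(file_obj=out)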
res = cls._resource doc_id = upload_document(file_path, res._client.session, res, progressbar=progressbar) return cls.get(doc_id)
def upload(cls, file_path, progressbar=None)
Uploads a single document file to the One Codex server. Parameters ---------- file_path : `string` A path to a file on the system. progressbar : `click.progressbar`, optional If passed, display a progress bar using Click. Returns ------- A `Documents` object upon successful upload. None if the upload failed.
6.947553
11.498992
0.604188
logging.info("Preparing for translation..") for strings_file in os.listdir(os.path.join(localization_bundle_path, DEFAULT_LANGUAGE_DIRECTORY_NAME)): if not strings_file.endswith(".strings"): continue strings_path = os.path.join(localization_bundle_path, DEFAULT_LANGUAGE_DIRECTORY_NAME, strings_file) for lang_dir in os.listdir(localization_bundle_path): if lang_dir == DEFAULT_LANGUAGE_DIRECTORY_NAME or lang_dir.startswith("."): continue dest_strings_path = os.path.join(localization_bundle_path, lang_dir, strings_file) pending_path = dest_strings_path + ".pending" excluded_path = dest_strings_path + ".excluded" if not os.path.exists(dest_strings_path): open_strings_file(dest_strings_path, "a").close() logging.info("Preparing diff for %s in %s", lang_dir, pending_path) localization_diff(strings_path, dest_strings_path, excluded_path, pending_path)
def prepare_for_translation(localization_bundle_path)
Prepares the localization bundle for translation. This means, after creating the strings files using genstrings.sh, this will produce '.pending' files, that contain the files that are yet to be translated. Args: localization_bundle_path (str): The path to the localization bundle.
2.385022
2.308458
1.033167
# set up the context for sub commands click.Context.get_usage = click.Context.get_help ctx.obj = {} ctx.obj["API_KEY"] = api_key ctx.obj["NOPPRINT"] = no_pprint ctx.obj["TELEMETRY"] = telemetry if verbose: log.setLevel(logging.DEBUG) # handle checking insecure platform, we let upload command do it by itself if ctx.invoked_subcommand != "upload": warn_if_insecure_platform()
def onecodex(ctx, api_key, no_pprint, verbose, telemetry)
One Codex v1 API command line interface
5.264585
5.3869
0.977294
if len(files) == 0: click.echo(ctx.get_help()) return files = list(files) bar = click.progressbar(length=sum([_file_size(x) for x in files]), label="Uploading... ") run_via_threadpool( ctx.obj["API"].Documents.upload, files, {"progressbar": bar}, max_threads=max_threads, graceful_exit=False, )
def documents_upload(ctx, max_threads, files)
Upload a document file (of any type) to One Codex
4.239732
4.23181
1.001872
# basic operation -- just print if not readlevel and not results: cli_resource_fetcher(ctx, "classifications", classifications) # fetch the results elif not readlevel and results: if len(classifications) != 1: log.error("Can only request results data on one Classification at a time") else: classification = ctx.obj["API"].Classifications.get(classifications[0]) if not classification: log.error( "Could not find classification {} (404 status code)".format(classifications[0]) ) return results = classification.results(json=True) pprint(results, ctx.obj["NOPPRINT"]) # fetch the readlevel elif readlevel is not None and not results: if len(classifications) != 1: log.error("Can only request read-level data on one Classification at a time") else: classification = ctx.obj["API"].Classifications.get(classifications[0]) if not classification: log.error( "Could not find classification {} (404 status code)".format(classifications[0]) ) return tsv_url = classification._readlevel()["url"] log.info("Downloading tsv data from: {}".format(tsv_url)) download_file_helper(tsv_url, readlevel_path) # both given -- complain else: log.error("Can only request one of read-level data or results data at a time")
def classifications(ctx, classifications, results, readlevel, readlevel_path)
Retrieve performed metagenomic classifications
3.293757
3.304889
0.996632
appendables = {} if tags: appendables["tags"] = [] for tag in tags: appendables["tags"].append(tag) if metadata: appendables["metadata"] = {} for metadata_kv in metadata: split_metadata = metadata_kv.split("=", 1) if len(split_metadata) > 1: metadata_value = split_metadata[1] appendables["metadata"][split_metadata[0]] = metadata_value appendables = validate_appendables(appendables, ctx.obj["API"]) if (forward or reverse) and not (forward and reverse): click.echo("You must specify both forward and reverse files", err=True) ctx.exit(1) if forward and reverse: if len(files) > 0: click.echo( "You may not pass a FILES argument when using the " " --forward and --reverse options.", err=True, ) ctx.exit(1) files = [(forward, reverse)] elif len(files) == 0: click.echo(ctx.get_help()) return else: files = list(files) # "intelligently" find paired files and tuple them paired_files = [] single_files = set(files) for filename in files: # convert "read 1" filenames into "read 2" and check that they exist; if they do # upload the files as a pair, autointerleaving them pair = re.sub("[._][Rr]1[._]", lambda x: x.group().replace("1", "2"), filename) # we don't necessary need the R2 to have been passed in; we infer it anyways if pair != filename and os.path.exists(pair): if not prompt and pair not in single_files: # if we're not prompting, don't automatically pull in files # not in the list the user passed in continue paired_files.append((filename, pair)) if pair in single_files: single_files.remove(pair) single_files.remove(filename) auto_pair = True if prompt and len(paired_files) > 0: pair_list = "" for p in paired_files: pair_list += "\n {} & {}".format(os.path.basename(p[0]), os.path.basename(p[1])) answer = click.confirm( "It appears there are paired files:{}\nInterleave them after upload?".format( pair_list ), default="Y", ) if not answer: auto_pair = False if auto_pair: files = paired_files + list(single_files) total_size = sum( [ (_file_size(x[0], uncompressed=True) + _file_size(x[1], uncompressed=True)) if isinstance(x, tuple) else _file_size(x, uncompressed=False) for x in files ] ) upload_kwargs = { "metadata": appendables["valid_metadata"], "tags": appendables["valid_tags"], "project": project_id, "coerce_ascii": coerce_ascii, "progressbar": progressbar(length=total_size, label="Uploading..."), } run_via_threadpool( ctx.obj["API"].Samples.upload, files, upload_kwargs, max_threads=max_threads, graceful_exit=False, )
def upload( ctx, files, max_threads, prompt, forward, reverse, tags, metadata, project_id, coerce_ascii )
Upload a FASTA or FASTQ (optionally gzip'd) to One Codex
3.461039
3.404438
1.016626
base_url = os.environ.get("ONE_CODEX_API_BASE", "https://app.onecodex.com") if not ctx.obj["API_KEY"]: _login(base_url) else: email = _login(base_url, api_key=ctx.obj["API_KEY"]) ocx = Api(api_key=ctx.obj["API_KEY"], telemetry=ctx.obj["TELEMETRY"]) # TODO: This should be protected or built in as a first class resource # with, e.g., connection error catching (it's not part of our formally documented API at the moment) if ocx._client.Account.instances()["email"] != email: click.echo("Your login credentials do not match the provided email!", err=True) _remove_creds() ctx.exit(1)
def login(ctx)
Add an API key (saved in ~/.onecodex)
7.707067
6.668739
1.155701
tax_id_map = {} if parent: for row in classification.results()["table"]: if row["parent_tax_id"] is not None and row["tax_id"] is not None: tax_id_map[row["tax_id"]] = row["parent_tax_id"] else: for row in classification.results()["table"]: if row["parent_tax_id"] is not None and row["tax_id"] is not None: try: tax_id_map[row["parent_tax_id"]].add(row["tax_id"]) except KeyError: tax_id_map[row["parent_tax_id"]] = set([row["tax_id"]]) return tax_id_map
def make_taxonomy_dict(classification, parent=False)
Takes a classification data frame returned by the API and parses it into a dictionary mapping a tax_id to its children (or parent). Restricted to tax_id's that are represented in the classification results.
1.826225
1.736982
1.051379
if parent: # TODO: allow filtering on tax_id and its parents, too pass else: def _child_recurse(tax_id, visited): try: children = [tax_id] + list(tax_id_map[tax_id]) except KeyError: children = [tax_id] for child in children: if child not in visited: visited.append(child) children.extend(_child_recurse(child, visited)) return children return list(set(_child_recurse(tax_id, [])))
def recurse_taxonomy_map(tax_id_map, tax_id, parent=False)
Takes the output dict from make_taxonomy_dict and an input tax_id and recurses either up or down through the tree to get /all/ children (or parents) of the given tax_id.
3.165269
3.245358
0.975322
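A toy sketch of the expected shapes, assuming the map was built with parent=False so each tax_id points at its set of children (the IDs here are made up): tax_id_map = {"1": {"2", "10"}, "2": {"3"}} print(recurse_taxonomy_map(tax_id_map, "2"))  # e.g. ['2', '3'] (order not guaranteed)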
# FIXME more stable implementation that only attempts to resolve {"$ref"} objects where they are allowed. if isinstance(schema, dict): if len(schema) == 1 and "$ref" in schema and isinstance(schema["$ref"], six.string_types): reference = schema["$ref"] if reference.startswith("#"): # TODO should also resolve any paths within the reference, which would need to be deferred. return root return ref_resolver(reference) resolved = {} for k, v in schema.items(): resolved[k] = schema_resolve_refs(v, ref_resolver=ref_resolver, root=root if root is not None else resolved) return resolved if isinstance(schema, (list, tuple)): return [schema_resolve_refs(v, ref_resolver=ref_resolver, root=root) for v in schema] return schema
def schema_resolve_refs(schema, ref_resolver=None, root=None)
Helper method for decoding references. Self-references are resolved automatically; other references are resolved using a callback function. :param object schema: :param callable ref_resolver: :param None root: :return:
3.389351
3.38326
1.0018
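A small sketch, assuming a JSON-schema-like dict where "#" is a self-reference and any other reference is handed to the callback (the callback below is a stand-in): schema = {"properties": {"child": {"$ref": "#"}, "other": {"$ref": "/schemas/other"}}} resolved = schema_resolve_refs(schema, ref_resolver=lambda ref: {"resolved": ref}) # resolved["properties"]["child"] is the resolved root dict itself; # resolved["properties"]["other"] == {"resolved": "/schemas/other"}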
cls = type(str(upper_camel_case(name)), (resource_cls or Resource, collections.MutableMapping), { '__doc__': schema.get('description', '') }) cls._schema = schema cls._client = self cls._links = links = {} for link_schema in schema['links']: link = Link(self, rel=link_schema['rel'], href=link_schema['href'], method=link_schema['method'], schema=link_schema.get('schema', None), target_schema=link_schema.get('targetSchema', None)) # Set Resource._self, etc. for the special methods as they are managed by the Resource class if link.rel in ('self', 'instances', 'create', 'update', 'destroy'): setattr(cls, '_{}'.format(link.rel), link) links[link.rel] = link if link.rel != 'update': # 'update' is a special case because of MutableMapping.update() setattr(cls, snake_case(link.rel), link) # TODO routes (instance & non-instance) for property_name, property_schema in schema.get('properties', {}).items(): # skip $uri and $id as these are already implemented in Resource and overriding them causes unnecessary # fetches. if property_name.startswith('$'): continue if property_schema.get('readOnly', False): # TODO better error message. Raises AttributeError("can't set attribute") setattr(cls, property_name, property(fget=partial((lambda name, obj: getitem(obj, name)), property_name), doc=property_schema.get('description', None))) else: setattr(cls, property_name, property(fget=partial((lambda name, obj: getitem(obj, name)), property_name), fset=partial((lambda name, obj, value: setitem(obj, name, value)), property_name), fdel=partial((lambda name, obj: delitem(obj, name)), property_name), doc=property_schema.get('description', None))) root = None if 'instances' in links: root = cls._instances.href elif 'self' in links: root = cls._self.href[:cls._self.href.rfind('/')] else: root = self._root_path + '/' + name.replace('_', '-') self._resources[root] = cls return cls
def resource_factory(self, name, schema, resource_cls=None)
Registers a new resource with a given schema. The schema must not have any unresolved references (such as `{"$ref": "#"}` for self-references, or otherwise). A subclass of :class:`Resource` may be provided to add specific functionality to the resulting :class:`Resource`. :param str name: :param dict schema: :param Resource resource_cls: a subclass of :class:`Resource` or None :return: The new :class:`Resource`.
3.256697
3.370924
0.966114
result = [] for ib_file_path in find_files(directory, [".xib", ".storyboard"], exclude_dirs): result += extract_string_pairs_in_ib_file(ib_file_path, special_ui_components_prefix) return result
def extract_string_pairs_in_dir(directory, exclude_dirs, special_ui_components_prefix)
Extract string pairs in the given directory's xib/storyboard files. Args: directory (str): The path to the directory. exclude_dirs (list): A list of directories to exclude from extraction. special_ui_components_prefix (str): If not None, extraction will not warn about internationalized UI components with this class prefix. Returns: list: The extracted string pairs for all IB files in the directory.
3.229626
2.587529
1.248151
element_entry_comment = get_element_attribute_or_empty(element, 'userLabel') if element_entry_comment == "": try: element_entry_comment = element.getElementsByTagName('string')[0].firstChild.nodeValue except Exception: element_entry_comment = "" if not element_entry_comment.lower().startswith(JT_INTERNATIONALIZED_COMMENT_PREFIX): return None else: return element_entry_comment[len(JT_INTERNATIONALIZED_COMMENT_PREFIX):]
def extract_element_internationalized_comment(element)
Extracts the xib element's comment, if the element has been internationalized. Args: element (element): The element from which to extract the comment. Returns: The element's internationalized comment, None if it does not exist, or hasn't been internationalized (according to the JTLocalize definitions).
3.255404
3.294518
0.988127
valid_class_names = ["%s%s" % (DEFAULT_UI_COMPONENTS_PREFIX, class_suffix)] if special_ui_components_prefix is not None: valid_class_names.append("%s%s" % (special_ui_components_prefix, class_suffix)) if (not element.hasAttribute('customClass')) or element.attributes['customClass'].value not in valid_class_names: logging.warn("WARNING: %s is internationalized but isn't one of %s", extract_element_internationalized_comment(element), valid_class_names)
def warn_if_element_not_of_class(element, class_suffix, special_ui_components_prefix)
Log a warning if an internationalized element is not of the expected class. Args: element: The xib's XML element. class_suffix: The suffix of the class the element is expected to be. special_ui_components_prefix: If provided, will not warn about classes with this prefix (default is only 'JT')
3.579159
3.696615
0.968226
attributed_strings = ui_element.getElementsByTagName('attributedString') if attributed_strings.length == 0: return False attributed_element = attributed_strings[0] fragment_index = 1 for fragment in attributed_element.getElementsByTagName('fragment'): # The fragment text is either as an attribute <fragment content="TEXT"> # or a child in the format <string key='content'>TEXT</string> try: label_entry_key = fragment.attributes['content'].value except KeyError: label_entry_key = fragment.getElementsByTagName('string')[0].firstChild.nodeValue comment = "%s Part %d" % (comment_prefix, fragment_index) results.append((label_entry_key, comment)) fragment_index += 1 return fragment_index > 1
def add_string_pairs_from_attributed_ui_element(results, ui_element, comment_prefix)
Adds string pairs from a UI element with attributed text Args: results (list): The list to add the results to. ui_element (element): The element from the xib to extract the fragments from. comment_prefix (str): The prefix of the comment to use for extracted strings (each will be appended with a "Part X" suffix) Returns: bool: Whether or not an attributed string was found.
4.06883
3.798327
1.071216
label_entry_comment = extract_element_internationalized_comment(label) if label_entry_comment is None: return warn_if_element_not_of_class(label, 'Label', special_ui_components_prefix) if label.hasAttribute('usesAttributedText') and label.attributes['usesAttributedText'].value == 'YES': add_string_pairs_from_attributed_ui_element(results, label, label_entry_comment) else: try: label_entry_key = label.attributes['text'].value except KeyError: try: label_entry_key = label.getElementsByTagName('string')[0].firstChild.nodeValue except Exception: label_entry_key = 'N/A' logging.warn("%s: Missing text entry in %s", xib_file, label.toxml('UTF8')) results.append((label_entry_key, label_entry_comment))
def add_string_pairs_from_label_element(xib_file, results, label, special_ui_components_prefix)
Adds string pairs from a label element. Args: xib_file (str): Path to the xib file. results (list): The list to add the results to. label (element): The label element from the xib, to extract the string pairs from. special_ui_components_prefix (str): If not None, extraction will not warn about internationalized UI components with this class prefix.
3.578544
3.725979
0.960431
text_field_entry_comment = extract_element_internationalized_comment(text_field) if text_field_entry_comment is None: return if text_field.hasAttribute('usesAttributedText') and text_field.attributes['usesAttributedText'].value == 'YES': add_string_pairs_from_attributed_ui_element(results, text_field, text_field_entry_comment) else: try: text_field_entry_key = text_field.attributes['text'].value results.append((text_field_entry_key, text_field_entry_comment + ' default text value')) except KeyError: pass try: text_field_entry_key = text_field.attributes['placeholder'].value results.append((text_field_entry_key, text_field_entry_comment + ' placeholder text value')) except KeyError: pass warn_if_element_not_of_class(text_field, 'TextField', special_ui_components_prefix)
def add_string_pairs_from_text_field_element(xib_file, results, text_field, special_ui_components_prefix)
Adds string pairs from a textfield element. Args: xib_file (str): Path to the xib file. results (list): The list to add the results to. text_field (element): The textfield element from the xib, to extract the string pairs from. special_ui_components_prefix (str): If not None, extraction will not warn about internationalized UI components with this class prefix.
2.742346
2.847458
0.963085
text_view_entry_comment = extract_element_internationalized_comment(text_view) if text_view_entry_comment is None: return if text_view.hasAttribute('usesAttributedText') and text_view.attributes['usesAttributedText'].value == 'YES': add_string_pairs_from_attributed_ui_element(results, text_view, text_view_entry_comment) else: try: text_view_entry_key = text_view.attributes['text'].value results.append((text_view_entry_key, text_view_entry_comment + ' default text value')) except KeyError: pass warn_if_element_not_of_class(text_view, 'TextView', special_ui_components_prefix)
def add_string_pairs_from_text_view_element(xib_file, results, text_view, special_ui_components_prefix)
Adds string pairs from a textview element. Args: xib_file (str): Path to the xib file. results (list): The list to add the results to. text_view (element): The textview element from the xib, to extract the string pairs from. special_ui_components_prefix (str): A custom prefix for internationalized components to allow (default is only 'JT')
3.609712
3.804689
0.948753
button_entry_comment = extract_element_internationalized_comment(button) if button_entry_comment is None: return for state in button.getElementsByTagName('state'): state_name = state.attributes['key'].value state_entry_comment = button_entry_comment + " - " + state_name + " state of button" if not add_string_pairs_from_attributed_ui_element(results, state, state_entry_comment): try: button_entry_key = state.attributes['title'].value except KeyError: try: button_entry_key = state.getElementsByTagName('string')[0].firstChild.nodeValue except Exception: continue results.append((button_entry_key, state_entry_comment)) warn_if_element_not_of_class(button, 'Button', special_ui_components_prefix)
def add_string_pairs_from_button_element(xib_file, results, button, special_ui_components_prefix)
Adds string pairs from a button xib element. Args: xib_file (str): Path to the xib file. results (list): The list to add the results to. button (element): The button element from the xib, to extract the string pairs from. special_ui_components_prefix (str): A custom prefix for internationalized UI components to allow without a warning (by default only the 'JT' prefix is allowed).
3.9053
4.196039
0.930711
try:
    results = []
    xmldoc = minidom.parse(file_path)
    element_name_to_add_func = {'label': add_string_pairs_from_label_element,
                                'button': add_string_pairs_from_button_element,
                                'textField': add_string_pairs_from_text_field_element,
                                'textView': add_string_pairs_from_text_view_element}
    for element_name in element_name_to_add_func:
        add_func = element_name_to_add_func[element_name]
        elements = xmldoc.getElementsByTagName(element_name)
        for element in elements:
            add_func(file_path, results, element, special_ui_components_prefix)

    # Find strings of format JTL('Key Name', 'Key Comment') and add them to the results
    jtl_brackets_find_results = re.findall(JTL_REGEX, open(file_path).read())
    unescaped_jtl_brackets_find_results = [(unescape(x), unescape(y)) for (x, y) in jtl_brackets_find_results]
    results += unescaped_jtl_brackets_find_results

    if len(results) > 0:
        results = [(None, os.path.basename(file_path))] + results
    return results
except Exception as e:
    logging.warn("ERROR: Error processing %s (%s: %s)", file_path, type(e), str(e))
    return []
def extract_string_pairs_in_ib_file(file_path, special_ui_components_prefix)
Extract the strings pairs (key and comment) from a xib file. Args: file_path (str): The path to the xib file. special_ui_components_prefix (str): If not None, extraction will not warn about internationalized UI components with this class prefix. Returns: list: List of tuples representing the string pairs.
2.658982
2.649528
1.003568
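A minimal usage sketch for the extractor above. The directory walk, the module name in the import, and the 'JT' prefix are illustrative assumptions, not part of the original script:

import os
# Hypothetical module name for the script that defines extract_string_pairs_in_ib_file.
from xib_strings_extractor import extract_string_pairs_in_ib_file

def collect_pairs(xib_dir, special_prefix=None):
    # Walk a directory and gather (key, comment) pairs from every .xib file.
    all_pairs = []
    for root, _dirs, files in os.walk(xib_dir):
        for name in files:
            if name.endswith('.xib'):
                path = os.path.join(root, name)
                all_pairs += extract_string_pairs_in_ib_file(path, special_prefix)
    return all_pairs

# pairs = collect_pairs('MyApp/Resources', special_prefix='JT')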
old_translated_file_dictionary = generate_localization_key_to_entry_dictionary_from_file(translated_file) if excluded_strings_file is not None and os.path.isfile(excluded_strings_file): excluded_file_dictionary = generate_localization_key_to_entry_dictionary_from_file(excluded_strings_file) else: excluded_file_dictionary = {} # The reason we keep a list of the keys, and not just pop is because values can repeat themselves. translated_list = old_translated_file_dictionary.keys() output_dictionary = {} output_file_elements = [] f = open_strings_file(localizable_file, "r") output_file_elements.append(Comment(u % (VALUE_PLACEHOLDER,))) for _header_comment, comments, key, value in extract_header_comment_key_value_tuples_from_file(f): if key in translated_list or key in excluded_file_dictionary: if key in old_translated_file_dictionary: old_translated_file_dictionary.pop(key) elif value in output_dictionary: output_dictionary[value].add_comments(comments) output_file_elements.append(Comment( u"/* There was a value '%s' here but it was a duplicate of an older value and removed. */\n" % value)) else: loc_obj = LocalizationEntry(comments, value, VALUE_PLACEHOLDER) output_dictionary[value] = loc_obj output_file_elements.append(loc_obj) for key, removed_trans in old_translated_file_dictionary.items(): output_file_elements.append(Comment(u % (", ".join(removed_trans.comments), removed_trans.key, removed_trans.value))) write_file_elements_to_strings_file(output_translation_file, output_file_elements)
def localization_diff(localizable_file, translated_file, excluded_strings_file, output_translation_file)
Generates a strings file representing the strings that were yet to be translated. Args: localizable_file (str): The path to the localization strings file, meaning the file that represents the strings that require translation. translated_file (str): The path to the translated strings file, meaning the file containing the strings that were already translated. excluded_strings_file (str): The path to a file that contains all the strings we want to exclude from this and from future diffs. output_translation_file (str): The path to the output file, which will contain the strings the require translation, but are not in the already given translation file.
3.694044
3.749312
0.985259
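A usage sketch for localization_diff; the file paths are hypothetical and only the call itself reflects the function above:

localization_diff(
    localizable_file="en.lproj/Localizable.strings",        # strings requiring translation
    translated_file="fr.lproj/Localizable.strings",         # strings already translated
    excluded_strings_file="excluded.strings",                # may be None or point to a missing file
    output_translation_file="fr.lproj/ToTranslate.strings",  # untranslated entries are written here
)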
is_epub = isinstance(app.builder, EpubBuilder) config_pages = app.config.html_additional_pages if not is_epub and "404" not in config_pages: yield ("404", {}, "404.html")
def add_404_page(app)
Build an extra ``404.html`` page if no ``"404"`` key is in the ``html_additional_pages`` config.
4.774262
3.561171
1.340644
base = context.get("canonical_url") if not base: return target = app.builder.get_target_uri(pagename) context["page_canonical_url"] = base + target
def canonical_url(app, pagename, templatename, context, doctree)
Build the canonical URL for a page. Appends the path for the page to the base URL specified by the ``html_context["canonical_url"]`` config and stores it in ``html_context["page_canonical_url"]``.
4.430829
4.171243
1.062232
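A conf.py sketch for the canonical URL handling above; the base URL is an example value. Since the code simply concatenates ``base + target``, the base should end with a slash:

# conf.py
html_context = {
    "canonical_url": "https://example.readthedocs.io/en/stable/",  # example base URL
}
# Templates can then read page_canonical_url, e.g. to emit a <link rel="canonical"> tag.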
if app.config.singlehtml_sidebars is not None and isinstance( app.builder, SingleFileHTMLBuilder ): app.config.html_sidebars = app.config.singlehtml_sidebars
def singlehtml_sidebars(app)
When using a ``singlehtml`` builder, replace the ``html_sidebars`` config with ``singlehtml_sidebars``. This can be used to change what sidebars are rendered for the single page called ``"index"`` by the builder.
4.263015
3.045997
1.399547
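A conf.py sketch for the ``singlehtml_sidebars`` override described above; the sidebar template names are illustrative:

# conf.py
html_sidebars = {"index": ["sidebarintro.html", "searchbox.html"]}
# Used only when building with the singlehtml builder:
singlehtml_sidebars = {"index": ["localtoc.html", "searchbox.html"]}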
docstring = inspect.getdoc(obj) or ""

if skip or re.search(r"^\s*:internal:\s*$", docstring, re.M) is not None:
    return True
def skip_internal(app, what, name, obj, skip, options)
Skip rendering autodoc when the docstring contains a line with only the string `:internal:`.
4.668551
3.909586
1.194129
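The handler above has the signature of Sphinx's ``autodoc-skip-member`` event, so a sketch of wiring it up from an extension's ``setup()`` could look like this (the extension itself is hypothetical):

def setup(app):
    app.connect("autodoc-skip-member", skip_internal)
    return {"parallel_read_safe": True}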
if what != "module": return lines[:] = [ line for line in lines if not line.startswith((":copyright:", ":license:")) ]
def cut_module_meta(app, what, name, obj, options, lines)
Don't render lines that start with ``:copyright:`` or ``:license:`` when rendering module autodoc. These lines are useful meta information in the source code, but are noisy in the docs.
4.482841
3.200651
1.400603
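Similarly, ``cut_module_meta`` matches the signature of Sphinx's ``autodoc-process-docstring`` event; a registration sketch:

def setup(app):
    app.connect("autodoc-process-docstring", cut_module_meta)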
try: release = pkg_resources.get_distribution(name).version except ImportError: print( textwrap.fill( "'{name}' must be installed to build the documentation." " Install from source using `pip install -e .` in a" " virtualenv.".format(name=name) ) ) sys.exit(1) version = ".".join(release.split(".", version_length)[:version_length]) if placeholder: version = "{}.{}".format(version, placeholder) return release, version
def get_version(name, version_length=2, placeholder="x")
Ensures that the named package is installed and returns version strings to be used by Sphinx. Sphinx uses ``version`` to mean an abbreviated form of the full version string, which is called ``release``. In ``conf.py``:: release, version = get_version("Flask") # release = 1.0.3.dev0, version = 1.0.x :param name: Name of package to get. :param version_length: How many values from ``release`` to use for ``version``. :param placeholder: Extra suffix to add to the version. The default produces versions like ``1.2.x``. :return: ``(release, version)`` tuple.
3.435224
3.129474
1.0977
if app.config.is_pallets_theme is not None: return theme = getattr(app.builder, "theme", None) while theme is not None: if theme.name == "pocoo": app.config.is_pallets_theme = True break theme = theme.base else: app.config.is_pallets_theme = False
def set_is_pallets_theme(app)
Set the ``is_pallets_theme`` config to ``True`` if the current theme is a descendant of the ``pocoo`` theme.
2.841461
2.36825
1.199815
def decorator(f): @wraps(f) def wrapped(app, *args, **kwargs): if not app.config.is_pallets_theme: return default return f(app, *args, **kwargs) return wrapped return decorator
def only_pallets_theme(default=None)
Create a decorator that calls a function only if the ``is_pallets_theme`` config is ``True``. Used to prevent Sphinx event callbacks from doing anything if the Pallets themes are installed but not used. :: @only_pallets_theme() def inject_value(app): ... app.connect("builder-inited", inject_value) :param default: Value to return if a Pallets theme is not in use. :return: A decorator.
2.909225
2.451757
1.186588
old_call = subprocess.call def dummy_call(*args, **kwargs): with tempfile.TemporaryFile("wb+") as f: kwargs["stdout"] = f kwargs["stderr"] = f rv = subprocess.Popen(*args, **kwargs).wait() f.seek(0) click.echo(f.read().decode("utf-8", "replace").rstrip()) return rv subprocess.call = dummy_call try: yield finally: subprocess.call = old_call
def patch_modules()
Patch modules to work better with :meth:`ExampleRunner.invoke`. ``subprocess.call`` output is redirected to ``click.echo`` so it shows up in the example output.
2.875278
2.564769
1.121067
runner = getattr(document, "click_example_runner", None) if runner is None: runner = document.click_example_runner = ExampleRunner() return runner
def get_example_runner(document)
Get or create the :class:`ExampleRunner` instance associated with a document.
3.671241
4.134465
0.88796
output_lines = _output_lines if _output_lines is not None else [] if env: for key, value in sorted(env.items()): value = shlex.quote(value) output_lines.append("$ export {}={}".format(key, value)) args = args or [] if prog_name is None: prog_name = cli.name.replace("_", "-") output_lines.append( "$ {} {}".format(prog_name, " ".join(shlex.quote(x) for x in args)).rstrip() ) # remove "python" from command prog_name = prog_name.rsplit(" ", 1)[-1] if isinstance(input, (tuple, list)): input = "\n".join(input) + "\n" if terminate_input: input += "\x04" result = super(ExampleRunner, self).invoke( cli=cli, args=args, input=input, env=env, prog_name=prog_name, **extra ) output_lines.extend(result.output.splitlines()) return result
def invoke( self, cli, args=None, prog_name=None, input=None, terminate_input=False, env=None, _output_lines=None, **extra )
Like :meth:`CliRunner.invoke` but displays what the user would enter in the terminal for env vars, command args, and prompts. :param terminate_input: Whether to display "^D" after a list of input. :param _output_lines: A list used internally to collect lines to be displayed.
2.527381
2.503335
1.009606
with patch_modules(): code = compile(source, "<docs>", "exec") exec(code, self.namespace)
def declare_example(self, source)
Execute the given code, adding it to the runner's namespace.
9.873701
8.115814
1.2166
code = compile(source, "<docs>", "exec") buffer = [] invoke = partial(self.invoke, _output_lines=buffer) def println(text=""): buffer.append(text) exec( code, self.namespace, { "invoke": invoke, "println": println, "isolated_filesystem": self.isolated_filesystem, }, ) return buffer
def run_example(self, source)
Run commands by executing the given code, returning the lines of input and output. The code should be a series of the following functions: * :meth:`invoke`: Invoke a command, adding env vars, input, and output to the output. * ``println(text="")``: Add a line of text to the output. * :meth:`isolated_filesystem`: A context manager that changes to a temporary directory while executing the block.
5.612088
4.468869
1.255819
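A sketch of how declare_example() and run_example() might be combined; the ``hello`` command and the way ExampleRunner is obtained are assumptions for illustration:

runner = ExampleRunner()  # assumes ExampleRunner is importable from this module

# Define a command in the runner's namespace.
runner.declare_example("""
import click

@click.command()
@click.option("--name", default="World")
def hello(name):
    click.echo("Hello, {}!".format(name))
""")

# Invoke it; the returned lines contain both the simulated prompt and the output.
output_lines = runner.run_example("""
invoke(hello, args=["--name", "Docs"])
println()
""")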
_setup(verbose=verbose, quiet=quiet, color=color, title=title, timestamp=timestamp)
def setup( *, verbose: bool = False, quiet: bool = False, color: str = "auto", title: str = "auto", timestamp: bool = False ) -> None
Configure behavior of message functions. :param verbose: Whether :func:`debug` messages should get printed :param quiet: Hide every message except :func:`warning`, :func:`error`, and :func:`fatal` :param color: Choices: 'auto', 'always', or 'never'. Whether to color output. By default ('auto'), only use color when output is a terminal. :param title: Ditto for setting terminal title :param timestamp: Whether to prefix every message with a time stamp
3.440093
4.160509
0.826844
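A small usage sketch; ``cli_ui`` is assumed here as the import name of the module these message helpers live in:

import cli_ui

cli_ui.setup(verbose=True, timestamp=True)  # show debug() output, prefix each message with a timestamp
cli_ui.debug("connection established")      # now visible because verbose=True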
# Flatten the list of tokens in case some of them are of # class UnicodeSequence: flat_tokens = list() # type: List[Token] for token in tokens: if isinstance(token, UnicodeSequence): flat_tokens.extend(token.tuple()) else: flat_tokens.append(token) with_color = _process_tokens(flat_tokens, end=end, sep=sep, color=True) without_color = _process_tokens(flat_tokens, end=end, sep=sep, color=False) return (with_color, without_color)
def process_tokens( tokens: Sequence[Token], *, end: str = "\n", sep: str = " " ) -> Tuple[str, str]
Returns two strings from a list of tokens: one containing ANSI escape codes, the other only the 'normal' characters.
3.000441
2.838782
1.056947
if using_colorama(): global _INITIALIZED if not _INITIALIZED: colorama.init() _INITIALIZED = True with_color, without_color = process_tokens(tokens, end=end, sep=sep) if CONFIG["record"]: _MESSAGES.append(without_color) if update_title and with_color: write_title_string(without_color, fileobj) to_write = with_color if config_color(fileobj) else without_color write_and_flush(fileobj, to_write)
def message( *tokens: Token, end: str = "\n", sep: str = " ", fileobj: FileObj = sys.stdout, update_title: bool = False ) -> None
Helper method for error, warning, info, debug
4.598363
4.391107
1.047199
error(*tokens, **kwargs)
sys.exit(1)
def fatal(*tokens: Token, **kwargs: Any) -> None
Print an error message and call ``sys.exit``
7.627457
4.602068
1.657398
tokens = [bold, red, "Error:"] + list(tokens) # type: ignore kwargs["fileobj"] = sys.stderr message(*tokens, **kwargs)
def error(*tokens: Token, **kwargs: Any) -> None
Print an error message
7.475709
6.000725
1.245801
tokens = [brown, "Warning:"] + list(tokens) # type: ignore kwargs["fileobj"] = sys.stderr message(*tokens, **kwargs)
def warning(*tokens: Token, **kwargs: Any) -> None
Print a warning message
8.527864
6.582118
1.295611
# We need to know the length of the section: process_tokens_kwargs = kwargs.copy() process_tokens_kwargs["color"] = False no_color = _process_tokens(tokens, **process_tokens_kwargs) info(*tokens, **kwargs) info("-" * len(no_color), end="\n\n")
def info_section(*tokens: Token, **kwargs: Any) -> None
Print an underlined section name
5.603495
5.093201
1.100191
info(bold, blue, "::", reset, *tokens, **kwargs)
def info_1(*tokens: Token, **kwargs: Any) -> None
Print an important informative message
25.310633
14.629177
1.730147
end = "\n" if last else "" info(".", end=end, fileobj=fileobj)
def dot(*, last: bool = False, fileobj: Any = None) -> None
Print a dot without a newline unless it is the last one. Useful when you want to display progress with very little knowledge about the underlying work. :param last: whether this is the last dot (will insert a newline)
8.582591
11.086362
0.774158
num_digits = len(str(n)) counter_format = "(%{}d/%d)".format(num_digits) counter_str = counter_format % (i + 1, n) info(green, "*", reset, counter_str, reset, *rest, **kwargs)
def info_count(i: int, n: int, *rest: Token, **kwargs: Any) -> None
Display a counter before the rest of the message. ``rest`` and ``kwargs`` are passed to :func:`info` Current index should start at 0 and end at ``n-1``, like in ``enumerate()`` :param i: current index :param n: total number of items
4.959136
5.148888
0.963147
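A usage sketch for info_count(); note the 0-based index, matching enumerate():

files = ["a.txt", "b.txt", "c.txt"]
for i, name in enumerate(files):
    info_count(i, len(files), "copying", name)
# Prints lines such as: * (1/3) copying a.txt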
if sys.stdout.isatty(): percent = float(value) / max_value * 100 sys.stdout.write(prefix + ": %.0f%%\r" % percent) sys.stdout.flush()
def info_progress(prefix: str, value: float, max_value: float) -> None
Display info progress in percent. :param value: the current value :param max_value: the max value :param prefix: the prefix message to print
3.001885
3.364559
0.892207
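A usage sketch for info_progress(); output is only written when stdout is a terminal, and the trailing carriage return keeps the updates on one line:

total_bytes = 1024
for received in range(0, total_bytes + 1, 128):
    info_progress("Downloading", received, total_bytes)
# Repeatedly rewrites a line such as: Downloading: 50%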
if not CONFIG["verbose"] or CONFIG["record"]: return message(*tokens, **kwargs)
def debug(*tokens: Token, **kwargs: Any) -> None
Print a debug message. Messages are shown only when ``CONFIG["verbose"]`` is true
17.215492
12.202259
1.410845
return [" " * num + l for l in elems]
def indent_iterable(elems: Sequence[str], num: int = 2) -> List[str]
Indent an iterable.
7.078128
6.015532
1.176642
lines = text.splitlines() return "\n".join(indent_iterable(lines, num=num))
def indent(text: str, num: int = 2) -> str
Indent a piece of text.
8.848722
5.800318
1.525558
tb = sys.exc_info()[2]
buffer = io.StringIO()
# Write the traceback into the in-memory buffer so it can be returned as a token.
traceback.print_tb(tb, file=buffer)
# fmt: off
return (
    red, message + "\n",
    exception.__class__.__name__,
    str(exception), "\n",
    reset, buffer.getvalue()
)
def message_for_exception(exception: Exception, message: str) -> Sequence[Token]
Returns a tuple suitable for cli_ui.error() from the given exception. (Traceback will be part of the message, after the ``message`` argument) Useful when the exception occurs in another thread than the main one.
5.31242
5.007927
1.060802
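A usage sketch: forwarding an exception caught in a worker thread to error(); ``risky_operation`` is a hypothetical stand-in:

try:
    risky_operation()
except Exception as e:
    error(*message_for_exception(e, "Background task failed"))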
tokens = get_ask_tokens(question)
if default:
    tokens.append("(%s)" % default)
info(*tokens)
answer = read_input()
if not answer:
    return default
return answer
def ask_string(*question: Token, default: Optional[str] = None) -> Optional[str]
Ask the user to enter a string.
4.962858
4.915463
1.009642
tokens = get_ask_tokens(question) info(*tokens) answer = read_password() return answer
def ask_password(*question: Token) -> str
Ask the user to enter a password.
14.375617
11.099478
1.295162
if func_desc is None: func_desc = lambda x: str(x) tokens = get_ask_tokens(prompt) info(*tokens) choices.sort(key=func_desc) for i, choice in enumerate(choices, start=1): choice_desc = func_desc(choice) info(" ", blue, "%i" % i, reset, choice_desc) keep_asking = True res = None while keep_asking: answer = read_input() if not answer: return None try: index = int(answer) except ValueError: info("Please enter a valid number") continue if index not in range(1, len(choices) + 1): info(str(index), "is out of range") continue res = choices[index - 1] keep_asking = False return res
def ask_choice( *prompt: Token, choices: List[Any], func_desc: Optional[FuncDesc] = None ) -> Any
Ask the user to choose from a list of choices. :return: the selected choice ``func_desc`` will be called on every list item for displaying and sorting the list. If not given, will default to the identity function. Will loop until: * the user enters a valid index * or leaves the prompt empty In the last case, None will be returned
2.791514
2.890643
0.965707
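A usage sketch for ask_choice(); the choice list is illustrative:

fruits = ["banana", "apple", "cherry"]
choice = ask_choice("Pick a fruit", choices=fruits)
if choice is None:
    info("Nothing selected")  # the user left the prompt empty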
while True:
    tokens = [green, "::", reset] + list(question) + [reset]
    if default:
        tokens.append("(Y/n)")
    else:
        tokens.append("(y/N)")
    info(*tokens)
    answer = read_input()
    if answer.lower() in ["y", "yes"]:
        return True
    if answer.lower() in ["n", "no"]:
        return False
    if not answer:
        return default
    warning("Please answer by 'y' (yes) or 'n' (no) ")
def ask_yes_no(*question: Token, default: bool = False) -> bool
Ask the user to answer by yes or no
3.562034
3.188222
1.117248
if not choices: return message else: result = { difflib.SequenceMatcher(a=user_input, b=choice).ratio(): choice for choice in choices } message += "\nDid you mean: %s?" % result[max(result)] return message
def did_you_mean(message: str, user_input: str, choices: Sequence[str]) -> str
Given a list of choices and an invalid user input, return the message with the closest match from the list appended as a suggestion.
3.466416
2.901819
1.194566
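A usage sketch for did_you_mean(); the command list is illustrative:

commands = ["status", "push", "pull"]
user_input = "puhs"
msg = did_you_mean("Unknown command: %s" % user_input, user_input, commands)
# msg is now "Unknown command: puhs\nDid you mean: push?"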
end_time = datetime.datetime.now() elapsed_time = end_time - self.start_time elapsed_seconds = elapsed_time.seconds hours, remainder = divmod(int(elapsed_seconds), 3600) minutes, seconds = divmod(remainder, 60) as_str = "%sh %sm %ss %dms" % ( hours, minutes, seconds, elapsed_time.microseconds / 1000, ) info("%s took %s" % (self.description, as_str))
def stop(self) -> None
Stop the timer and emit a nice log
2.401627
2.194098
1.094585
parser = ET.XMLParser(resolve_entities=False, strip_cdata=False, remove_blank_text=True)
document = ET.fromstring(xml, parser)
return ('%s\n%s' % ('<?xml version="1.0" encoding="UTF-8"?>',
                    ET.tostring(document, pretty_print=True).decode('utf-8'))).encode('utf-8')
def xmllint_format(xml)
Pretty-print XML like ``xmllint`` does. Arguments: xml (string): Serialized XML
2.05238
2.648992
0.774778
log = getLogger('ocrd.resolver.download_to_directory') # pylint: disable=redefined-outer-name log.debug("directory=|%s| url=|%s| basename=|%s| overwrite=|%s| subdir=|%s|", directory, url, basename, overwrite, subdir) if url is None: raise Exception("'url' must be a string") if directory is None: raise Exception("'directory' must be a string") if basename is None: if (subdir is not None) or \ (directory and url.startswith('file://%s' % directory)): # in case downloading a url 'file:///tmp/foo/bar' to directory '/tmp/foo' basename = url.rsplit('/', 1)[-1] else: basename = safe_filename(url) if subdir is not None: basename = join(subdir, basename) outfilename = join(directory, basename) if exists(outfilename) and not overwrite: log.debug("File already exists and overwrite=False: %s", outfilename) return outfilename outfiledir = outfilename.rsplit('/', 1)[0] # print(outfiledir) if not isdir(outfiledir): makedirs(outfiledir) log.debug("Downloading <%s> to '%s'", url, outfilename) # de-scheme file:// URL if url.startswith('file://'): url = url[len('file://'):] # Copy files or download remote assets if '://' not in url: copyfile(url, outfilename) else: response = requests.get(url) if response.status_code != 200: raise Exception("Not found: %s (HTTP %d)" % (url, response.status_code)) with open(outfilename, 'wb') as outfile: outfile.write(response.content) return outfilename
def download_to_directory(self, directory, url, basename=None, overwrite=False, subdir=None)
Download a file to the workspace. Early Shortcut: If url is a file://-URL and that file is already in the directory, keep it there. If basename is not given but subdir is, assume user knows what she's doing and use last URL segment as the basename. If basename is not given and no subdir is given, use the alnum characters in the URL as the basename. Args: directory (string): Directory to download files to basename (string, None): basename part of the filename on disk. url (string): URL to download from overwrite (boolean): Whether to overwrite existing files with that name subdir (string, None): Subdirectory to create within the directory. Think fileGrp. Returns: Local filename
2.76971
2.84056
0.975058
if dst_dir and not dst_dir.startswith('/'): dst_dir = abspath(dst_dir) if mets_url is None: if baseurl is None: raise Exception("Must pass mets_url and/or baseurl to workspace_from_url") else: mets_url = 'file://%s/%s' % (baseurl, mets_basename if mets_basename else 'mets.xml') if baseurl is None: baseurl = mets_url.rsplit('/', 1)[0] log.debug("workspace_from_url\nmets_url='%s'\nbaseurl='%s'\ndst_dir='%s'", mets_url, baseurl, dst_dir) # resolve to absolute if '://' not in mets_url: mets_url = 'file://%s' % abspath(mets_url) if dst_dir is None: # if mets_url is a file-url assume working directory is source directory if mets_url.startswith('file://'): # if dst_dir was not given and mets_url is a file assume that # dst_dir should be the directory where the mets.xml resides dst_dir = dirname(mets_url[len('file://'):]) else: dst_dir = tempfile.mkdtemp(prefix=TMP_PREFIX) log.debug("Creating workspace '%s' for METS @ <%s>", dst_dir, mets_url) # if mets_basename is not given, use the last URL segment of the mets_url if mets_basename is None: mets_basename = mets_url \ .rsplit('/', 1)[-1] \ .split('?')[0] \ .split('#')[0] dst_mets = join(dst_dir, mets_basename) log.debug("Copying mets url '%s' to '%s'", mets_url, dst_mets) if 'file://' + dst_mets == mets_url: log.debug("Target and source mets are identical") else: if exists(dst_mets) and not clobber_mets: raise Exception("File '%s' already exists but clobber_mets is false" % dst_mets) else: self.download_to_directory(dst_dir, mets_url, basename=mets_basename) workspace = Workspace(self, dst_dir, mets_basename=mets_basename, baseurl=baseurl) if download: for f in workspace.mets.find_files(): workspace.download_file(f) return workspace
def workspace_from_url(self, mets_url, dst_dir=None, clobber_mets=False, mets_basename=None, download=False, baseurl=None)
Create a workspace from a METS by URL. Sets the mets.xml file Arguments: mets_url (string): Source mets URL dst_dir (string, None): Target directory for the workspace clobber_mets (boolean, False): Whether to overwrite existing mets.xml. By default existing mets.xml will raise an exception. download (boolean, False): Whether to download all the files baseurl (string, None): Base URL for resolving relative file locations Returns: Workspace
2.424286
2.47453
0.979696
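A usage sketch for workspace_from_url(); the import path and the METS URL are assumptions based on how OCR-D core typically exposes the Resolver:

from ocrd.resolver import Resolver  # assumed import path

resolver = Resolver()
workspace = resolver.workspace_from_url(
    "https://example.org/data/mets.xml",  # illustrative URL
    dst_dir="/tmp/my-workspace",
    download=False,  # set True to also fetch every file referenced in the METS
)
print(workspace.directory)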
if directory is None: directory = tempfile.mkdtemp(prefix=TMP_PREFIX) if not exists(directory): makedirs(directory) mets_fpath = join(directory, mets_basename) if not clobber_mets and exists(mets_fpath): raise Exception("Not clobbering existing mets.xml in '%s'." % directory) mets = OcrdMets.empty_mets() with open(mets_fpath, 'wb') as fmets: log.info("Writing %s", mets_fpath) fmets.write(mets.to_xml(xmllint=True)) return Workspace(self, directory, mets)
def workspace_from_nothing(self, directory, mets_basename='mets.xml', clobber_mets=False)
Create an empty workspace.
2.635131
2.549923
1.033416
root = self._tree.getroot()
ret = ET.tostring(ET.ElementTree(root), pretty_print=True)
if xmllint:
    ret = xmllint_format(ret)
return ret
def to_xml(self, xmllint=False)
Serialize all properties as pretty-printed XML Args: xmllint (boolean): Format with ``xmllint`` in addition to pretty-printing
3.140363
3.737781
0.840168
ctx.obj = WorkspaceCtx(os.path.abspath(directory), mets_basename, automatic_backup=backup)
def workspace_cli(ctx, directory, mets_basename, backup)
Working with workspace
6.040464
5.859535
1.030878
workspace = ctx.resolver.workspace_from_url(
    mets_url,
    dst_dir=os.path.abspath(workspace_dir if workspace_dir else mkdtemp(prefix=TMP_PREFIX)),
    mets_basename=ctx.mets_basename,
    clobber_mets=clobber_mets,
    download=download,
)
workspace.save_mets()
print(workspace.directory)
def workspace_clone(ctx, clobber_mets, download, mets_url, workspace_dir)
Create a workspace from a METS_URL and return the directory METS_URL can be a URL, an absolute path or a path relative to $PWD. If WORKSPACE_DIR is not provided, creates a temporary directory.
3.659447
3.697659
0.989666
workspace = ctx.resolver.workspace_from_nothing(
    directory=os.path.abspath(directory),
    mets_basename=ctx.mets_basename,
    clobber_mets=clobber_mets
)
workspace.save_mets()
print(workspace.directory)
def workspace_create(ctx, clobber_mets, directory)
Create a workspace with an empty METS file in DIRECTORY. Use '.' for $PWD.
4.091829
4.148216
0.986407