| _id (string, 2-7 chars) | title (string, 1-88 chars) | partition (3 classes) | text (string, 75-19.8k chars) | language (1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q14700
|
raise_api_error
|
train
|
def raise_api_error(resp, state=None):
"""Raise an exception with a pretty message in various states of upload"""
# TODO: Refactor into an Exception class
error_code = resp.status_code
if error_code == 402:
error_message = (
"Please add a payment method to upload more samples. If you continue to "
"experience problems, contact us at help@onecodex.com for assistance."
)
elif error_code == 403:
error_message = "Please login to your One Codex account or pass the appropriate API key."
else:
try:
error_json = resp.json()
except ValueError:
error_json = {}
if "msg" in error_json:
error_message = error_json["msg"].rstrip(".")
elif "message" in error_json:
error_message = error_json["message"].rstrip(".")
else:
error_message = None
if state == "init" and not error_message:
error_message = (
"Could not initialize upload. Are you logged in? If this problem "
"continues, please contact help@onecodex.com for assistance."
)
elif state == "upload" and not error_message:
error_message = (
"File could not be uploaded. If this problem continues, please contact "
"help@onecodex.com for assistance."
)
elif state == "callback" and not error_message:
error_message = (
"Callback could not be completed. If this problem continues, please "
"contact help@onecodex.com for assistance."
)
if error_message is None:
error_message = "Upload failed. Please contact help@onecodex.com for assistance."
raise UploadException(error_message)
|
python
|
{
"resource": ""
}
|
q14701
|
pretty_print_error
|
train
|
def pretty_print_error(err_json):
"""Pretty print Flask-Potion error messages for the user."""
# Special case validation errors
if len(err_json) == 1 and "validationOf" in err_json[0]:
required_fields = ", ".join(err_json[0]["validationOf"]["required"])
return "Validation error. Requires properties: {}.".format(required_fields)
# General error handling
msg = "; ".join(err.get("message", "") for err in err_json)
# Fallback
if not msg:
msg = "Bad request."
return msg
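
# A minimal usage sketch (not from the original source); the error payloads below are
# made up to show how Flask-Potion messages are joined and how the fallback applies.
example_errors = [{"message": "Sample not found"}, {"message": "Invalid tag"}]
print(pretty_print_error(example_errors))  # -> "Sample not found; Invalid tag"
print(pretty_print_error([{}]))            # -> "Bad request."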
|
python
|
{
"resource": ""
}
|
q14702
|
OneCodexBase.delete
|
train
|
def delete(self):
"""Delete this object from the One Codex server."""
check_bind(self)
if self.id is None:
raise ServerError("{} object does not exist yet".format(self.__class__.name))
elif not self.__class__._has_schema_method("destroy"):
raise MethodNotSupported("{} do not support deletion.".format(self.__class__.__name__))
try:
self._resource.delete()
except HTTPError as e:
if e.response.status_code == 403:
raise PermissionDenied("") # FIXME: is this right?
else:
raise e
|
python
|
{
"resource": ""
}
|
q14703
|
OneCodexBase.save
|
train
|
def save(self):
"""Either create or persist changes on this object back to the One Codex server."""
check_bind(self)
creating = self.id is None
if creating and not self.__class__._has_schema_method("create"):
raise MethodNotSupported("{} do not support creating.".format(self.__class__.__name__))
if not creating and not self.__class__._has_schema_method("update"):
raise MethodNotSupported("{} do not support updating.".format(self.__class__.__name__))
try:
self._resource.save()
except HTTPError as e:
if e.response.status_code == 400:
err_json = e.response.json().get("errors", [])
msg = pretty_print_error(err_json)
raise ServerError(msg)
elif e.response.status_code == 404:
action = "creating" if creating else "updating"
raise MethodNotSupported(
"{} do not support {}.".format(self.__class__.__name__, action)
)
elif e.response.status_code == 409:
raise ServerError("This {} object already exists".format(self.__class__.__name__))
else:
raise e
|
python
|
{
"resource": ""
}
|
q14704
|
interleaved_filename
|
train
|
def interleaved_filename(file_path):
"""Return filename used to represent a set of paired-end files. Assumes Illumina-style naming
conventions where each file has _R1_ or _R2_ in its name."""
if not isinstance(file_path, tuple):
raise OneCodexException("Cannot get the interleaved filename without a tuple.")
if re.match(".*[._][Rr][12][_.].*", file_path[0]):
return re.sub("[._][Rr][12]", "", file_path[0])
else:
warnings.warn("Paired-end filenames do not match--are you sure they are correct?")
return file_path[0]
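
# A hedged example (not from the original source), assuming Illumina-style names and the
# module-level `re` import used above:
print(interleaved_filename(("sample_R1_001.fastq.gz", "sample_R2_001.fastq.gz")))
# -> "sample_001.fastq.gz" (the "_R1" marker is stripped from the first filename)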
|
python
|
{
"resource": ""
}
|
q14705
|
_file_size
|
train
|
def _file_size(file_path, uncompressed=False):
"""Return size of a single file, compressed or uncompressed"""
_, ext = os.path.splitext(file_path)
if uncompressed:
if ext in {".gz", ".gzip"}:
with gzip.GzipFile(file_path, mode="rb") as fp:
try:
fp.seek(0, os.SEEK_END)
return fp.tell()
except ValueError:
# on python2, cannot seek from end and must instead read to end
fp.seek(0)
while len(fp.read(8192)) != 0:
pass
return fp.tell()
elif ext in {".bz", ".bz2", ".bzip", ".bzip2"}:
with bz2.BZ2File(file_path, mode="rb") as fp:
fp.seek(0, os.SEEK_END)
return fp.tell()
return os.path.getsize(file_path)
|
python
|
{
"resource": ""
}
|
q14706
|
_call_init_upload
|
train
|
def _call_init_upload(file_name, file_size, metadata, tags, project, samples_resource):
"""Call init_upload at the One Codex API and return data used to upload the file.
Parameters
----------
file_name : `string`
The file_name you wish to associate this fastx file with at One Codex.
file_size : `integer`
Accurate size of file to be uploaded, in bytes.
metadata : `dict`, optional
tags : `list`, optional
project : `string`, optional
UUID of project to associate this sample with.
samples_resource : `onecodex.models.Samples`
Wrapped potion-client object exposing `init_upload` and `confirm_upload` routes to mainline.
Returns
-------
`dict`
Contains, at a minimum, 'upload_url' and 'sample_id'. Should also contain various additional
data used to upload the file to fastx-proxy, a user's S3 bucket, or an intermediate bucket.
"""
upload_args = {
"filename": file_name,
"size": file_size,
"upload_type": "standard", # this is multipart form data
}
if metadata:
# format metadata keys as snake case
new_metadata = {}
for md_key, md_val in metadata.items():
new_metadata[snake_case(md_key)] = md_val
upload_args["metadata"] = new_metadata
if tags:
upload_args["tags"] = tags
if project:
upload_args["project"] = getattr(project, "id", project)
try:
upload_info = samples_resource.init_upload(upload_args)
except requests.exceptions.HTTPError as e:
raise_api_error(e.response, state="init")
except requests.exceptions.ConnectionError:
raise_connectivity_error(file_name)
return upload_info
|
python
|
{
"resource": ""
}
|
q14707
|
_make_retry_fields
|
train
|
def _make_retry_fields(file_name, metadata, tags, project):
"""Generate fields to send to init_multipart_upload in the case that a Sample upload via
fastx-proxy fails.
Parameters
----------
file_name : `string`
The file_name you wish to associate this fastx file with at One Codex.
metadata : `dict`, optional
tags : `list`, optional
project : `string`, optional
UUID of project to associate this sample with.
Returns
-------
`dict`
Contains metadata fields that will be integrated into the Sample model created when
init_multipart_upload is called.
"""
upload_args = {"filename": file_name}
if metadata:
# format metadata keys as snake case
new_metadata = {}
for md_key, md_val in metadata.items():
new_metadata[snake_case(md_key)] = md_val
upload_args["metadata"] = new_metadata
if tags:
upload_args["tags"] = tags
if project:
upload_args["project"] = getattr(project, "id", project)
return upload_args
|
python
|
{
"resource": ""
}
|
q14708
|
_direct_upload
|
train
|
def _direct_upload(file_obj, file_name, fields, session, samples_resource):
"""Uploads a single file-like object via our validating proxy. Maintains compatibility with direct upload
to a user's S3 bucket as well in case we disable our validating proxy.
Parameters
----------
file_obj : `FASTXInterleave`, `FilePassthru`, or a file-like object
A wrapper around a pair of fastx files (`FASTXInterleave`) or a single fastx file. In the
case of paired files, they will be interleaved and uploaded uncompressed. In the case of a
single file, it will simply be passed through (`FilePassthru`) to One Codex, compressed
or otherwise. If a file-like object is given, its mime-type will be sent as 'text/plain'.
file_name : `string`
The file_name you wish to associate this fastx file with at One Codex.
fields : `dict`
Additional data fields to include as JSON in the POST. Must include 'sample_id' and
'upload_url' at a minimum.
samples_resource : `onecodex.models.Samples`
Wrapped potion-client object exposing `init_upload` and `confirm_upload` routes to mainline.
Raises
------
RetryableUploadException
In cases where the proxy is temporarily down or we experience connectivity issues
UploadException
In other cases where the proxy determines the upload is invalid and should *not* be retried.
"""
# need an OrderedDict to preserve field order for S3, required for Python 2.7
multipart_fields = OrderedDict()
for k, v in fields["additional_fields"].items():
multipart_fields[str(k)] = str(v)
# this attribute is only in FASTXInterleave and FilePassthru
mime_type = getattr(file_obj, "mime_type", "text/plain")
multipart_fields["file"] = (file_name, file_obj, mime_type)
encoder = MultipartEncoder(multipart_fields)
upload_request = None
try:
upload_request = session.post(
fields["upload_url"],
data=encoder,
headers={"Content-Type": encoder.content_type},
auth={},
)
except requests.exceptions.ConnectionError:
pass
# If we expect a status *always* try to check it,
# waiting up to 4 hours for buffering to complete (~30-50GB file gzipped)
if "status_url" in fields["additional_fields"]:
now = time.time()
while time.time() < (now + 60 * 60 * 4):
try:
resp = session.post(
fields["additional_fields"]["status_url"],
json={"sample_id": fields["sample_id"]},
)
resp.raise_for_status()
except (ValueError, requests.exceptions.RequestException) as e:
logging.debug("Retrying due to error: {}".format(e))
raise RetryableUploadException(
"Unexpected failure of direct upload proxy. Retrying..."
)
if resp.json() and resp.json().get("complete", True) is False:
logging.debug("Blocking on waiting for proxy to complete (in progress)...")
time.sleep(30)
else:
break
# Return if successfully processed
if resp.json().get("code") in [200, 201]:
file_obj.close()
return
elif resp.json().get("code") == 500:
logging.debug("Retrying due to 500 from proxy...")
raise RetryableUploadException("Unexpected issue with direct upload proxy. Retrying...")
else:
raise_api_error(resp, state="upload")
# Direct to S3 case
else:
file_obj.close()
if upload_request.status_code not in [200, 201]:
raise RetryableUploadException("Unknown connectivity issue with proxy upload.")
# Issue a callback -- this only happens in the direct-to-S3 case
try:
if not fields["additional_fields"].get("callback_url"):
samples_resource.confirm_upload(
{"sample_id": fields["sample_id"], "upload_type": "standard"}
)
except requests.exceptions.HTTPError as e:
raise_api_error(e.response, state="callback")
except requests.exceptions.ConnectionError:
raise_connectivity_error()
|
python
|
{
"resource": ""
}
|
q14709
|
upload_sequence_fileobj
|
train
|
def upload_sequence_fileobj(file_obj, file_name, fields, retry_fields, session, samples_resource):
"""Uploads a single file-like object to the One Codex server via either fastx-proxy or directly
to S3.
Parameters
----------
file_obj : `FASTXInterleave`, `FilePassthru`, or a file-like object
A wrapper around a pair of fastx files (`FASTXInterleave`) or a single fastx file. In the
case of paired files, they will be interleaved and uploaded uncompressed. In the case of a
single file, it will simply be passed through (`FilePassthru`) to One Codex, compressed
or otherwise. If a file-like object is given, its mime-type will be sent as 'text/plain'.
file_name : `string`
The file_name you wish to associate this fastx file with at One Codex.
fields : `dict`
Additional data fields to include as JSON in the POST. Must include 'sample_id' and
'upload_url' at a minimum.
retry_fields : `dict`
Metadata sent to `init_multipart_upload` in the case that the upload via fastx-proxy fails.
session : `requests.Session`
Connection to One Codex API.
samples_resource : `onecodex.models.Samples`
Wrapped potion-client object exposing `init_upload` and `confirm_upload` routes to mainline.
Raises
------
UploadException
In the case of a fatal exception during an upload.
Returns
-------
`string` containing sample ID of newly uploaded file.
"""
# First attempt to upload via our validating proxy
try:
_direct_upload(file_obj, file_name, fields, session, samples_resource)
sample_id = fields["sample_id"]
except RetryableUploadException:
# upload failed--retry direct upload to S3 intermediate
logging.error("{}: Connectivity issue, trying direct upload...".format(file_name))
file_obj.seek(0) # reset file_obj back to start
try:
retry_fields = samples_resource.init_multipart_upload(retry_fields)
except requests.exceptions.HTTPError as e:
raise_api_error(e.response, state="init")
except requests.exceptions.ConnectionError:
raise_connectivity_error(file_name)
s3_upload = _s3_intermediate_upload(
file_obj,
file_name,
retry_fields,
session,
samples_resource._client._root_url + retry_fields["callback_url"], # full callback url
)
sample_id = s3_upload.get("sample_id", "<UUID not yet assigned>")
logging.info("{}: finished as sample {}".format(file_name, sample_id))
return sample_id
|
python
|
{
"resource": ""
}
|
q14710
|
upload_document
|
train
|
def upload_document(file_path, session, documents_resource, progressbar=None):
"""Uploads multiple document files to the One Codex server directly to S3 via an intermediate
bucket.
Parameters
----------
file_path : `str`
A path to a file on the system.
session : `requests.Session`
Connection to One Codex API.
documents_resource : `onecodex.models.Documents`
Wrapped potion-client object exposing `init_upload` and `confirm_upload` methods.
progressbar : `click.progressbar`, optional
If passed, display a progress bar using Click.
Raises
------
UploadException
In the case of a fatal exception during an upload.
Returns
-------
A `str` document ID for the newly uploaded file.
"""
if not isinstance(file_path, six.string_types):
raise ValueError(
"Expected file_path to be a string, got {}".format(type(file_path).__name__)
)
file_name, file_size, _ = _file_stats(file_path, enforce_fastx=False)
# disable progressbar while keeping context manager
if not progressbar:
progressbar = FakeProgressBar()
with progressbar as bar:
fobj = FilePassthru(file_path, file_size, bar)
document_id = upload_document_fileobj(fobj, file_name, session, documents_resource)
bar.finish()
return document_id
|
python
|
{
"resource": ""
}
|
q14711
|
upload_document_fileobj
|
train
|
def upload_document_fileobj(file_obj, file_name, session, documents_resource, log=None):
"""Uploads a single file-like object to the One Codex server directly to S3.
Parameters
----------
file_obj : `FilePassthru`, or a file-like object
If a file-like object is given, its mime-type will be sent as 'text/plain'. Otherwise,
`FilePassthru` will send a compressed type if the file is gzip'd or bzip'd.
file_name : `string`
The file_name you wish to associate this file with at One Codex.
session : `requests.Session`
Connection to One Codex API.
documents_resource : `onecodex.models.Documents`
Wrapped potion-client object exposing `init_upload` and `confirm_upload` routes to mainline.
Notes
-----
In contrast to `upload_sample_fileobj`, this method will /only/ upload to an S3 intermediate
bucket--not via our direct proxy or directly to a user's S3 bucket with a signed request.
Raises
------
UploadException
In the case of a fatal exception during an upload.
Returns
-------
`string` containing the document UUID of the newly uploaded file.
"""
try:
fields = documents_resource.init_multipart_upload()
except requests.exceptions.HTTPError as e:
raise_api_error(e.response, state="init")
except requests.exceptions.ConnectionError:
raise_connectivity_error(file_name)
s3_upload = _s3_intermediate_upload(
file_obj,
file_name,
fields,
session,
documents_resource._client._root_url + fields["callback_url"], # full callback url
)
document_id = s3_upload.get("document_id", "<UUID not yet assigned>")
logging.info("{}: finished as document {}".format(file_name, document_id))
return document_id
|
python
|
{
"resource": ""
}
|
q14712
|
_s3_intermediate_upload
|
train
|
def _s3_intermediate_upload(file_obj, file_name, fields, session, callback_url):
"""Uploads a single file-like object to an intermediate S3 bucket which One Codex can pull from
after receiving a callback.
Parameters
----------
file_obj : `FASTXInterleave`, `FilePassthru`, or a file-like object
A wrapper around a pair of fastx files (`FASTXInterleave`) or a single fastx file. In the
case of paired files, they will be interleaved and uploaded uncompressed. In the case of a
single file, it will simply be passed through (`FilePassthru`) to One Codex, compressed
or otherwise. If a file-like object is given, its mime-type will be sent as 'text/plain'.
file_name : `string`
The file_name you wish to associate this fastx file with at One Codex.
fields : `dict`
Additional data fields to include as JSON in the POST.
callback_url : `string`
API callback at One Codex which will trigger a pull from this S3 bucket.
Raises
------
UploadException
In the case of a fatal exception during an upload. Note we rely on boto3 to handle its own retry logic.
Returns
-------
`dict` : JSON results from internal confirm import callback URL
"""
import boto3
from boto3.s3.transfer import TransferConfig
from boto3.exceptions import S3UploadFailedError
# actually do the upload
client = boto3.client(
"s3",
aws_access_key_id=fields["upload_aws_access_key_id"],
aws_secret_access_key=fields["upload_aws_secret_access_key"],
)
# if boto uses threads, ctrl+c won't work
config = TransferConfig(use_threads=False)
# let boto3 update our progressbar rather than our FASTX wrappers, if applicable
boto_kwargs = {}
if hasattr(file_obj, "progressbar"):
boto_kwargs["Callback"] = file_obj.progressbar.update
file_obj.progressbar = None
try:
client.upload_fileobj(
file_obj,
fields["s3_bucket"],
fields["file_id"],
ExtraArgs={"ServerSideEncryption": "AES256"},
Config=config,
**boto_kwargs
)
except S3UploadFailedError:
raise_connectivity_error(file_name)
# issue a callback
try:
resp = session.post(
callback_url,
json={
"s3_path": "s3://{}/{}".format(fields["s3_bucket"], fields["file_id"]),
"filename": file_name,
"import_as_document": fields.get("import_as_document", False),
},
)
except requests.exceptions.ConnectionError:
raise_connectivity_error(file_name)
if resp.status_code != 200:
raise_connectivity_error(file_name)
try:
return resp.json()
except ValueError:
return {}
|
python
|
{
"resource": ""
}
|
q14713
|
merge_strings_files
|
train
|
def merge_strings_files(old_strings_file, new_strings_file):
""" Merges the old strings file with the new one.
Args:
old_strings_file (str): The path to the old strings file (previously produced, and possibly altered)
new_strings_file (str): The path to the new strings file (newly produced).
"""
old_localizable_dict = generate_localization_key_to_entry_dictionary_from_file(old_strings_file)
output_file_elements = []
f = open_strings_file(new_strings_file, "r+")
for header_comment, comments, key, value in extract_header_comment_key_value_tuples_from_file(f):
if len(header_comment) > 0:
output_file_elements.append(Comment(header_comment))
localize_value = value
if key in old_localizable_dict:
localize_value = old_localizable_dict[key].value
output_file_elements.append(LocalizationEntry(comments, key, localize_value))
f.close()
write_file_elements_to_strings_file(old_strings_file, output_file_elements)
|
python
|
{
"resource": ""
}
|
q14714
|
LocalizationCommandLineOperation.configure_parser
|
train
|
def configure_parser(self, parser):
"""
Adds the necessary supported arguments to the argument parser.
Args:
parser (argparse.ArgumentParser): The parser to add arguments to.
"""
parser.add_argument("--log_path", default="", help="The log file path")
parser.add_argument("--verbose", help="Increase logging verbosity", action="store_true")
|
python
|
{
"resource": ""
}
|
q14715
|
LocalizationCommandLineOperation.run_with_standalone_parser
|
train
|
def run_with_standalone_parser(self):
"""
Will run the operation as standalone with a new ArgumentParser
"""
parser = argparse.ArgumentParser(description=self.description())
self.configure_parser(parser)
self.run(parser.parse_args())
|
python
|
{
"resource": ""
}
|
q14716
|
DistanceMixin.alpha_diversity
|
train
|
def alpha_diversity(self, metric="simpson", rank="auto"):
"""Caculate the diversity within a community.
Parameters
----------
metric : {'simpson', 'chao1', 'shannon'}
The diversity metric to calculate.
rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional
Analysis will be restricted to abundances of taxa at the specified level.
Returns
-------
pandas.DataFrame with a single column of diversity values, indexed by classification ID.
"""
if metric not in ("simpson", "chao1", "shannon"):
raise OneCodexException(
"For alpha diversity, metric must be one of: simpson, chao1, shannon"
)
# needs read counts, not relative abundances
if self._guess_normalized():
raise OneCodexException("Alpha diversity requires unnormalized read counts.")
df = self.to_df(rank=rank, normalize=False)
output = {"classification_id": [], metric: []}
for c_id in df.index:
output["classification_id"].append(c_id)
output[metric].append(
skbio.diversity.alpha_diversity(metric, df.loc[c_id].tolist(), [c_id]).values[0]
)
return pd.DataFrame(output).set_index("classification_id")
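
# A hedged usage sketch, not from the original source: assumes `samples` is a
# SampleCollection holding unnormalized read counts.
# shannon = samples.alpha_diversity(metric="shannon", rank="species")
# shannon.head()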
|
python
|
{
"resource": ""
}
|
q14717
|
DistanceMixin.beta_diversity
|
train
|
def beta_diversity(self, metric="braycurtis", rank="auto"):
"""Calculate the diversity between two communities.
Parameters
----------
metric : {'jaccard', 'braycurtis', 'cityblock'}
The distance metric to calculate.
rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional
Analysis will be restricted to abundances of taxa at the specified level.
Returns
-------
skbio.stats.distance.DistanceMatrix, a distance matrix.
"""
if metric not in ("jaccard", "braycurtis", "cityblock"):
raise OneCodexException(
"For beta diversity, metric must be one of: jaccard, braycurtis, cityblock"
)
# needs read counts, not relative abundances
if self._guess_normalized():
raise OneCodexException("Beta diversity requires unnormalized read counts.")
df = self.to_df(rank=rank, normalize=False)
counts = []
for c_id in df.index:
counts.append(df.loc[c_id].tolist())
return skbio.diversity.beta_diversity(metric, counts, df.index.tolist())
|
python
|
{
"resource": ""
}
|
q14718
|
DistanceMixin.unifrac
|
train
|
def unifrac(self, weighted=True, rank="auto"):
"""A beta diversity metric that takes into account the relative relatedness of community
members. Weighted UniFrac looks at abundances, unweighted UniFrac looks at presence.
Parameters
----------
weighted : `bool`
Calculate the weighted (True) or unweighted (False) distance metric.
rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional
Analysis will be restricted to abundances of taxa at the specified level.
Returns
-------
skbio.stats.distance.DistanceMatrix, a distance matrix.
"""
# needs read counts, not relative abundances
if self._guess_normalized():
raise OneCodexException("UniFrac requires unnormalized read counts.")
df = self.to_df(rank=rank, normalize=False)
counts = []
for c_id in df.index:
counts.append(df.loc[c_id].tolist())
tax_ids = df.keys().tolist()
tree = self.tree_build()
tree = self.tree_prune_rank(tree, rank=df.ocx_rank)
# there's a bug (?) in skbio where it expects the root to only have
# one child, so we do a little faking here
from skbio.tree import TreeNode
new_tree = TreeNode(name="fake root")
new_tree.rank = "no rank"
new_tree.append(tree)
# then finally run the calculation and return
if weighted:
return skbio.diversity.beta_diversity(
"weighted_unifrac", counts, df.index.tolist(), tree=new_tree, otu_ids=tax_ids
)
else:
return skbio.diversity.beta_diversity(
"unweighted_unifrac", counts, df.index.tolist(), tree=new_tree, otu_ids=tax_ids
)
|
python
|
{
"resource": ""
}
|
q14719
|
fetch_api_key_from_uname
|
train
|
def fetch_api_key_from_uname(username, password, server_url):
"""
Retrieves an API key from the One Codex webpage given the username and password
"""
# TODO: Hit programmatic endpoint to fetch JWT key, not API key
with requests.Session() as session:
# get the login page normally
text = session.get(server_url + "login").text
# retrieve the CSRF token out of it
csrf = re.search('type="hidden" value="([^"]+)"', text).group(1)
# and resubmit using the username/password *and* the CSRF
login_data = {
"email": username,
"password": password,
"csrf_token": csrf,
"next": "/api/get_token",
}
page = session.post(server_url + "login", data=login_data)
try:
key = page.json()["key"]
except (ValueError, KeyError): # ValueError includes simplejson.decoder.JSONDecodeError
key = None
return key
|
python
|
{
"resource": ""
}
|
q14720
|
check_version
|
train
|
def check_version(version, server):
"""Check if the current CLI version is supported by the One Codex backend.
Parameters
----------
version : `string`
Current version of the One Codex client library
server : `string`
Complete URL to One Codex server, e.g., https://app.onecodex.com
Returns
-------
`tuple` containing two values:
- True if the user *must* upgrade their software, otherwise False
- An error message if the user should upgrade, otherwise None.
"""
def version_inadequate(client_version, server_version):
"""Simple, fast check for version inequality.
Could use python package `semver` if we need more precise checks in
edge cases, but this generally works for now.
"""
client_version = tuple([int(x) for x in client_version.split("-")[0].split(".")])
server_version = tuple([int(x) for x in server_version.split(".")])
return client_version < server_version
# this will probably live on /api/v0 forever for compat with older CLI versions
data = requests.post(server + "api/v0/check_for_cli_update", data={"version": version})
if data.status_code != 200:
return False, "Error connecting to server"
data = data.json()
latest_version = data["latest_version"]
if version_inadequate(version, latest_version):
return (
True,
(
"Please upgrade your client to the latest version (v{}) using the command "
"`pip install --upgrade onecodex`".format(latest_version)
),
)
return False, None
|
python
|
{
"resource": ""
}
|
q14721
|
pprint
|
train
|
def pprint(j, no_pretty):
"""
Prints as formatted JSON
"""
if not no_pretty:
click.echo(
json.dumps(j, cls=PotionJSONEncoder, sort_keys=True, indent=4, separators=(",", ": "))
)
else:
click.echo(j)
|
python
|
{
"resource": ""
}
|
q14722
|
is_insecure_platform
|
train
|
def is_insecure_platform():
"""
Checks if the current system is missing an SSLContext object
"""
v = sys.version_info
if v.major == 3:
return False  # a missing SSLContext is only an issue on Python 2
if v.major == 2 and v.minor == 7 and v.micro >= 9:
return False # >= 2.7.9 includes the new SSL updates
try:
# if these all import, requests[security] is installed and provides a patched SSLContext
import OpenSSL  # noqa
import ndg  # noqa
import pyasn1  # noqa
return False
except ImportError:
pass
return True
|
python
|
{
"resource": ""
}
|
q14723
|
warn_if_insecure_platform
|
train
|
def warn_if_insecure_platform():
"""
Produces a nice message if SSLContext object is not available.
Also returns True -> platform is insecure
False -> platform is secure
"""
m = (
"\n"
"######################################################################################\n" # noqa
"# #\n" # noqa
"# Your version of Python appears to be out of date and lack important security #\n" # noqa
"# features. Please update to Python >= 2.7.9 or `pip install requests[security]`. #\n" # noqa
"# #\n" # noqa
"# InsecurePlatformWarning: A true SSLContext object is not available. This #\n" # noqa
"# prevents urllib3 from configuring SSL appropriately and may cause certain #\n" # noqa
"# SSL connections to fail. For more information, see #\n" # noqa
"# https://urllib3.readthedocs.org/en/latest/security.html#insecureplatformwarning. #\n" # noqa
"# #\n" # noqa
"######################################################################################\n"
) # noqa
if is_insecure_platform():
click.echo(m, err=True)
return True
else:
cli_log.debug("Python SSLContext passed")
return False
|
python
|
{
"resource": ""
}
|
q14724
|
download_file_helper
|
train
|
def download_file_helper(url, input_path):
"""
Manages the chunked downloading of a file given a URL
"""
r = requests.get(url, stream=True)
if r.status_code != 200:
cli_log.error("Failed to download file: %s" % r.json()["message"])
return
local_full_path = get_download_dest(input_path, r.url)
original_filename = os.path.split(local_full_path)[-1]
with open(local_full_path, "wb") as f:
click.echo("Downloading {}".format(original_filename), err=True)
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
pprint("Successfully downloaded %s to %s" % (original_filename, local_full_path), True)
|
python
|
{
"resource": ""
}
|
q14725
|
check_for_allowed_file
|
train
|
def check_for_allowed_file(f):
"""
Checks a file extension against a list of seq file exts
"""
for ext in SUPPORTED_EXTENSIONS:
if f.endswith(ext):
return True
log.error("Failed upload: Not an allowed file extension: %s", f)
raise SystemExit
|
python
|
{
"resource": ""
}
|
q14726
|
TaxonomyMixin.tree_build
|
train
|
def tree_build(self):
"""Build a tree from the taxonomy data present in this `ClassificationsDataFrame` or
`SampleCollection`.
Returns
-------
`skbio.tree.TreeNode`, the root node of a tree that contains all the taxa in the current
analysis and their parents leading back to the root node.
"""
from skbio.tree import TreeNode
# build all the nodes
nodes = {}
for tax_id in self.taxonomy.index:
node = TreeNode(name=tax_id, length=1)
node.tax_name = self.taxonomy["name"][tax_id]
node.rank = self.taxonomy["rank"][tax_id]
node.parent_tax_id = self.taxonomy["parent_tax_id"][tax_id]
nodes[tax_id] = node
# generate all the links
for tax_id in self.taxonomy.index:
try:
parent = nodes[nodes[tax_id].parent_tax_id]
except KeyError:
if tax_id != "1":
warnings.warn(
"tax_id={} has parent_tax_id={} which is not in tree"
"".format(tax_id, nodes[tax_id].parent_tax_id)
)
continue
parent.append(nodes[tax_id])
return nodes["1"]
|
python
|
{
"resource": ""
}
|
q14727
|
TaxonomyMixin.tree_prune_tax_ids
|
train
|
def tree_prune_tax_ids(self, tree, tax_ids):
"""Prunes a tree back to contain only the tax_ids in the list and their parents.
Parameters
----------
tree : `skbio.tree.TreeNode`
The root node of the tree to perform this operation on.
tax_ids : `list`
A `list` of taxonomic IDs to keep in the tree.
Returns
-------
`skbio.tree.TreeNode`, the root of a tree containing the given taxonomic IDs and their
parents, leading back to the root node.
"""
tax_ids_to_keep = []
for tax_id in tax_ids:
tax_ids_to_keep.append(tax_id)
tax_ids_to_keep.extend([x.name for x in tree.find(tax_id).ancestors()])
tree = tree.copy()
tree.remove_deleted(lambda n: n.name not in tax_ids_to_keep)
return tree
|
python
|
{
"resource": ""
}
|
q14728
|
TaxonomyMixin.tree_prune_rank
|
train
|
def tree_prune_rank(self, tree, rank="species"):
"""Takes a TreeNode tree and prunes off any tips not at the specified rank and backwards up
until all of the tips are at the specified rank.
Parameters
----------
tree : `skbio.tree.TreeNode`
The root node of the tree to perform this operation on.
rank : {'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional
Analysis will be restricted to abundances of taxa at the specified level.
Returns
-------
`skbio.tree.TreeNode`, the root of the tree where all tips are at the given rank, and all
tips have a path back to the root node.
Examples
--------
tree_prune_rank(tree, 'species') will remove all subspecies/strain nodes and return a tree
containing all species-level nodes and higher.
"""
if rank is None:
return tree.copy()
tree = tree.copy()
for node in tree.postorder():
if node.rank == rank:
node._above_rank = True
elif any([getattr(n, "_above_rank", False) for n in node.children]):
node._above_rank = True
else:
node._above_rank = False
tree.remove_deleted(lambda n: not getattr(n, "_above_rank", False))
return tree
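
# A hedged usage sketch, not from the original source: assumes `samples` is a
# SampleCollection, so the TaxonomyMixin methods above are available on it.
# tree = samples.tree_build()
# species_tree = samples.tree_prune_rank(tree, rank="species")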
|
python
|
{
"resource": ""
}
|
q14729
|
write_file_elements_to_strings_file
|
train
|
def write_file_elements_to_strings_file(file_path, file_elements):
""" Write elements to the string file
Args:
file_path (str): The path to the strings file
file_elements (list) : List of elements to write to the file.
"""
f = open_strings_file(file_path, "w")
for element in file_elements:
f.write(unicode(element))
f.write(u"\n")
f.close()
|
python
|
{
"resource": ""
}
|
q14730
|
setup_logging
|
train
|
def setup_logging(args=None):
""" Setup logging module.
Args:
args (optional): The arguments returned by the argparse module.
"""
logging_level = logging.WARNING
if args is not None and args.verbose:
logging_level = logging.INFO
config = {"level": logging_level, "format": "jtlocalize:%(message)s"}
if args is not None and args.log_path != "":
config["filename"] = args.log_path
logging.basicConfig(**config)
|
python
|
{
"resource": ""
}
|
q14731
|
extract_header_comment_key_value_tuples_from_file
|
train
|
def extract_header_comment_key_value_tuples_from_file(file_descriptor):
""" Extracts tuples representing comments and localization entries from strings file.
Args:
file_descriptor (file): The file to read the tuples from
Returns:
list : List of tuples representing the headers and localization entries.
"""
file_data = file_descriptor.read()
findall_result = re.findall(HEADER_COMMENT_KEY_VALUE_TUPLES_REGEX, file_data, re.MULTILINE | re.DOTALL)
returned_list = []
for header_comment, _ignored, raw_comments, key, value in findall_result:
comments = re.findall("/\* (.*?) \*/", raw_comments)
if len(comments) == 0:
comments = [u""]
returned_list.append((header_comment, comments, key, value))
return returned_list
|
python
|
{
"resource": ""
}
|
q14732
|
extract_jtl_string_pairs_from_text_file
|
train
|
def extract_jtl_string_pairs_from_text_file(results_dict, file_path):
""" Extracts all string pairs matching the JTL pattern from given text file.
This can be used as an "extract_func" argument in the extract_string_pairs_in_directory method.
Args:
results_dict (dict): The dict to add the string pairs to.
file_path (str): The path of the file from which to extract the string pairs.
"""
result_pairs = re.findall(JTL_REGEX, open(file_path).read())
for result_key, result_comment in result_pairs:
results_dict[result_key] = result_comment
return results_dict
|
python
|
{
"resource": ""
}
|
q14733
|
extract_string_pairs_in_directory
|
train
|
def extract_string_pairs_in_directory(directory_path, extract_func, filter_func):
""" Retrieves all string pairs in the directory
Args:
directory_path (str): The path of the directory containing the file to extract string pairs from.
extract_func (function): Function for extracting the localization keys and comments from the files.
The extract function receives 2 parameters:
- dict that the keys (a key in the dict) and comments (a value in the dict) are added to.
- str representing file path
filter_func (function): Function for filtering files in the directory.
The filter function receives the file name and returns a bool representing the filter result.
True if the file name passed the filter, False otherwise.
Returns:
dict: A mapping between string pairs first value (probably the key), and the second value (probably the comment).
"""
result = {}
for root, dirnames, filenames in os.walk(directory_path):
for file_name in filenames:
if filter_func(file_name):
file_path = os.path.join(root, file_name)
try:
extract_func(result, file_path)
except Exception as e:
print "Error in file " + file_name
print e
return result
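
# A hedged, self-contained sketch (not from the original source): the extractor below is
# hypothetical and simply records file sizes, to show the expected calling pattern. It
# relies on the module-level `os` import used above.
def _example_extractor(results_dict, file_path):
    results_dict[file_path] = os.path.getsize(file_path)
    return results_dict

example_sizes = extract_string_pairs_in_directory(
    ".", _example_extractor, lambda file_name: file_name.endswith(".strings")
)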
|
python
|
{
"resource": ""
}
|
q14734
|
write_entry_to_file
|
train
|
def write_entry_to_file(file_descriptor, entry_comment, entry_key):
""" Writes a localization entry to the file
Args:
file_descriptor (file, instance): The file to write the entry to.
entry_comment (str): The entry's comment.
entry_key (str): The entry's key.
"""
escaped_key = re.sub(r'([^\\])"', '\\1\\"', entry_key)
file_descriptor.write(u'/* %s */\n' % entry_comment)
file_descriptor.write(u'"%s" = "%s";\n' % (escaped_key, escaped_key))
|
python
|
{
"resource": ""
}
|
q14735
|
append_dictionary_to_file
|
train
|
def append_dictionary_to_file(localization_key_to_comment, file_path, section_name):
""" Appends dictionary of localization keys and comments to a file
Args:
localization_key_to_comment (dict): A mapping between localization keys and comments.
file_path (str): The path of the file to append to.
section_name (str): The name of the section.
"""
output_file = open_strings_file(file_path, "a")
write_section_header_to_file(output_file, section_name)
for entry_key, entry_comment in sorted(localization_key_to_comment.iteritems(), key=operator.itemgetter(1)):
output_file.write(u'\n')
write_entry_to_file(output_file, entry_comment, entry_key)
output_file.close()
|
python
|
{
"resource": ""
}
|
q14736
|
write_dict_to_new_file
|
train
|
def write_dict_to_new_file(file_name, localization_key_to_comment):
""" Writes dictionary of localization keys and comments to a file.
Args:
localization_key_to_comment (dict): A mapping between localization keys and comments.
file_name (str): The path of the file to write to.
"""
output_file_descriptor = open_strings_file(file_name, "w")
for entry_key, entry_comment in sorted(localization_key_to_comment.iteritems(), key=operator.itemgetter(1)):
write_entry_to_file(output_file_descriptor, entry_comment, entry_key)
output_file_descriptor.write(u'\n')
output_file_descriptor.close()
|
python
|
{
"resource": ""
}
|
q14737
|
find_files
|
train
|
def find_files(base_dir, extensions, exclude_dirs=list()):
""" Find all files matching the given extensions.
Args:
base_dir (str): Path of base directory to search in.
extensions (list): A list of file extensions to search for.
exclude_dirs (list): A list of directories to exclude from search.
Returns:
list of paths that match the search
"""
result = []
for root, dir_names, file_names in os.walk(base_dir):
for filename in file_names:
candidate = os.path.join(root, filename)
if should_include_file_in_search(candidate, extensions, exclude_dirs):
result.append(candidate)
return result
|
python
|
{
"resource": ""
}
|
q14738
|
should_include_file_in_search
|
train
|
def should_include_file_in_search(file_name, extensions, exclude_dirs):
""" Whether or not a filename matches a search criteria according to arguments.
Args:
file_name (str): A file path to check.
extensions (list): A list of file extensions file should match.
exclude_dirs (list): A list of directories to exclude from search.
Returns:
A boolean of whether or not file matches search criteria.
"""
return (exclude_dirs is None or not any(file_name.startswith(d) for d in exclude_dirs)) and \
any(file_name.endswith(e) for e in extensions)
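
# Hedged examples (not from the original source) with assumed project paths:
should_include_file_in_search("./Classes/LoginViewController.m", [".m", ".mm"], ["./Pods"])
# -> True: matches the .m extension and is not under an excluded directory
should_include_file_in_search("./Pods/SomeLib/SomeLib.m", [".m", ".mm"], ["./Pods"])
# -> False: the path starts with the excluded "./Pods" directory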
|
python
|
{
"resource": ""
}
|
q14739
|
AnalysisMixin._get_auto_rank
|
train
|
def _get_auto_rank(self, rank):
"""Tries to figure out what rank we should use for analyses"""
if rank == "auto":
# if we're an accessor for a ClassificationsDataFrame, use its _rank property
if self.__class__.__name__ == "OneCodexAccessor":
return self._rank
if self._field == "abundance":
return "species"
else:
return "genus"
else:
return rank
|
python
|
{
"resource": ""
}
|
q14740
|
AnalysisMixin._guess_normalized
|
train
|
def _guess_normalized(self):
"""Returns true if the collated counts in `self._results` appear to be normalized.
Notes
-----
It's possible that the _results df has already been normalized, which can cause some
methods to fail. This method lets us guess whether that's true and act accordingly.
"""
return (
getattr(self, "_normalized", False)
or getattr(self, "_field", None) == "abundance"
or bool((self._results.sum(axis=1).round(4) == 1.0).all())
)
|
python
|
{
"resource": ""
}
|
q14741
|
AnalysisMixin.to_df
|
train
|
def to_df(
self,
rank="auto",
top_n=None,
threshold=None,
remove_zeros=True,
normalize="auto",
table_format="wide",
):
"""Takes the ClassificationsDataFrame associated with these samples, or SampleCollection,
does some filtering, and returns a ClassificationsDataFrame copy.
Parameters
----------
rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional
Analysis will be restricted to abundances of taxa at the specified level.
top_n : `integer`, optional
Return only the top N most abundant taxa.
threshold : `float`, optional
Return only taxa more abundant than this threshold in one or more samples.
remove_zeros : `bool`, optional
Do not return taxa that have zero abundance in every sample.
normalize : {'auto', True, False}
Convert read counts to relative abundances (each sample sums to 1.0).
table_format : {'long', 'wide'}
If wide, rows are classifications, cols are taxa, elements are counts. If long, rows are
observations with three cols each: classification_id, tax_id, and count.
Returns
-------
`ClassificationsDataFrame`
"""
from onecodex.dataframes import ClassificationsDataFrame
rank = self._get_auto_rank(rank)
df = self._results.copy()
# subset by taxa
if rank:
if rank == "kingdom":
warnings.warn(
"Did you mean to specify rank=kingdom? Use rank=superkingdom to see Bacteria, "
"Archaea and Eukaryota."
)
tax_ids_to_keep = []
for tax_id in df.keys():
if self.taxonomy["rank"][tax_id] == rank:
tax_ids_to_keep.append(tax_id)
if len(tax_ids_to_keep) == 0:
raise OneCodexException("No taxa kept--is rank ({}) correct?".format(rank))
df = df.loc[:, tax_ids_to_keep]
# normalize
if normalize is False and self._guess_normalized():
raise OneCodexException("Data has already been normalized and this can not be undone.")
if normalize is True or (
normalize == "auto" and rank is not None and self._field != "abundance"
):
df = df.div(df.sum(axis=1), axis=0)
# remove columns (tax_ids) with no values that are > 0
if remove_zeros:
df = df.loc[:, (df != 0).any(axis=0)]
# restrict to taxa appearing in one or more samples at the given threshold
if threshold:
df = df.loc[:, df.max() >= threshold]
# restrict to N most abundant taxa
if top_n:
idx = df.sum(axis=0).sort_values(ascending=False).head(top_n).index
df = df.loc[:, idx]
# additional data to copy into the ClassificationsDataFrame
ocx_data = {
"ocx_metadata": self.metadata.copy(),
"ocx_rank": rank,
"ocx_field": self._field,
"ocx_taxonomy": self.taxonomy.copy(),
"ocx_normalized": normalize,
}
# generate long-format table
if table_format == "long":
long_df = {"classification_id": [], "tax_id": [], self._field: []}
for t_id in df:
for c_id, count in df[t_id].iteritems():
long_df["classification_id"].append(c_id)
long_df["tax_id"].append(t_id)
long_df[self._field].append(count)
results_df = ClassificationsDataFrame(long_df, **ocx_data)
elif table_format == "wide":
results_df = ClassificationsDataFrame(df, **ocx_data)
else:
raise OneCodexException("table_format must be one of: long, wide")
return results_df
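
# A hedged usage sketch, not from the original source: assumes `samples` is a
# SampleCollection with classification results already fetched.
# genus_df = samples.to_df(rank="genus", top_n=10)  # wide table of the 10 most abundant genera
# long_df = samples.to_df(rank="species", table_format="long")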
|
python
|
{
"resource": ""
}
|
q14742
|
OneCodexPDFExporter.from_notebook_node
|
train
|
def from_notebook_node(self, nb, resources=None, **kw):
"""Takes output of OneCodexHTMLExporter and runs Weasyprint to get a PDF."""
from weasyprint import HTML, CSS
nb = copy.deepcopy(nb)
output, resources = super(OneCodexPDFExporter, self).from_notebook_node(
nb, resources=resources, **kw
)
buf = BytesIO()
HTML(string=output).write_pdf(
buf, stylesheets=[CSS(os.path.join(ASSETS_PATH, CSS_TEMPLATE_FILE))]
)
buf.seek(0)
return buf.read(), resources
|
python
|
{
"resource": ""
}
|
q14743
|
OneCodexDocumentExporter.from_notebook_node
|
train
|
def from_notebook_node(self, nb, resources=None, **kw):
"""Takes PDF output from PDFExporter and uploads to One Codex Documents portal."""
output, resources = super(OneCodexDocumentExporter, self).from_notebook_node(
nb, resources=resources, **kw
)
from onecodex import Api
from onecodex.lib.upload import upload_document_fileobj
ocx = Api()
default_filename = "Analysis Report - {dt:%B} {dt.day}, {dt:%Y}".format(
dt=datetime.datetime.now()
)
file_name = resources["metadata"].get("one_codex_doc_portal_filename", default_filename)
try:
document_id = upload_document_fileobj(
BytesIO(output), file_name, ocx._client.session, ocx.Documents._resource
)
except UploadException as exc:
resp = json.dumps({"status": 500, "message": str(exc)})
return resp, resources
except Exception:
resp = json.dumps(
{
"status": 500,
"message": "Upload failed. Please contact help@onecodex.com for assistance.",
}
)
return resp, resources
resp = json.dumps({"status": 200, "document_id": document_id})
return resp, resources
|
python
|
{
"resource": ""
}
|
q14744
|
generate_strings
|
train
|
def generate_strings(project_base_dir, localization_bundle_path, tmp_directory, exclude_dirs, include_strings_file,
special_ui_components_prefix):
"""
Calls the built-in 'genstrings' command with JTLocalizedString as the routine to search for,
adds strings extracted from UI elements internationalized with 'JTL', and removes duplications.
"""
localization_directory = os.path.join(localization_bundle_path, DEFAULT_LANGUAGE_DIRECTORY_NAME)
if not os.path.exists(localization_directory):
os.makedirs(localization_directory)
localization_file = os.path.join(localization_directory, LOCALIZATION_FILENAME)
# Creating the same directory tree structure in the tmp directory
tmp_localization_directory = os.path.join(tmp_directory, DEFAULT_LANGUAGE_DIRECTORY_NAME)
tmp_localization_file = os.path.join(tmp_localization_directory, LOCALIZATION_FILENAME)
if os.path.isdir(tmp_localization_directory):
shutil.rmtree(tmp_localization_directory)
os.mkdir(tmp_localization_directory)
logging.info("Running genstrings")
source_files = extract_source_files(project_base_dir, exclude_dirs)
genstrings_cmd = 'genstrings -s JTLocalizedString -o %s %s' % (tmp_localization_directory, " ".join(
['"%s"' % (source_file,) for source_file in source_files]))
genstrings_process = subprocess.Popen(genstrings_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
stdin=subprocess.PIPE, shell=True)
genstrings_out, genstrings_err = genstrings_process.communicate()
remove_empty_comments_from_file(tmp_localization_file)
add_genstrings_comments_to_file(tmp_localization_file, genstrings_err)
genstrings_rc = genstrings_process.returncode
if genstrings_rc != 0:
logging.fatal("genstrings returned %d, aborting run!", genstrings_rc)
sys.exit(genstrings_rc)
create_localized_strings_from_ib_files(project_base_dir, exclude_dirs, tmp_localization_file,
special_ui_components_prefix)
if include_strings_file:
target = open_strings_file(tmp_localization_file, "a")
source = open_strings_file(include_strings_file, "r")
target.write(source.read())
source.close()
target.close()
handle_duplications(tmp_localization_file)
if os.path.isfile(localization_file):
logging.info("Merging old localizable with new one...")
merge_strings_files(localization_file, tmp_localization_file)
else:
logging.info("No Localizable yet, moving the created file...")
shutil.move(tmp_localization_file, localization_file)
|
python
|
{
"resource": ""
}
|
q14745
|
SampleCollection.filter
|
train
|
def filter(self, filter_func):
"""Return a new SampleCollection containing only samples meeting the filter criteria.
Will pass any kwargs (e.g., field or skip_missing) used when instantiating the current class
on to the new SampleCollection that is returned.
Parameters
----------
filter_func : `callable`
A function that will be evaluated on every object in the collection. The function must
return a `bool`. If True, the object will be kept. If False, it will be removed from the
SampleCollection that is returned.
Returns
-------
`onecodex.models.SampleCollection` containing only objects `filter_func` returned True on.
Examples
--------
Generate a new collection of Samples that have a specific filename extension:
new_collection = samples.filter(lambda s: s.filename.endswith('.fastq.gz'))
"""
if callable(filter_func):
return self.__class__([obj for obj in self if filter_func(obj) is True], **self._kwargs)
else:
raise OneCodexException(
"Expected callable for filter, got: {}".format(type(filter_func).__name__)
)
|
python
|
{
"resource": ""
}
|
q14746
|
SampleCollection._classification_fetch
|
train
|
def _classification_fetch(self, skip_missing=None):
"""Turns a list of objects associated with a classification result into a list of
Classifications objects.
Parameters
----------
skip_missing : `bool`
If an analysis was not successful, exclude it, warn, and keep going
Returns
-------
None, but stores a result in self._cached.
"""
skip_missing = skip_missing if skip_missing else self._kwargs["skip_missing"]
new_classifications = []
for a in self._res_list:
if a.__class__.__name__ == "Samples":
c = a.primary_classification
elif a.__class__.__name__ == "Classifications":
c = a
else:
raise OneCodexException(
"Objects in SampleCollection must be one of: Classifications, Samples"
)
if skip_missing and not c.success:
warnings.warn("Classification {} not successful. Skipping.".format(c.id))
continue
new_classifications.append(c)
self._cached["classifications"] = new_classifications
|
python
|
{
"resource": ""
}
|
q14747
|
SampleCollection._collate_metadata
|
train
|
def _collate_metadata(self):
"""Turns a list of objects associated with a classification result into a DataFrame of
metadata.
Returns
-------
None, but stores a result in self._cached.
"""
import pandas as pd
DEFAULT_FIELDS = None
metadata = []
for c in self._classifications:
m = c.sample.metadata
if DEFAULT_FIELDS is None:
DEFAULT_FIELDS = list(m._resource._schema["properties"].keys())
DEFAULT_FIELDS.remove("$uri")
DEFAULT_FIELDS.remove("sample")
metadatum = {f: getattr(m, f) for f in DEFAULT_FIELDS}
metadatum["classification_id"] = c.id
metadatum["sample_id"] = m.sample.id
metadatum["metadata_id"] = m.id
metadatum["created_at"] = m.sample.created_at
metadatum["filename"] = c.sample.filename
metadatum.update(m.custom)
metadata.append(metadatum)
if metadata:
metadata = pd.DataFrame(metadata).set_index("classification_id")
else:
metadata = pd.DataFrame(
columns=["classification_id", "sample_id", "metadata_id", "created_at"]
)
self._cached["metadata"] = metadata
|
python
|
{
"resource": ""
}
|
q14748
|
SampleCollection._collate_results
|
train
|
def _collate_results(self, field=None):
"""For a list of objects associated with a classification result, return the results as a
DataFrame and dict of taxa info.
Parameters
----------
field : {'readcount_w_children', 'readcount', 'abundance'}
Which field to use for the abundance/count of a particular taxon in a sample.
- 'readcount_w_children': total reads of this taxon and all its descendants
- 'readcount': total reads of this taxon
- 'abundance': genome size-normalized relative abundances, from shotgun sequencing
Returns
-------
None, but stores a result in self._cached.
"""
import pandas as pd
field = field if field else self._kwargs["field"]
if field not in ("auto", "abundance", "readcount", "readcount_w_children"):
raise OneCodexException("Specified field ({}) not valid.".format(field))
# we'll fill these dicts that eventually turn into DataFrames
df = {"classification_id": [c.id for c in self._classifications]}
tax_info = {"tax_id": [], "name": [], "rank": [], "parent_tax_id": []}
if field == "auto":
field = "readcount_w_children"
self._cached["field"] = field
for c_idx, c in enumerate(self._classifications):
# pulling results from mainline is the slowest part of the function
result = c.results()["table"]
# d contains info about a taxon in result, including name, id, counts, rank, etc.
for d in result:
d_tax_id = d["tax_id"]
if d_tax_id not in tax_info["tax_id"]:
for k in ("tax_id", "name", "rank", "parent_tax_id"):
tax_info[k].append(d[k])
# first time we've seen this taxon, so make a vector for it
df[d_tax_id] = [0] * len(self._classifications)
df[d_tax_id][c_idx] = d[field]
# format as a Pandas DataFrame
df = pd.DataFrame(df).set_index("classification_id").fillna(0)
df.columns.name = "tax_id"
tax_info = pd.DataFrame(tax_info).set_index("tax_id")
self._cached["results"] = df
self._cached["taxonomy"] = tax_info
|
python
|
{
"resource": ""
}
|
q14749
|
SampleCollection.to_otu
|
train
|
def to_otu(self, biom_id=None):
"""Converts a list of objects associated with a classification result into a `dict` resembling
an OTU table.
Parameters
----------
biom_id : `string`, optional
Optionally specify an `id` field for the generated v1 BIOM file.
Returns
-------
otu_table : `OrderedDict`
A BIOM OTU table, returned as a Python OrderedDict (can be dumped to JSON)
"""
otu_format = "Biological Observation Matrix 1.0.0"
# Note: This exact format URL is required by https://github.com/biocore/biom-format
otu_url = "http://biom-format.org"
otu = OrderedDict(
{
"id": biom_id,
"format": otu_format,
"format_url": otu_url,
"type": "OTU table",
"generated_by": "One Codex API V1",
"date": datetime.now().isoformat(),
"rows": [],
"columns": [],
"matrix_type": "sparse",
"matrix_element_type": "int",
}
)
rows = defaultdict(dict)
tax_ids_to_names = {}
for classification in self._classifications:
col_id = len(otu["columns"]) # 0 index
# Re-encoding the JSON is a bit of a hack, but
# we need a ._to_dict() method that properly
# resolves references and don't have one at the moment
columns_entry = {
"id": str(classification.id),
"sample_id": str(classification.sample.id),
"sample_filename": classification.sample.filename,
"metadata": json.loads(
classification.sample.metadata._to_json(include_references=False)
),
}
otu["columns"].append(columns_entry)
sample_df = classification.table()
for row in sample_df.iterrows():
tax_id = row[1]["tax_id"]
tax_ids_to_names[tax_id] = row[1]["name"]
rows[tax_id][col_id] = int(row[1]["readcount"])
num_rows = len(rows)
num_cols = len(otu["columns"])
otu["shape"] = [num_rows, num_cols]
otu["data"] = []
for present_taxa in sorted(rows):
# add the row entry
row_id = len(otu["rows"])
otu["rows"].append(
{"id": present_taxa, "metadata": {"taxonomy": tax_ids_to_names[present_taxa]}}
)
for sample_with_hit in rows[present_taxa]:
counts = rows[present_taxa][sample_with_hit]
otu["data"].append([row_id, sample_with_hit, counts])
return otu
|
python
|
{
"resource": ""
}
|
q14750
|
localization_merge_back
|
train
|
def localization_merge_back(updated_localizable_file, old_translated_file, new_translated_file, merged_translated_file):
""" Generates a file merging the old translations and the new ones.
Args:
updated_localizable_file (str): The path to the updated localization strings file, meaning the strings that
require translation.
old_translated_file (str): The path to the strings file containing the previously translated strings.
new_translated_file (str): The path to the strings file containing the newly translated strings.
merged_translated_file (str): The path to the output file with the merged translations.
"""
output_file_elements = []
old_translated_file_dict = generate_localization_key_to_entry_dictionary_from_file(old_translated_file)
new_translated_file_dict = generate_localization_key_to_entry_dictionary_from_file(new_translated_file)
f = open_strings_file(updated_localizable_file, "r")
for header_comment, comments, key, value in extract_header_comment_key_value_tuples_from_file(f):
translation_value = None
if len(header_comment) > 0:
output_file_elements.append(Comment(header_comment))
if value in new_translated_file_dict:
translation_value = new_translated_file_dict[value].value
elif key in old_translated_file_dict:
translation_value = old_translated_file_dict[key].value
elif key in new_translated_file_dict:
translation_value = new_translated_file_dict[key].value
if translation_value is not None:
output_file_elements.append(LocalizationEntry(comments, key, translation_value))
f.close()
write_file_elements_to_strings_file(merged_translated_file, output_file_elements)
|
python
|
{
"resource": ""
}
|
q14751
|
boxplot
|
train
|
def boxplot(df, category, quantity, category_type="N", title=None, xlabel=None, ylabel=None):
"""Plot a simple boxplot using Altair.
Parameters
----------
df : `pandas.DataFrame`
Contains columns matching 'category' and 'quantity' labels, at a minimum.
category : `string`
The name of the column in df used to group values on the horizontal axis.
quantity : `string`
The name of the column in df of values to plot on the vertical axis. Must be numerical.
category_type : {'N', 'O', 'T'}, optional
Nominal, ordinal, or time values can be used as categories. Quantitative (Q) values look weird.
title : `string`, optional
Text label at the top of the plot.
xlabel : `string`, optional
Text label along the horizontal axis.
ylabel : `string`, optional
Text label along the vertical axis.
Returns
-------
`altair.Chart`
"""
# must be one of Nominal, Ordinal, Time per altair
if category_type not in ("N", "O", "T"):
raise OneCodexException("If specifying category_type, must be N, O, or T")
# adapted from https://altair-viz.github.io/gallery/boxplot_max_min.html
lower_box = "q1({}):Q".format(quantity)
lower_whisker = "min({}):Q".format(quantity)
upper_box = "q3({}):Q".format(quantity)
upper_whisker = "max({}):Q".format(quantity)
if category_type == "T":
x_format = "hoursminutes({}):{}".format(category, category_type)
else:
x_format = "{}:{}".format(category, category_type)
lower_plot = (
alt.Chart(df)
.mark_rule()
.encode(y=alt.Y(lower_whisker, axis=alt.Axis(title=ylabel)), y2=lower_box, x=x_format)
)
middle_plot = alt.Chart(df).mark_bar(size=35).encode(y=lower_box, y2=upper_box, x=x_format)
upper_plot = alt.Chart(df).mark_rule().encode(y=upper_whisker, y2=upper_box, x=x_format)
middle_tick = (
alt.Chart(df)
.mark_tick(color="black", size=35)
.encode(
y="median({}):Q".format(quantity),
x=alt.X(x_format, axis=alt.Axis(title=xlabel), scale=alt.Scale(rangeStep=45)),
tooltip="median({}):Q".format(quantity),
)
)
chart = lower_plot + middle_plot + upper_plot + middle_tick
if title:
chart = chart.properties(title=title)
return chart
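A minimal usage sketch, assuming a pandas DataFrame; the column names 'group' and 'abundance' are illustrative, not part of the original API:
import pandas as pd

df = pd.DataFrame({
    "group": ["A", "A", "A", "B", "B", "B"],
    "abundance": [0.1, 0.4, 0.3, 0.2, 0.9, 0.5],
})
# group values on the x-axis by 'group', box the numerical 'abundance' values
chart = boxplot(df, "group", "abundance", title="Abundance by group")
chart  # in a notebook this renders the Altair chart; chart.save("box.html") also works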
|
python
|
{
"resource": ""
}
|
q14752
|
dendrogram
|
train
|
def dendrogram(tree):
"""Plot a simple square dendrogram using Altair.
Parameters
----------
tree : `dict` returned by `scipy.cluster.hierarchy.dendrogram`
Contains, at a minimum, 'icoord', 'dcoord', and 'leaves' keys. Scipy does all the work of
determining where the lines in the tree should go. All we have to do is draw them.
Returns
-------
`altair.Chart`
"""
plot_data = {
"x": [],
"y": [],
"o": [], # order these points should be connected in
"b": [], # one number per branch
}
for idx, (i, d) in enumerate(zip(tree["icoord"], tree["dcoord"])):
plot_data["x"].extend(map(lambda x: -x, d))
plot_data["y"].extend(map(lambda x: -x, i))
plot_data["o"].extend([0, 1, 2, 3])
plot_data["b"].extend([idx] * 4)
plot_data = pd.DataFrame(plot_data)
chart = (
alt.Chart(plot_data, width=100, height=15 * len(tree["leaves"]) - 7.5)
.mark_line(point=False, opacity=0.5)
.encode(
x=alt.X("x", axis=None),
y=alt.Y("y", axis=None, scale=alt.Scale(zero=True, nice=False)),
order="o",
color=alt.Color(
"b:N",
scale=alt.Scale(domain=list(range(idx + 1)), range=["black"] * (idx + 1)),
legend=None,
),
)
)
return chart
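A hedged sketch of producing the expected tree dict with SciPy before handing it to this function; the random data is purely illustrative:
import numpy as np
from scipy.cluster import hierarchy

points = np.random.rand(6, 4)                       # 6 hypothetical samples, 4 features
linkage = hierarchy.linkage(points, method="average")
tree = hierarchy.dendrogram(linkage, no_plot=True)  # dict with icoord/dcoord/leaves
chart = dendrogram(tree)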
|
python
|
{
"resource": ""
}
|
q14753
|
add_genstrings_comments_to_file
|
train
|
def add_genstrings_comments_to_file(localization_file, genstrings_err):
""" Adds the comments produced by the genstrings script for duplicate keys.
Args:
localization_file (str): The path to the strings file.
genstrings_err (str): The stderr output of the genstrings run, containing the duplicate-key warnings.
"""
errors_to_log = [line for line in genstrings_err.splitlines() if "used with multiple comments" not in line]
if len(errors_to_log) > 0:
logging.warning("genstrings warnings:\n%s", "\n".join(errors_to_log))
loc_file = open_strings_file(localization_file, "a")
regex_matches = re.findall(r'Warning: Key "(.*?)" used with multiple comments ("[^"]*" (& "[^"]*")+)',
genstrings_err)
logging.info("Adding multiple comments from genstrings output")
for regex_match in regex_matches:
if len(regex_match) == 3:
key = regex_match[0]
comments = [comment.strip()[1:-1] for comment in regex_match[1].split("&")]
logging.info("Found key with %d comments: %s", len(comments), key)
loc_key = LocalizationEntry(comments, key, key)
loc_file.write(unicode(loc_key))
loc_file.write(u"\n")
loc_file.close()
|
python
|
{
"resource": ""
}
|
q14754
|
ResourceDownloadMixin.download
|
train
|
def download(self, path=None, file_obj=None, progressbar=False):
"""Downloads files from One Codex.
Parameters
----------
path : `string`, optional
Full path to save the file to. If omitted, defaults to the original filename
in the current working directory.
file_obj : file-like object, optional
Rather than save the file to a path, write it to this file-like object.
progressbar : `bool`
Whether to display a progress bar (using Click) during the download.
Returns
-------
`string`
The path the file was downloaded to, if applicable. Otherwise, None.
Notes
-----
If no arguments specified, defaults to download the file as the original filename
in the current working directory. If `file_obj` given, will write data into the
passed file-like object. If `path` given, will download the file to the path provided,
but will not overwrite any existing files.
"""
if path and file_obj:
raise OneCodexException("Please specify only one of: path, file_obj")
if path is None and file_obj is None:
path = os.path.join(os.getcwd(), self.filename)
if path and os.path.exists(path):
raise OneCodexException("{} already exists! Will not overwrite.".format(path))
try:
url_data = self._resource.download_uri()
resp = requests.get(url_data["download_uri"], stream=True)
with (open(path, "wb") if path else file_obj) as f_out:
if progressbar:
with click.progressbar(length=self.size, label=self.filename) as bar:
for data in resp.iter_content(chunk_size=1024):
bar.update(len(data))
f_out.write(data)
else:
for data in resp.iter_content(chunk_size=1024):
f_out.write(data)
except KeyboardInterrupt:
if path:
os.remove(path)
raise
except requests.exceptions.HTTPError as exc:
if exc.response.status_code == 401:
raise OneCodexException("You must be logged in to download files.")
elif exc.response.status_code == 402:
raise OneCodexException(
"You must either have a premium platform account or be in "
"a notebook environment to download files."
)
elif exc.response.status_code == 403:
raise OneCodexException("You are not authorized to download this file.")
else:
raise OneCodexException(
"Download failed with an HTTP status code {}.".format(exc.response.status_code)
)
return path
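A hedged usage sketch, assuming `ocx` is an authenticated One Codex API client and the sample id is a placeholder:
import io

sample = ocx.Samples.get("0123456789abcdef")  # placeholder id
path = sample.download(progressbar=True)      # saves under the original filename in cwd

# alternatively, stream into an in-memory buffer instead of writing a file
buf = io.BytesIO()
sample.download(file_obj=buf)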
|
python
|
{
"resource": ""
}
|
q14755
|
prepare_for_translation
|
train
|
def prepare_for_translation(localization_bundle_path):
""" Prepares the localization bundle for translation.
This means that, after the strings files have been created using genstrings.sh, this will produce '.pending' files
that contain the strings that have yet to be translated.
Args:
localization_bundle_path (str): The path to the localization bundle.
"""
logging.info("Preparing for translation..")
for strings_file in os.listdir(os.path.join(localization_bundle_path, DEFAULT_LANGUAGE_DIRECTORY_NAME)):
if not strings_file.endswith(".strings"):
continue
strings_path = os.path.join(localization_bundle_path, DEFAULT_LANGUAGE_DIRECTORY_NAME, strings_file)
for lang_dir in os.listdir(localization_bundle_path):
if lang_dir == DEFAULT_LANGUAGE_DIRECTORY_NAME or lang_dir.startswith("."):
continue
dest_strings_path = os.path.join(localization_bundle_path, lang_dir, strings_file)
pending_path = dest_strings_path + ".pending"
excluded_path = dest_strings_path + ".excluded"
if not os.path.exists(dest_strings_path):
open_strings_file(dest_strings_path, "a").close()
logging.info("Preparing diff for %s in %s", lang_dir, pending_path)
localization_diff(strings_path, dest_strings_path, excluded_path, pending_path)
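A small sketch of the expected bundle layout; the directory names are assumptions for illustration (DEFAULT_LANGUAGE_DIRECTORY_NAME may differ):
# LocalizationBundle/
#     en.lproj/Localizable.strings          <- source strings produced by genstrings.sh
#     fr.lproj/Localizable.strings          <- existing French translations (may be empty)
#     fr.lproj/Localizable.strings.pending  <- written here: strings still awaiting translation
#     fr.lproj/Localizable.strings.excluded <- optional: strings deliberately left untranslated
prepare_for_translation("LocalizationBundle")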
|
python
|
{
"resource": ""
}
|
q14756
|
onecodex
|
train
|
def onecodex(ctx, api_key, no_pprint, verbose, telemetry):
"""One Codex v1 API command line interface"""
# set up the context for sub commands
click.Context.get_usage = click.Context.get_help
ctx.obj = {}
ctx.obj["API_KEY"] = api_key
ctx.obj["NOPPRINT"] = no_pprint
ctx.obj["TELEMETRY"] = telemetry
if verbose:
log.setLevel(logging.DEBUG)
# handle checking insecure platform, we let upload command do it by itself
if ctx.invoked_subcommand != "upload":
warn_if_insecure_platform()
|
python
|
{
"resource": ""
}
|
q14757
|
classifications
|
train
|
def classifications(ctx, classifications, results, readlevel, readlevel_path):
"""Retrieve performed metagenomic classifications"""
# basic operation -- just print
if not readlevel and not results:
cli_resource_fetcher(ctx, "classifications", classifications)
# fetch the results
elif not readlevel and results:
if len(classifications) != 1:
log.error("Can only request results data on one Classification at a time")
else:
classification = ctx.obj["API"].Classifications.get(classifications[0])
if not classification:
log.error(
"Could not find classification {} (404 status code)".format(classifications[0])
)
return
results = classification.results(json=True)
pprint(results, ctx.obj["NOPPRINT"])
# fetch the readlevel
elif readlevel is not None and not results:
if len(classifications) != 1:
log.error("Can only request read-level data on one Classification at a time")
else:
classification = ctx.obj["API"].Classifications.get(classifications[0])
if not classification:
log.error(
"Could not find classification {} (404 status code)".format(classifications[0])
)
return
tsv_url = classification._readlevel()["url"]
log.info("Downloading tsv data from: {}".format(tsv_url))
download_file_helper(tsv_url, readlevel_path)
# both given -- complain
else:
log.error("Can only request one of read-level data or results data at a time")
|
python
|
{
"resource": ""
}
|
q14758
|
schema_resolve_refs
|
train
|
def schema_resolve_refs(schema, ref_resolver=None, root=None):
"""
Helper method for decoding references. Self-references are resolved automatically; other references are
resolved using a callback function.
:param object schema:
:param callable ref_resolver:
:param None root:
:return:
"""
# FIXME more stable implementation that only attempts to resolve {"$ref"} objects where they are allowed.
if isinstance(schema, dict):
if len(schema) == 1 and "$ref" in schema and isinstance(schema["$ref"], six.string_types):
reference = schema["$ref"]
if reference.startswith("#"):
# TODO should also resolve any paths within the reference, which would need to be deferred.
return root
return ref_resolver(reference)
resolved = {}
for k, v in schema.items():
resolved[k] = schema_resolve_refs(v,
ref_resolver=ref_resolver,
root=root if root is not None else resolved)
return resolved
if isinstance(schema, (list, tuple)):
return [schema_resolve_refs(v, ref_resolver=ref_resolver, root=root) for v in schema]
return schema
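A small sketch of the resolution behaviour on a toy schema; the external reference and the resolver callback are stand-ins:
schema = {
    "type": "object",
    "properties": {
        "child": {"$ref": "#"},           # self-reference, resolved to the root being built
        "other": {"$ref": "Other.json"},  # external reference, handed to the callback
    },
}
resolved = schema_resolve_refs(schema, ref_resolver=lambda ref: {"resolved": ref})
assert resolved["properties"]["child"] is resolved
assert resolved["properties"]["other"] == {"resolved": "Other.json"}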
|
python
|
{
"resource": ""
}
|
q14759
|
reference
|
train
|
def reference(text=None, label=None):
"""Add a reference to the bibliography and insert a superscript number.
Parameters
----------
text : `string`, optional
The complete text of the reference, e.g. Roo, et al. "How to Python." Nature, 2019.
label : `string`, optional
A short label to describe this reference.
Notes
-----
1) Every time reference() is called, the reference number is auto-incremented. That is, the first
time you call this, a superscript 1 is inserted. The next time you call this (with a different
reference), a superscript 2 is inserted.
2) This function returns HTML. It is meant to be used inside a Markdown cell in your IPython
notebook, or concatenated together with another string that's used as input to a function
here in the `report` module.
Examples
--------
You want to insert a reference at the current position, and store it using a short label so you
can access it again without typing the entire reference text.
>>> reference('Roo, et al. "How to Python." Nature, 2019.', 'roo1')
'<sup class="reference">1</sup>'
The next time you want to insert this reference, just use the short 'roo1' label.
>>> reference(label='roo1')
'<sup class="reference">1</sup>'
You want to insert a list of references in a single IPython cell, each with a short label, and
use them all later without displaying any of the superscript reference numbers now.
_ = reference('Roo, et al. "How to Python." Nature, 2019.', 'roo1')
_ = reference('Roo, et al. "The Tao of Roo." Random House, 2018.', 'roo2')
_ = reference('Roo, et al. "Roo and the Art of Database Maintenance." N/A, 2017.', 'roo3')
~~~ And later, in a Markdown cell in your IPython notebook ~~~
As Roo, et al. outlined in a previous work{reference(label='roo2')}, all play and no work
makes for a happy dog. Later, the same authors applied similar reasoning to the care of
Burmese Pythons{reference(label='roo1')}. By comparing the care of dogs and Pythons to
SQL databases, Roo, et al. make a compelling argument for why writing docstrings can be fun
and not just a chore{reference(label='roo3')}.
You want to insert a reference into a figure legend, using `report.legend`.
report.legend(
'As you can clearly see in the above figure, the data supports my conclusion '
'and does not support the conclusion of my peers{reference(label='similar_paper1')}. '
'This is most likely because I am smarter and more attractive than the authors of '
'those other publications{reference(label='ego_and_insecurity1')}.'
)
"""
if text is None and label is None:
raise OneCodexException("Please specify at least one of: text, label")
try:
ipy = get_ipython()
ref_list = ipy.meta.get("references", {})
except NameError:
raise OneCodexException("Must be run from within IPython")
def to_html(ref_num):
return '<sup class="reference">{}</sup>'.format(ref_num)
if text is not None:
# has this reference already been cited?
for ref_label, (ref_num, ref_text) in ref_list.items():
if text == ref_text:
if label is not None and label != ref_label:
raise OneCodexException(
"Citation already in use with label={}".format(ref_label)
)
else:
break
else:
# reference has not been cited. is the label already in use?
if label is not None and label in ref_list.keys():
raise OneCodexException("Citation label={} already in use".format(label))
# create the citation and assign next number
if not ref_list:
ref_num = 1
else:
ref_num = max([x[0] for x in ref_list.values()]) + 1
if label is None:
ref_label = ref_num
else:
ref_label = label
ref_list[ref_label] = (ref_num, text)
ipy.meta["references"] = ref_list
return to_html(ref_num)
elif label is not None:
if label not in ref_list.keys():
raise OneCodexException("Cannot find citation with label={}".format(label))
return to_html(ref_list[label][0])
|
python
|
{
"resource": ""
}
|
q14760
|
extract_element_internationalized_comment
|
train
|
def extract_element_internationalized_comment(element):
""" Extracts the xib element's comment, if the element has been internationalized.
Args:
element (element): The element from which to extract the comment.
Returns:
The element's internationalized comment, None if it does not exist, or hasn't been internationalized (according
to the JTLocalize definitions).
"""
element_entry_comment = get_element_attribute_or_empty(element, 'userLabel')
if element_entry_comment == "":
try:
element_entry_comment = element.getElementsByTagName('string')[0].firstChild.nodeValue
except Exception:
element_entry_comment = ""
if not element_entry_comment.lower().startswith(JT_INTERNATIONALIZED_COMMENT_PREFIX):
return None
else:
return element_entry_comment[len(JT_INTERNATIONALIZED_COMMENT_PREFIX):]
|
python
|
{
"resource": ""
}
|
q14761
|
add_string_pairs_from_attributed_ui_element
|
train
|
def add_string_pairs_from_attributed_ui_element(results, ui_element, comment_prefix):
""" Adds string pairs from a UI element with attributed text
Args:
results (list): The list to add the results to.
ui_element (element): The UI element from the xib whose attributed string fragments should be extracted.
comment_prefix (str): The prefix of the comment to use for each extracted string
(a "Part X" suffix will be appended for each fragment)
Returns:
bool: Whether or not an attributed string was found.
"""
attributed_strings = ui_element.getElementsByTagName('attributedString')
if attributed_strings.length == 0:
return False
attributed_element = attributed_strings[0]
fragment_index = 1
for fragment in attributed_element.getElementsByTagName('fragment'):
# The fragment text is either given as an attribute <fragment content="TEXT">
# or as a child in the form <string key='content'>TEXT</string>
try:
label_entry_key = fragment.attributes['content'].value
except KeyError:
label_entry_key = fragment.getElementsByTagName('string')[0].firstChild.nodeValue
comment = "%s Part %d" % (comment_prefix, fragment_index)
results.append((label_entry_key, comment))
fragment_index += 1
return fragment_index > 1
|
python
|
{
"resource": ""
}
|
q14762
|
add_string_pairs_from_label_element
|
train
|
def add_string_pairs_from_label_element(xib_file, results, label, special_ui_components_prefix):
""" Adds string pairs from a label element.
Args:
xib_file (str): Path to the xib file.
results (list): The list to add the results to.
label (element): The label element from the xib, to extract the string pairs from.
special_ui_components_prefix (str):
If not None, extraction will not warn about internationalized UI components with this class prefix.
"""
label_entry_comment = extract_element_internationalized_comment(label)
if label_entry_comment is None:
return
warn_if_element_not_of_class(label, 'Label', special_ui_components_prefix)
if label.hasAttribute('usesAttributedText') and label.attributes['usesAttributedText'].value == 'YES':
add_string_pairs_from_attributed_ui_element(results, label, label_entry_comment)
else:
try:
label_entry_key = label.attributes['text'].value
except KeyError:
try:
label_entry_key = label.getElementsByTagName('string')[0].firstChild.nodeValue
except Exception:
label_entry_key = 'N/A'
logging.warning("%s: Missing text entry in %s", xib_file, label.toxml('UTF8'))
results.append((label_entry_key, label_entry_comment))
|
python
|
{
"resource": ""
}
|
q14763
|
add_string_pairs_from_text_field_element
|
train
|
def add_string_pairs_from_text_field_element(xib_file, results, text_field, special_ui_components_prefix):
""" Adds string pairs from a textfield element.
Args:
xib_file (str): Path to the xib file.
results (list): The list to add the results to.
text_field(element): The textfield element from the xib, to extract the string pairs from.
special_ui_components_prefix (str):
If not None, extraction will not warn about internationalized UI components with this class prefix.
"""
text_field_entry_comment = extract_element_internationalized_comment(text_field)
if text_field_entry_comment is None:
return
if text_field.hasAttribute('usesAttributedText') and text_field.attributes['usesAttributedText'].value == 'YES':
add_string_pairs_from_attributed_ui_element(results, text_field, text_field_entry_comment)
else:
try:
text_field_entry_key = text_field.attributes['text'].value
results.append((text_field_entry_key, text_field_entry_comment + ' default text value'))
except KeyError:
pass
try:
text_field_entry_key = text_field.attributes['placeholder'].value
results.append((text_field_entry_key, text_field_entry_comment + ' placeholder text value'))
except KeyError:
pass
warn_if_element_not_of_class(text_field, 'TextField', special_ui_components_prefix)
|
python
|
{
"resource": ""
}
|
q14764
|
add_string_pairs_from_text_view_element
|
train
|
def add_string_pairs_from_text_view_element(xib_file, results, text_view, special_ui_components_prefix):
""" Adds string pairs from a textview element.
Args:
xib_file (str): Path to the xib file.
results (list): The list to add the results to.
text_view(element): The textview element from the xib, to extract the string pairs from.
special_ui_components_prefix (str): A custom class prefix for internationalized UI components to allow (by default only the JT prefix is allowed)
"""
text_view_entry_comment = extract_element_internationalized_comment(text_view)
if text_view_entry_comment is None:
return
if text_view.hasAttribute('usesAttributedText') and text_view.attributes['usesAttributedText'].value == 'YES':
add_string_pairs_from_attributed_ui_element(results, text_view, text_view_entry_comment)
else:
try:
text_view_entry_key = text_view.attributes['text'].value
results.append((text_view_entry_key, text_view_entry_comment + ' default text value'))
except KeyError:
pass
warn_if_element_not_of_class(text_view, 'TextView', special_ui_components_prefix)
|
python
|
{
"resource": ""
}
|
q14765
|
add_string_pairs_from_button_element
|
train
|
def add_string_pairs_from_button_element(xib_file, results, button, special_ui_components_prefix):
""" Adds strings pairs from a button xib element.
Args:
xib_file (str): Path to the xib file.
results (list): The list to add the results to.
button(element): The button element from the xib, to extract the string pairs from.
special_ui_components_prefix (str): A custom class prefix for internationalized UI components to allow (by default only the JT prefix is allowed)
"""
button_entry_comment = extract_element_internationalized_comment(button)
if button_entry_comment is None:
return
for state in button.getElementsByTagName('state'):
state_name = state.attributes['key'].value
state_entry_comment = button_entry_comment + " - " + state_name + " state of button"
if not add_string_pairs_from_attributed_ui_element(results, state, state_entry_comment):
try:
button_entry_key = state.attributes['title'].value
except KeyError:
try:
button_entry_key = state.getElementsByTagName('string')[0].firstChild.nodeValue
except Exception:
continue
results.append((button_entry_key, state_entry_comment))
warn_if_element_not_of_class(button, 'Button', special_ui_components_prefix)
|
python
|
{
"resource": ""
}
|
q14766
|
localization_diff
|
train
|
def localization_diff(localizable_file, translated_file, excluded_strings_file, output_translation_file):
""" Generates a strings file representing the strings that were yet to be translated.
Args:
localizable_file (str): The path to the localization strings file, meaning the file that represents the strings
that require translation.
translated_file (str): The path to the translated strings file, meaning the file containing the strings that
were already translated.
excluded_strings_file (str): The path to a file that contains all the strings we want to exclude from this and
from future diffs.
output_translation_file (str): The path to the output file, which will contain the strings that require
translation but are not present in the given translation file.
"""
old_translated_file_dictionary = generate_localization_key_to_entry_dictionary_from_file(translated_file)
if excluded_strings_file is not None and os.path.isfile(excluded_strings_file):
excluded_file_dictionary = generate_localization_key_to_entry_dictionary_from_file(excluded_strings_file)
else:
excluded_file_dictionary = {}
# The reason we keep a list of the keys, and not just pop is because values can repeat themselves.
translated_list = old_translated_file_dictionary.keys()
output_dictionary = {}
output_file_elements = []
f = open_strings_file(localizable_file, "r")
output_file_elements.append(Comment(u"""
/**
* This file contains all the strings that were extracted from our app and that need to be translated.
* Each entry may or may not have a comment explaining context, and a "key" = "%s" equation.
* To localize, you need to fill the right side of the equation with the translation of the left side.
* Please keep special expressions such as '%%@' or '%%1$@' as is. Usually the comment will explain their context.
*/
""" % (VALUE_PLACEHOLDER,)))
for _header_comment, comments, key, value in extract_header_comment_key_value_tuples_from_file(f):
if key in translated_list or key in excluded_file_dictionary:
if key in old_translated_file_dictionary:
old_translated_file_dictionary.pop(key)
elif value in output_dictionary:
output_dictionary[value].add_comments(comments)
output_file_elements.append(Comment(
u"/* There was a value '%s' here but it was a duplicate of an older value and removed. */\n" % value))
else:
loc_obj = LocalizationEntry(comments, value, VALUE_PLACEHOLDER)
output_dictionary[value] = loc_obj
output_file_elements.append(loc_obj)
for key, removed_trans in old_translated_file_dictionary.items():
output_file_elements.append(Comment(u"""
/*
* Entry removed from previous translation file:
* %s
* "%s" = "%s";
*/
""" % (", ".join(removed_trans.comments), removed_trans.key, removed_trans.value)))
write_file_elements_to_strings_file(output_translation_file, output_file_elements)
|
python
|
{
"resource": ""
}
|
q14767
|
add_404_page
|
train
|
def add_404_page(app):
"""Build an extra ``404.html`` page if no ``"404"`` key is in the
``html_additional_pages`` config.
"""
is_epub = isinstance(app.builder, EpubBuilder)
config_pages = app.config.html_additional_pages
if not is_epub and "404" not in config_pages:
yield ("404", {}, "404.html")
|
python
|
{
"resource": ""
}
|
q14768
|
singlehtml_sidebars
|
train
|
def singlehtml_sidebars(app):
"""When using a ``singlehtml`` builder, replace the
``html_sidebars`` config with ``singlehtml_sidebars``. This can be
used to change what sidebars are rendered for the single page called
``"index"`` by the builder.
"""
if app.config.singlehtml_sidebars is not None and isinstance(
app.builder, SingleFileHTMLBuilder
):
app.config.html_sidebars = app.config.singlehtml_sidebars
|
python
|
{
"resource": ""
}
|
q14769
|
get_version
|
train
|
def get_version(name, version_length=2, placeholder="x"):
"""Ensures that the named package is installed and returns version
strings to be used by Sphinx.
Sphinx uses ``version`` to mean an abbreviated form of the full
version string, which is called ``release``. In ``conf.py``::
release, version = get_version("Flask")
# release = 1.0.3.dev0, version = 1.0.x
:param name: Name of package to get.
:param version_length: How many values from ``release`` to use for
``version``.
:param placeholder: Extra suffix to add to the version. The default
produces versions like ``1.2.x``.
:return: ``(release, version)`` tuple.
"""
try:
release = pkg_resources.get_distribution(name).version
except ImportError:
print(
textwrap.fill(
"'{name}' must be installed to build the documentation."
" Install from source using `pip install -e .` in a"
" virtualenv.".format(name=name)
)
)
sys.exit(1)
version = ".".join(release.split(".", version_length)[:version_length])
if placeholder:
version = "{}.{}".format(version, placeholder)
return release, version
|
python
|
{
"resource": ""
}
|
q14770
|
set_is_pallets_theme
|
train
|
def set_is_pallets_theme(app):
"""Set the ``is_pallets_theme`` config to ``True`` if the current
theme is a descendant of the ``pocoo`` theme.
"""
if app.config.is_pallets_theme is not None:
return
theme = getattr(app.builder, "theme", None)
while theme is not None:
if theme.name == "pocoo":
app.config.is_pallets_theme = True
break
theme = theme.base
else:
app.config.is_pallets_theme = False
|
python
|
{
"resource": ""
}
|
q14771
|
only_pallets_theme
|
train
|
def only_pallets_theme(default=None):
"""Create a decorator that calls a function only if the
``is_pallets_theme`` config is ``True``.
Used to prevent Sphinx event callbacks from doing anything if the
Pallets themes are installed but not used. ::
@only_pallets_theme()
def inject_value(app):
...
app.connect("builder-inited", inject_value)
:param default: Value to return if a Pallets theme is not in use.
:return: A decorator.
"""
def decorator(f):
@wraps(f)
def wrapped(app, *args, **kwargs):
if not app.config.is_pallets_theme:
return default
return f(app, *args, **kwargs)
return wrapped
return decorator
|
python
|
{
"resource": ""
}
|
q14772
|
ExampleRunner.declare_example
|
train
|
def declare_example(self, source):
"""Execute the given code, adding it to the runner's namespace."""
with patch_modules():
code = compile(source, "<docs>", "exec")
exec(code, self.namespace)
|
python
|
{
"resource": ""
}
|
q14773
|
setup
|
train
|
def setup(
*,
verbose: bool = False,
quiet: bool = False,
color: str = "auto",
title: str = "auto",
timestamp: bool = False
) -> None:
""" Configure behavior of message functions.
:param verbose: Whether :func:`debug` messages should get printed
:param quiet: Hide every message except :func:`warning`, :func:`error`, and
:func:`fatal`
:param color: Choices: 'auto', 'always', or 'never'. Whether to color output.
By default ('auto'), only use color when output is a terminal.
:param title: Ditto for setting terminal title
:param timestamp: Whether to prefix every message with a time stamp
"""
_setup(verbose=verbose, quiet=quiet, color=color, title=title, timestamp=timestamp)
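A tiny usage sketch:
setup(verbose=True, color="never", timestamp=True)
# from here on, debug() messages are shown and no ANSI colors are emitted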
|
python
|
{
"resource": ""
}
|
q14774
|
process_tokens
|
train
|
def process_tokens(
tokens: Sequence[Token], *, end: str = "\n", sep: str = " "
) -> Tuple[str, str]:
""" Returns two strings from a list of tokens.
One containing ANSI escape codes, the other
only the 'normal' characters
"""
# Flatten the list of tokens in case some of them are of
# class UnicodeSequence:
flat_tokens = list() # type: List[Token]
for token in tokens:
if isinstance(token, UnicodeSequence):
flat_tokens.extend(token.tuple())
else:
flat_tokens.append(token)
with_color = _process_tokens(flat_tokens, end=end, sep=sep, color=True)
without_color = _process_tokens(flat_tokens, end=end, sep=sep, color=False)
return (with_color, without_color)
|
python
|
{
"resource": ""
}
|
q14775
|
message
|
train
|
def message(
*tokens: Token,
end: str = "\n",
sep: str = " ",
fileobj: FileObj = sys.stdout,
update_title: bool = False
) -> None:
""" Helper method for error, warning, info, debug
"""
if using_colorama():
global _INITIALIZED
if not _INITIALIZED:
colorama.init()
_INITIALIZED = True
with_color, without_color = process_tokens(tokens, end=end, sep=sep)
if CONFIG["record"]:
_MESSAGES.append(without_color)
if update_title and with_color:
write_title_string(without_color, fileobj)
to_write = with_color if config_color(fileobj) else without_color
write_and_flush(fileobj, to_write)
|
python
|
{
"resource": ""
}
|
q14776
|
fatal
|
train
|
def fatal(*tokens: Token, **kwargs: Any) -> None:
""" Print an error message and call ``sys.exit`` """
error(*tokens, **kwargs)
sys.exit(1)
|
python
|
{
"resource": ""
}
|
q14777
|
info_section
|
train
|
def info_section(*tokens: Token, **kwargs: Any) -> None:
""" Print an underlined section name """
# We need to know the length of the section:
process_tokens_kwargs = kwargs.copy()
process_tokens_kwargs["color"] = False
no_color = _process_tokens(tokens, **process_tokens_kwargs)
info(*tokens, **kwargs)
info("-" * len(no_color), end="\n\n")
|
python
|
{
"resource": ""
}
|
q14778
|
info_1
|
train
|
def info_1(*tokens: Token, **kwargs: Any) -> None:
""" Print an important informative message """
info(bold, blue, "::", reset, *tokens, **kwargs)
|
python
|
{
"resource": ""
}
|
q14779
|
dot
|
train
|
def dot(*, last: bool = False, fileobj: Any = None) -> None:
""" Print a dot without a newline unless it is the last one.
Useful when you want to display progress but know very little
about the work being done.
:param last: whether this is the last dot (will insert a newline)
"""
end = "\n" if last else ""
info(".", end=end, fileobj=fileobj)
|
python
|
{
"resource": ""
}
|
q14780
|
info_count
|
train
|
def info_count(i: int, n: int, *rest: Token, **kwargs: Any) -> None:
""" Display a counter before the rest of the message.
``rest`` and ``kwargs`` are passed to :func:`info`
Current index should start at 0 and end at ``n-1``, like in ``enumerate()``
:param i: current index
:param n: total number of items
"""
num_digits = len(str(n))
counter_format = "(%{}d/%d)".format(num_digits)
counter_str = counter_format % (i + 1, n)
info(green, "*", reset, counter_str, reset, *rest, **kwargs)
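A short usage sketch over a hypothetical list:
files = ["a.txt", "b.txt", "c.txt"]
for i, name in enumerate(files):
    info_count(i, len(files), "processing", name)
# prints lines like: * (1/3) processing a.txt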
|
python
|
{
"resource": ""
}
|
q14781
|
info_progress
|
train
|
def info_progress(prefix: str, value: float, max_value: float) -> None:
""" Display info progress in percent.
:param value: the current value
:param max_value: the max value
:param prefix: the prefix message to print
"""
if sys.stdout.isatty():
percent = float(value) / max_value * 100
sys.stdout.write(prefix + ": %.0f%%\r" % percent)
sys.stdout.flush()
|
python
|
{
"resource": ""
}
|
q14782
|
debug
|
train
|
def debug(*tokens: Token, **kwargs: Any) -> None:
""" Print a debug message.
Messages are shown only when ``CONFIG["verbose"]`` is true
"""
if not CONFIG["verbose"] or CONFIG["record"]:
return
message(*tokens, **kwargs)
|
python
|
{
"resource": ""
}
|
q14783
|
indent_iterable
|
train
|
def indent_iterable(elems: Sequence[str], num: int = 2) -> List[str]:
"""Indent an iterable."""
return [" " * num + l for l in elems]
|
python
|
{
"resource": ""
}
|
q14784
|
indent
|
train
|
def indent(text: str, num: int = 2) -> str:
"""Indent a piece of text."""
lines = text.splitlines()
return "\n".join(indent_iterable(lines, num=num))
|
python
|
{
"resource": ""
}
|
q14785
|
ask_string
|
train
|
def ask_string(*question: Token, default: Optional[str] = None) -> Optional[str]:
"""Ask the user to enter a string.
"""
tokens = get_ask_tokens(question)
if default:
tokens.append("(%s)" % default)
info(*tokens)
answer = read_input()
if not answer:
return default
return answer
|
python
|
{
"resource": ""
}
|
q14786
|
ask_password
|
train
|
def ask_password(*question: Token) -> str:
"""Ask the user to enter a password.
"""
tokens = get_ask_tokens(question)
info(*tokens)
answer = read_password()
return answer
|
python
|
{
"resource": ""
}
|
q14787
|
ask_choice
|
train
|
def ask_choice(
*prompt: Token, choices: List[Any], func_desc: Optional[FuncDesc] = None
) -> Any:
"""Ask the user to choose from a list of choices.
:return: the selected choice
``func_desc`` will be called on every list item for displaying
and sorting the list. If not given, it defaults to
``str``.
Will loop until:
* the user enters a valid index
* or leaves the prompt empty
In the last case, None will be returned
"""
if func_desc is None:
func_desc = lambda x: str(x)
tokens = get_ask_tokens(prompt)
info(*tokens)
choices.sort(key=func_desc)
for i, choice in enumerate(choices, start=1):
choice_desc = func_desc(choice)
info(" ", blue, "%i" % i, reset, choice_desc)
keep_asking = True
res = None
while keep_asking:
answer = read_input()
if not answer:
return None
try:
index = int(answer)
except ValueError:
info("Please enter a valid number")
continue
if index not in range(1, len(choices) + 1):
info(str(index), "is out of range")
continue
res = choices[index - 1]
keep_asking = False
return res
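A hedged usage sketch; the project dicts and the 'name' key are illustrative:
projects = [{"name": "spam"}, {"name": "eggs"}]
chosen = ask_choice(
    "Select a project",
    choices=projects,
    func_desc=lambda p: p["name"],  # used for display and for sorting the list
)
if chosen is None:
    print("Nothing selected")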
|
python
|
{
"resource": ""
}
|
q14788
|
ask_yes_no
|
train
|
def ask_yes_no(*question: Token, default: bool = False) -> bool:
"""Ask the user to answer by yes or no"""
while True:
tokens = [green, "::", reset] + list(question) + [reset]
if default:
tokens.append("(Y/n)")
else:
tokens.append("(y/N)")
info(*tokens)
answer = read_input()
if answer.lower() in ["y", "yes"]:
return True
if answer.lower() in ["n", "no"]:
return False
if not answer:
return default
warning("Please answer by 'y' (yes) or 'n' (no) ")
|
python
|
{
"resource": ""
}
|
q14789
|
did_you_mean
|
train
|
def did_you_mean(message: str, user_input: str, choices: Sequence[str]) -> str:
""" Given a list of choices and an invalid user input, display the closest
items in the list that match the input.
"""
if not choices:
return message
else:
result = {
difflib.SequenceMatcher(a=user_input, b=choice).ratio(): choice
for choice in choices
}
message += "\nDid you mean: %s?" % result[max(result)]
return message
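A small sketch of the intended call pattern:
commands = ["install", "uninstall", "upgrade"]
msg = did_you_mean("Unknown command 'instal'", "instal", commands)
# msg now ends with: Did you mean: install?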
|
python
|
{
"resource": ""
}
|
q14790
|
Timer.stop
|
train
|
def stop(self) -> None:
""" Stop the timer and emit a nice log """
end_time = datetime.datetime.now()
elapsed_time = end_time - self.start_time
elapsed_seconds = elapsed_time.seconds
hours, remainder = divmod(int(elapsed_seconds), 3600)
minutes, seconds = divmod(remainder, 60)
as_str = "%sh %sm %ss %dms" % (
hours,
minutes,
seconds,
elapsed_time.microseconds / 1000,
)
info("%s took %s" % (self.description, as_str))
|
python
|
{
"resource": ""
}
|
q14791
|
xmllint_format
|
train
|
def xmllint_format(xml):
"""
Pretty-print XML like ``xmllint`` does.
Arguments:
xml (string): Serialized XML
"""
parser = ET.XMLParser(resolve_entities=False, strip_cdata=False, remove_blank_text=True)
document = ET.fromstring(xml, parser)
return ('%s\n%s' % ('<?xml version="1.0" encoding="UTF-8"?>', ET.tostring(document, pretty_print=True).decode('utf-8'))).encode('utf-8')
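A minimal sketch; it relies on lxml.etree being imported as ET, as the function above does:
raw = b'<mets><fileSec><fileGrp USE="OCR-D-IMG"/></fileSec></mets>'
pretty = xmllint_format(raw)
# pretty is UTF-8 bytes: an XML declaration followed by the indented document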
|
python
|
{
"resource": ""
}
|
q14792
|
Resolver.download_to_directory
|
train
|
def download_to_directory(self, directory, url, basename=None, overwrite=False, subdir=None):
"""
Download a file to the workspace.
Early Shortcut: If url is a file://-URL and that file is already in the directory, keep it there.
If basename is not given but subdir is, assume user knows what she's doing and use last URL segment as the basename.
If basename is not given and no subdir is given, use the alnum characters in the URL as the basename.
Args:
directory (string): Directory to download files to
basename (string, None): basename part of the filename on disk.
url (string): URL to download from
overwrite (boolean): Whether to overwrite existing files with that name
subdir (string, None): Subdirectory to create within the directory. Think fileGrp.
Returns:
Local filename
"""
log = getLogger('ocrd.resolver.download_to_directory') # pylint: disable=redefined-outer-name
log.debug("directory=|%s| url=|%s| basename=|%s| overwrite=|%s| subdir=|%s|", directory, url, basename, overwrite, subdir)
if url is None:
raise Exception("'url' must be a string")
if directory is None:
raise Exception("'directory' must be a string")
if basename is None:
if (subdir is not None) or \
(directory and url.startswith('file://%s' % directory)): # in case downloading a url 'file:///tmp/foo/bar' to directory '/tmp/foo'
basename = url.rsplit('/', 1)[-1]
else:
basename = safe_filename(url)
if subdir is not None:
basename = join(subdir, basename)
outfilename = join(directory, basename)
if exists(outfilename) and not overwrite:
log.debug("File already exists and overwrite=False: %s", outfilename)
return outfilename
outfiledir = outfilename.rsplit('/', 1)[0]
# print(outfiledir)
if not isdir(outfiledir):
makedirs(outfiledir)
log.debug("Downloading <%s> to '%s'", url, outfilename)
# de-scheme file:// URL
if url.startswith('file://'):
url = url[len('file://'):]
# Copy files or download remote assets
if '://' not in url:
copyfile(url, outfilename)
else:
response = requests.get(url)
if response.status_code != 200:
raise Exception("Not found: %s (HTTP %d)" % (url, response.status_code))
with open(outfilename, 'wb') as outfile:
outfile.write(response.content)
return outfilename
|
python
|
{
"resource": ""
}
|
q14793
|
Resolver.workspace_from_url
|
train
|
def workspace_from_url(self, mets_url, dst_dir=None, clobber_mets=False, mets_basename=None, download=False, baseurl=None):
"""
Create a workspace from a METS by URL.
Sets the mets.xml file
Arguments:
mets_url (string): Source mets URL
dst_dir (string, None): Target directory for the workspace
clobber_mets (boolean, False): Whether to overwrite existing mets.xml. By default existing mets.xml will raise an exception.
download (boolean, False): Whether to download all the files
baseurl (string, None): Base URL for resolving relative file locations
Returns:
Workspace
"""
if dst_dir and not dst_dir.startswith('/'):
dst_dir = abspath(dst_dir)
if mets_url is None:
if baseurl is None:
raise Exception("Must pass mets_url and/or baseurl to workspace_from_url")
else:
mets_url = 'file://%s/%s' % (baseurl, mets_basename if mets_basename else 'mets.xml')
if baseurl is None:
baseurl = mets_url.rsplit('/', 1)[0]
log.debug("workspace_from_url\nmets_url='%s'\nbaseurl='%s'\ndst_dir='%s'", mets_url, baseurl, dst_dir)
# resolve to absolute
if '://' not in mets_url:
mets_url = 'file://%s' % abspath(mets_url)
if dst_dir is None:
# if mets_url is a file-url assume working directory is source directory
if mets_url.startswith('file://'):
# if dst_dir was not given and mets_url is a file assume that
# dst_dir should be the directory where the mets.xml resides
dst_dir = dirname(mets_url[len('file://'):])
else:
dst_dir = tempfile.mkdtemp(prefix=TMP_PREFIX)
log.debug("Creating workspace '%s' for METS @ <%s>", dst_dir, mets_url)
# if mets_basename is not given, use the last URL segment of the mets_url
if mets_basename is None:
mets_basename = mets_url \
.rsplit('/', 1)[-1] \
.split('?')[0] \
.split('#')[0]
dst_mets = join(dst_dir, mets_basename)
log.debug("Copying mets url '%s' to '%s'", mets_url, dst_mets)
if 'file://' + dst_mets == mets_url:
log.debug("Target and source mets are identical")
else:
if exists(dst_mets) and not clobber_mets:
raise Exception("File '%s' already exists but clobber_mets is false" % dst_mets)
else:
self.download_to_directory(dst_dir, mets_url, basename=mets_basename)
workspace = Workspace(self, dst_dir, mets_basename=mets_basename, baseurl=baseurl)
if download:
for f in workspace.mets.find_files():
workspace.download_file(f)
return workspace
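A short usage sketch; the import path, URL, and target directory are placeholders:
from ocrd.resolver import Resolver  # assumed import location

resolver = Resolver()
workspace = resolver.workspace_from_url(
    "https://example.org/data/mets.xml",
    dst_dir="/tmp/my-workspace",
    download=True,  # also fetch every file referenced in the METS
)
print(workspace.directory)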
|
python
|
{
"resource": ""
}
|
q14794
|
Resolver.workspace_from_nothing
|
train
|
def workspace_from_nothing(self, directory, mets_basename='mets.xml', clobber_mets=False):
"""
Create an empty workspace.
"""
if directory is None:
directory = tempfile.mkdtemp(prefix=TMP_PREFIX)
if not exists(directory):
makedirs(directory)
mets_fpath = join(directory, mets_basename)
if not clobber_mets and exists(mets_fpath):
raise Exception("Not clobbering existing mets.xml in '%s'." % directory)
mets = OcrdMets.empty_mets()
with open(mets_fpath, 'wb') as fmets:
log.info("Writing %s", mets_fpath)
fmets.write(mets.to_xml(xmllint=True))
return Workspace(self, directory, mets)
|
python
|
{
"resource": ""
}
|
q14795
|
OcrdXmlDocument.to_xml
|
train
|
def to_xml(self, xmllint=False):
"""
Serialize all properties as pretty-printed XML
Args:
xmllint (boolean): Format with ``xmllint`` in addition to pretty-printing
"""
root = self._tree.getroot()
ret = ET.tostring(ET.ElementTree(root), pretty_print=True)
if xmllint:
ret = xmllint_format(ret)
return ret
|
python
|
{
"resource": ""
}
|
q14796
|
workspace_cli
|
train
|
def workspace_cli(ctx, directory, mets_basename, backup):
"""
Working with workspace
"""
ctx.obj = WorkspaceCtx(os.path.abspath(directory), mets_basename, automatic_backup=backup)
|
python
|
{
"resource": ""
}
|
q14797
|
workspace_clone
|
train
|
def workspace_clone(ctx, clobber_mets, download, mets_url, workspace_dir):
"""
Create a workspace from a METS_URL and return the directory
METS_URL can be a URL, an absolute path or a path relative to $PWD.
If WORKSPACE_DIR is not provided, creates a temporary directory.
"""
workspace = ctx.resolver.workspace_from_url(
mets_url,
dst_dir=os.path.abspath(workspace_dir if workspace_dir else mkdtemp(prefix=TMP_PREFIX)),
mets_basename=ctx.mets_basename,
clobber_mets=clobber_mets,
download=download,
)
workspace.save_mets()
print(workspace.directory)
|
python
|
{
"resource": ""
}
|
q14798
|
workspace_create
|
train
|
def workspace_create(ctx, clobber_mets, directory):
"""
Create a workspace with an empty METS file in DIRECTORY.
Use '.' for $PWD.
"""
workspace = ctx.resolver.workspace_from_nothing(
directory=os.path.abspath(directory),
mets_basename=ctx.mets_basename,
clobber_mets=clobber_mets
)
workspace.save_mets()
print(workspace.directory)
|
python
|
{
"resource": ""
}
|
q14799
|
workspace_add_file
|
train
|
def workspace_add_file(ctx, file_grp, file_id, mimetype, page_id, force, local_filename):
"""
Add a file LOCAL_FILENAME to METS in a workspace.
"""
workspace = Workspace(ctx.resolver, directory=ctx.directory, mets_basename=ctx.mets_basename, automatic_backup=ctx.automatic_backup)
if not local_filename.startswith(ctx.directory):
log.debug("File '%s' is not in workspace, copying", local_filename)
local_filename = ctx.resolver.download_to_directory(ctx.directory, "file://" + local_filename, subdir=file_grp)
url = "file://" + local_filename
workspace.mets.add_file(
fileGrp=file_grp,
ID=file_id,
mimetype=mimetype,
url=url,
pageId=page_id,
force=force,
local_filename=local_filename
)
workspace.save_mets()
|
python
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.