sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def entity_delete(args):
    """Delete an entity of the given type from a workspace.

    Prompts for confirmation unless args.yes is set; returns None.
    """
    msg = "WARNING: this will delete {0} {1} in {2}/{3}".format(
        args.entity_type, args.entity, args.project, args.workspace)
    if not (args.yes or _confirm_prompt(msg)):
        return
    json_body=[{"entityType": args.entity_type,
                "entityName": args.entity}]
    r = fapi.delete_entities(args.project, args.workspace, json_body)
    fapi._check_response_code(r, 204)
    if fcconfig.verbosity:
        # Fixed: args.type did not exist (AttributeError) -- the rest of this
        # function uses args.entity_type.  Also corrected "Succesfully" typo.
        print("Successfully deleted " + args.entity_type + " " + args.entity)
def meth_new(args):
    """Submit a new workflow (or an update) to the methods repository."""
    response = fapi.update_repository_method(args.namespace, args.method,
                                             args.synopsis, args.wdl,
                                             args.doc, args.comment)
    fapi._check_response_code(response, 201)
    if fcconfig.verbosity:
        print("Method %s installed to project %s" % (args.method,
                                                     args.namespace))
    return 0
def meth_delete(args):
    """Remove (redact) a method from the method repository."""
    message = "WARNING: this will delete workflow \n\t{0}/{1}:{2}".format(
        args.namespace, args.method, args.snapshot_id)
    if not (args.yes or _confirm_prompt(message)):
        return
    response = fapi.delete_repository_method(args.namespace, args.method,
                                             args.snapshot_id)
    fapi._check_response_code(response, 200)
    if fcconfig.verbosity:
        print("Method %s removed from project %s" % (args.method,args.namespace))
    return 0
def meth_wdl(args):
    '''Fetch the WDL source for one snapshot of a repository method.'''
    response = fapi.get_repository_method(args.namespace, args.method,
                                          args.snapshot_id, True)
    fapi._check_response_code(response, 200)
    return response.text
def meth_acl(args):
    '''Fetch the access control list for one snapshot of a repository method,
       as an iterator of "user<TAB>role" lines sorted by user.'''
    response = fapi.get_repository_method_acl(args.namespace, args.method,
                                              args.snapshot_id)
    fapi._check_response_code(response, 200)
    entries = sorted(response.json(), key=lambda entry: entry['user'])
    return map(lambda entry: '{0}\t{1}'.format(entry['user'], entry['role']),
               entries)
def meth_set_acl(args):
    """Assign an ACL role to a list of users for a workflow."""
    acl_updates = [{"user": u, "role": args.role}
                   for u in set(expand_fc_groups(args.users))
                   if u != fapi.whoami()]
    snap_id = args.snapshot_id
    if not snap_id:
        # No explicit snapshot: look up the most recent one in the methods repo
        r = fapi.list_repository_methods(namespace=args.namespace,
                                         name=args.method)
        fapi._check_response_code(r, 200)
        versions = r.json()
        if not versions:
            if fcconfig.verbosity:
                eprint("method {0}/{1} not found".format(args.namespace,
                                                         args.method))
            return 1
        snap_id = max(versions, key=lambda m: m['snapshotId'])['snapshotId']
    r = fapi.update_repository_method_acl(args.namespace, args.method,
                                          snap_id, acl_updates)
    fapi._check_response_code(r, 200)
    if fcconfig.verbosity:
        print("Updated ACL for {0}/{1}:{2}".format(args.namespace, args.method,
                                                   snap_id))
    return 0
def expand_fc_groups(users):
    """Yield each user, expanding any FireCloud group into its member emails.

    Caveat is that only group admins may do this.
    """
    admin_groups = None
    for user in users:
        if '@' not in user:
            # Bare name: treat it directly as a FireCloud group name
            fcgroup = user
        elif user.lower().endswith('@firecloud.org'):
            # Lazily build a map (email -> group name) of the groups this
            # caller administers; only those can be expanded.
            if admin_groups is None:
                r = fapi.get_groups()
                fapi._check_response_code(r, 200)
                admin_groups = {g['groupEmail'].lower(): g['groupName']
                                for g in r.json() if g['role'] == 'Admin'}
            if user.lower() not in admin_groups:
                if fcconfig.verbosity:
                    eprint("You do not have access to the members of {}".format(user))
                yield user
                continue
            fcgroup = admin_groups[user.lower()]
        else:
            # Ordinary (non-group) email address: pass through unchanged
            yield user
            continue
        r = fapi.get_group(fcgroup)
        fapi._check_response_code(r, 200)
        members = r.json()
        for email in members['adminsEmails']:
            yield email
        for email in members['membersEmails']:
            yield email
def meth_list(args):
    """List workflows in the methods repository."""
    r = fapi.list_repository_methods(namespace=args.namespace,
                                     name=args.method,
                                     snapshotId=args.snapshot_id)
    fapi._check_response_code(r, 200)
    # One "namespace<TAB>name<TAB>snapshotId" row per method
    results = ['{0}\t{1}\t{2}'.format(m['namespace'], m['name'],
                                      m['snapshotId'])
               for m in r.json()]
    # Sort for easier viewing, ignore case
    return sorted(results, key=lambda s: s.lower())
def config_start(args):
    '''Invoke a task (method configuration) on a given entity in a workspace.

    Returns a tuple (message, submission_id).  Raises RuntimeError when no
    method namespace was supplied or configured by default.
    '''
    # Try to use call caching (job avoidance)? Flexibly accept range of answers.
    # Robustness fix: the previous code called .lower() on any value that was
    # not the literal True, which raised AttributeError for non-strings (e.g.
    # a bool False); treat non-strings as plain booleans instead.
    cache = getattr(args, "cache", True)
    if isinstance(cache, str):
        cache = cache.lower() in ["y", "true", "yes", "t", "1"]
    else:
        cache = cache is True
    if not args.namespace:
        args.namespace = fcconfig.method_ns
    if not args.namespace:
        raise RuntimeError("namespace not provided, or configured by default")
    r = fapi.create_submission(args.project, args.workspace, args.namespace,
                               args.config, args.entity, args.entity_type,
                               args.expression, use_callcache=cache)
    fapi._check_response_code(r, 201)
    submission_id = r.json()['submissionId']
    return ("Started {0}/{1} in {2}/{3}: id={4}".format(
        args.namespace, args.config, args.project, args.workspace,
        submission_id)), submission_id
def config_stop(args):
    '''Abort a task (method configuration) by submission ID in given space'''
    response = fapi.abort_submission(args.project, args.workspace,
                                     args.submission_id)
    fapi._check_response_code(response, 204)
    return "Aborted {0} in {1}/{2}".format(args.submission_id, args.project,
                                           args.workspace)
def config_list(args):
    """List configuration(s) in the methods repository or a workspace."""
    verbose = fcconfig.verbosity
    if args.workspace:
        # A workspace was named: list the configs installed in it
        if verbose:
            print("Retrieving method configs from space {0}".format(args.workspace))
        if not args.project:
            eprint("No project given, and no default project configured")
            return 1
        r = fapi.list_workspace_configs(args.project, args.workspace)
    else:
        # Otherwise list configs from the central methods repository
        if verbose:
            print("Retrieving method configs from method repository")
        r = fapi.list_repository_configs(namespace=args.namespace,
                                         name=args.config,
                                         snapshotId=args.snapshot_id)
    fapi._check_response_code(r, 200)
    results = []
    for cfg in r.json():
        ns = cfg['namespace'] or '__EMPTYSTRING__'
        # Ugh: configs in workspaces look different from configs in methodRepo
        method = cfg.get('methodRepoMethod', None)
        if method:
            version = method.get('methodVersion', 'unknown')  # space config
        else:
            version = cfg.get('snapshotId', 'unknown')        # repo config
        results.append('{0}\t{1}\tsnapshotId:{2}'.format(ns, cfg['name'],
                                                         version))
    # Sort for easier viewing, ignore case
    return sorted(results, key=lambda s: s.lower())
def config_acl(args):
    '''Fetch the access control list for a method configuration, as an
       iterator of "user<TAB>role" lines sorted by user.'''
    response = fapi.get_repository_config_acl(args.namespace, args.config,
                                              args.snapshot_id)
    fapi._check_response_code(response, 200)
    entries = sorted(response.json(), key=lambda entry: entry['user'])
    return map(lambda entry: '{0}\t{1}'.format(entry['user'], entry['role']),
               entries)
def config_set_acl(args):
    """Assign an ACL role to a list of users for a config."""
    acl_updates = [{"user": u, "role": args.role}
                   for u in set(expand_fc_groups(args.users))
                   if u != fapi.whoami()]
    snap_id = args.snapshot_id
    if not snap_id:
        # No explicit snapshot: look up the most recent one in the methods repo
        r = fapi.list_repository_configs(namespace=args.namespace,
                                         name=args.config)
        fapi._check_response_code(r, 200)
        versions = r.json()
        if not versions:
            if fcconfig.verbosity:
                eprint("Configuration {0}/{1} not found".format(args.namespace,
                                                                args.config))
            return 1
        snap_id = max(versions, key=lambda c: c['snapshotId'])['snapshotId']
    r = fapi.update_repository_config_acl(args.namespace, args.config,
                                          snap_id, acl_updates)
    fapi._check_response_code(r, 200)
    if fcconfig.verbosity:
        print("Updated ACL for {0}/{1}:{2}".format(args.namespace, args.config,
                                                   snap_id))
    return 0
def config_get(args):
    """Retrieve a method config from a workspace as pretty-printed JSON."""
    response = fapi.get_workspace_config(args.project, args.workspace,
                                         args.namespace, args.config)
    fapi._check_response_code(response, 200)
    # ensure_ascii=False so unicode comes through as-is rather than escaped
    return json.dumps(response.json(), sort_keys=True, indent=4,
                      separators=(',', ': '), ensure_ascii=False)
def config_wdl(args):
    """Retrieve the WDL for a method config in a workspace, send stdout."""
    response = fapi.get_workspace_config(args.project, args.workspace,
                                         args.namespace, args.config)
    fapi._check_response_code(response, 200)
    # Resolve the repo method the config points at, then delegate to meth_wdl
    method = response.json()["methodRepoMethod"]
    args.namespace = method["methodNamespace"]
    args.method = method["methodName"]
    args.snapshot_id = method["methodVersion"]
    return meth_wdl(args)
def config_diff(args):
    """Compare method configuration definitions across workspaces.  Ignores
       methodConfigVersion if the verbose argument is not set."""
    config_1 = config_get(args).splitlines()
    cfg_1_name = args.config
    # Re-point args at the second project/workspace/config and fetch it too
    args.project = args.Project
    args.workspace = args.Workspace
    if args.Config is not None:
        args.config = args.Config
    if args.Namespace is not None:
        args.namespace = args.Namespace
    config_2 = config_get(args).splitlines()
    if not args.verbose:
        config_1 = skip_cfg_ver(config_1)
        config_2 = skip_cfg_ver(config_2)
    return list(unified_diff(config_1, config_2, cfg_1_name, args.config,
                             lineterm=''))
def config_put(args):
    '''Install a valid method configuration into a workspace, in one of several
       ways: from a JSON file containing a config definition (both file names
       and objects are supported); as a string representing the content of such
       a JSON file; or as a dict generated from such JSON content, e.g via
       json.loads(). Note that the CLI supports only string & filename input.'''
    config = args.config
    # Bug fix: os.path.isfile() was previously called on EVERY input type,
    # which raises TypeError for dict and file-like inputs that the docstring
    # promises to support.  Only strings can name a file, so test type first.
    if isinstance(config, dict):
        pass                                # already-parsed config definition
    elif hasattr(config, "read"):
        config = json.loads(config.read())  # open file object
    elif isinstance(config, str):
        if os.path.isfile(config):
            with open(config, 'r') as fp:   # name of a JSON file
                config = json.loads(fp.read())
        else:
            config = json.loads(config)     # raw JSON content
    else:
        raise ValueError('Input method config must be filename, string or dict')
    r = fapi.create_workspace_config(args.project, args.workspace, config)
    fapi._check_response_code(r, [201])
    return True
def config_new(args):
    '''Attempt to install a new method config into a workspace, by: generating
       a template from a versioned method in the methods repo, then launching
       a local editor (respecting the $EDITOR environment variable) to fill in
       the incomplete input/output fields. Returns True if the config was
       successfully installed, otherwise False'''
    cfg = config_template(args)
    # Iterate: the user edits the template, then we try to install it.  The
    # loop ends when EITHER config_put() succeeds after an edit, OR the text
    # is left unchanged in the editor (e.g. quitting out of VI with :q).
    # FIXME: put an small integer upper bound on the # of loops here
    while True:
        try:
            revised = fccore.edit_text(cfg)
            if revised == cfg:
                eprint("No edits made, method config not installed ...")
                break
            if __EDITME__ in revised:
                eprint("Edit is incomplete, method config not installed ...")
                time.sleep(1)
                continue
            args.config = cfg = revised
            config_put(args)
            return True
        except FireCloudServerError as fce:
            __pretty_print_fc_exception(fce)
    return False
def config_delete(args):
    """Remove a method config from a workspace."""
    response = fapi.delete_workspace_config(args.project, args.workspace,
                                            args.namespace, args.config)
    fapi._check_response_code(response, [200, 204])
    # An empty response body is reported as None
    return response.text or None
def config_copy(args):
    """Copy a method config to new name/space/namespace/project (or all 4)."""
    if not (args.tospace or args.toname or args.toproject or args.tonamespace):
        raise RuntimeError('A new config name OR workspace OR project OR ' +
                           'namespace must be given (or all)')
    # Fetch the source configuration
    r = fapi.get_workspace_config(args.fromproject, args.fromspace,
                                  args.namespace, args.config)
    fapi._check_response_code(r, 200)
    copy = r.json()
    # Any destination coordinate not supplied defaults to its source value
    args.toname = args.toname or args.config
    args.tonamespace = args.tonamespace or args.namespace
    args.toproject = args.toproject or args.fromproject
    args.tospace = args.tospace or args.fromspace
    copy['name'] = args.toname
    copy['namespace'] = args.tonamespace
    # Instantiate the copy
    r = fapi.overwrite_workspace_config(args.toproject, args.tospace,
                                        args.tonamespace, args.toname, copy)
    fapi._check_response_code(r, 200)
    if fcconfig.verbosity:
        print("Method %s/%s:%s copied to %s/%s:%s" % (
            args.fromproject, args.fromspace, args.config,
            args.toproject, args.tospace, args.toname))
    return 0
def attr_get(args):
    '''Return a dict of attribute name/value pairs: if entity name & type
       are specified then attributes will be retrieved from that entity,
       otherwise workspace-level attributes will be returned. By default all
       attributes attached to the given object will be returned, but a subset
       can be selected by specifying a list of attribute names; names which
       refer to a non-existent attribute will be silently ignored. By default
       a special __header__ entry is optionally added to the result. '''
    if args.entity_type and args.entity:
        r = fapi.get_entity(args.project, args.workspace, args.entity_type,
                            args.entity)
        fapi._check_response_code(r, 200)
        attrs = r.json()['attributes']
        # Membership ("list of members") attributes are elided here: members
        # are CONTENTS of a container, not METADATA about it, and presenting
        # them as attributes conflates annotation with membership (contrary to
        # the MEMBERSHIP vs UPDATE loadfile distinction in the docs).  A change
        # has been requested of the FireCloud dev team (via forum); until then
        # these keys are dropped (later we may let users request them).
        for member_attr in ("samples", "participants", "pairs"):
            attrs.pop(member_attr, None)
    else:
        r = fapi.get_workspace(args.project, args.workspace)
        fapi._check_response_code(r, 200)
        attrs = r.json()['workspace']['attributes']
    if args.attributes:
        # Return only the requested subset; unknown names silently drop out
        attrs = {name: attrs[name]
                 for name in set(attrs).intersection(args.attributes)}
    if not attrs:
        return {}
    if args.entity:
        # Entity attributes: one row of tab-separated values
        def textify(thing):
            if isinstance(thing, dict):
                thing = thing.get("items", thing.get("entityName", "__UNKNOWN__"))
            return "{0}".format(thing)
        result = {args.entity: u'\t'.join(map(textify, attrs.values()))}
        # Add "hidden" header of attribute names, for downstream convenience
        object_id = u'entity:%s_id' % args.entity_type
        result['__header__'] = [object_id] + list(attrs.keys())
        return result
    return attrs  # Workspace attributes
def attr_list(args):
    '''Retrieve names of all attributes attached to a given object, either
       an entity (if entity type+name is provided) or workspace (if not)'''
    args.attributes = None          # request every attribute
    result = attr_get(args)
    # For entities the names live in __header__ (minus the leading id column);
    # for workspaces they are simply the dict keys
    header = result.get("__header__", [])
    names = header[1:] if header else result.keys()
    return sorted(names)
def attr_set(args):
    '''Set key=value attributes: if entity name & type are specified then
       attributes will be set upon that entity, otherwise the attribute will
       be set at the workspace level'''
    on_entity = bool(args.entity_type and args.entity)
    if on_entity:
        prompt = "Set {0}={1} for {2}:{3} in {4}/{5}?\n[Y\\n]: ".format(
            args.attribute, args.value, args.entity_type,
            args.entity, args.project, args.workspace)
    else:
        prompt = "Set {0}={1} in {2}/{3}?\n[Y\\n]: ".format(
            args.attribute, args.value, args.project, args.workspace
        )
    if not (args.yes or _confirm_prompt("", prompt)):
        return 0
    update = fapi._attr_set(args.attribute, args.value)
    if on_entity:
        r = fapi.update_entity(args.project, args.workspace, args.entity_type,
                               args.entity, [update])
    else:
        r = fapi.update_workspace_attributes(args.project, args.workspace,
                                             [update])
    fapi._check_response_code(r, 200)
    return 0
def attr_delete(args):
    ''' Delete key=value attributes: if entity name & type are specified then
        attributes will be deleted from that entity, otherwise the attribute will
        be removed from the workspace'''
    if args.entity_type and args.entities:
        # There is no attribute-deletion endpoint, so this takes two steps:
        # first retrieve the entity ids (and any foreign keys, e.g.
        # participant_id for sample_id), then upload a loadfile whose cells
        # hold FireCloud's magic __DELETE__ keyword for each attribute that
        # should be removed from each entity.
        # Step 1: see what entities are present, and filter to those requested
        all_entities = _entity_paginator(args.project, args.workspace,
                                         args.entity_type,
                                         page_size=1000, filter_terms=None,
                                         sort_direction="asc")
        if args.entities:
            all_entities = [e for e in all_entities
                            if e['name'] in args.entities]
        # Step 2: construct a loadfile to delete these attributes
        attrs = sorted(args.attributes)
        etype = args.entity_type
        entity_data = []
        for entity_dict in all_entities:
            line = entity_dict['name']
            # TODO: Fix other types?
            if etype == "sample":
                line += "\t" + entity_dict['attributes']['participant']['entityName']
            line += "\t__DELETE__" * len(attrs)
            entity_data.append(line)
        header_cols = ["entity:" + etype + "_id"]
        if etype == "sample":
            header_cols.append("participant_id")
        entity_header = '\t'.join(header_cols + list(attrs))
        # Confirm before removing attributes from the entities
        message = "WARNING: this will delete these attributes:\n\n" + \
                  ','.join(args.attributes) + "\n\n"
        if args.entities:
            message += 'on these {0}s:\n\n'.format(args.entity_type) + \
                       ', '.join(args.entities)
        else:
            message += 'on all {0}s'.format(args.entity_type)
        message += "\n\nin workspace {0}/{1}\n".format(args.project, args.workspace)
        if not (args.yes or _confirm_prompt(message)):
            return 0
        # TODO: reconcile with other batch updates
        # Chunk the entities into batches of 500, and upload to FC
        if args.verbose:
            print("Batching " + str(len(entity_data)) + " updates to Firecloud...")
        chunk_len = 500
        total = int(len(entity_data) / chunk_len) + 1
        for batch, start in enumerate(range(0, len(entity_data), chunk_len), 1):
            if args.verbose:
                print("Updating samples {0}-{1}, batch {2}/{3}".format(
                    start+1, min(start+chunk_len, len(entity_data)), batch, total
                ))
            this_data = entity_header + '\n' + \
                        '\n'.join(entity_data[start:start+chunk_len])
            # Now push the entity data back to firecloud
            r = fapi.upload_entities(args.project, args.workspace, this_data)
            fapi._check_response_code(r, 200)
    else:
        # Workspace-level attributes can be removed with a single API call
        message = "WARNING: this will delete the following attributes in " + \
                  "{0}/{1}\n\t".format(args.project, args.workspace) + \
                  "\n\t".join(args.attributes)
        if not (args.yes or _confirm_prompt(message)):
            return 0
        updates = [fapi._attr_rem(a) for a in args.attributes]
        r = fapi.update_workspace_attributes(args.project, args.workspace,
                                             updates)
        fapi._check_response_code(r, 200)
    return 0
def attr_copy(args):
    """Copy workspace attributes between workspaces."""
    # Destination coordinates default to the source's
    args.to_workspace = args.to_workspace or args.workspace
    args.to_project = args.to_project or args.project
    if (args.project == args.to_project
        and args.workspace == args.to_workspace):
        eprint("destination project and namespace must differ from"
               " source workspace")
        return 1
    # Fetch the workspace attributes of the source workspace
    r = fapi.get_workspace(args.project, args.workspace)
    fapi._check_response_code(r, 200)
    workspace_attrs = r.json()['workspace']['attributes']
    # Restrict to the requested subset, when one was given
    if args.attributes:
        workspace_attrs = {k: v for k, v in iteritems(workspace_attrs)
                           if k in args.attributes}
    if not workspace_attrs:
        print("No workspace attributes defined in {0}/{1}".format(
            args.project, args.workspace))
        return 1
    message = "This will copy the following workspace attributes to {0}/{1}\n"
    message = message.format(args.to_project, args.to_workspace)
    for k, v in sorted(iteritems(workspace_attrs)):
        message += '\t{0}\t{1}\n'.format(k, v)
    if not (args.yes or _confirm_prompt(message)):
        return 0
    # Turn the attributes into update operations and push them
    updates = [fapi._attr_set(k, v) for k, v in iteritems(workspace_attrs)]
    r = fapi.update_workspace_attributes(args.to_project, args.to_workspace,
                                         updates)
    fapi._check_response_code(r, 200)
    return 0
def attr_fill_null(args):
    """
    Assign the null sentinel value for all entities which do not have a value
    for the given attributes.
    see gs://broad-institute-gdac/GDAC_FC_NULL for more details
    """
    NULL_SENTINEL = "gs://broad-institute-gdac/GDAC_FC_NULL"
    attrs = args.attributes
    if not attrs:
        print("Error: provide at least one attribute to set")
        return 1
    if 'participant' in attrs or 'samples' in attrs:
        print("Error: can't assign null to samples or participant")
        return 1
    if args.entity_type is None:
        # TODO: set workspace attributes
        print("attr_fill_null requires an entity type")
        return 1
    # Set entity attributes
    print("Collecting entity data...")
    # Get existing attributes
    entities = _entity_paginator(args.project, args.workspace,
                                 args.entity_type,
                                 page_size=1000, filter_terms=None,
                                 sort_direction="asc")
    # samples need participant_id as well
    # TODO: This may need more fixing for other types
    orig_attrs = list(attrs)
    if args.entity_type == "sample":
        attrs.insert(0, "participant_id")
    header = "entity:" + args.entity_type + "_id\t" + "\t".join(attrs)
    # Book-keep the number of updates for each attribute
    attr_update_counts = {a: 0 for a in orig_attrs}
    # Build new entity rows, inserting the null sentinel wherever a value is
    # missing; for performance, only rows that actually change are kept
    entity_data = []
    for entity_dict in entities:
        e_attrs = entity_dict['attributes']
        line = entity_dict['name']
        altered = False
        for attr in attrs:
            if attr == "participant_id":
                line += "\t" + e_attrs['participant']['entityName']
                continue  # This attribute is never updated by fill_null
            if attr not in e_attrs:
                altered = True
                attr_update_counts[attr] += 1
            line += "\t" + str(e_attrs.get(attr, NULL_SENTINEL))
        if altered:
            entity_data.append(line)
    # If every entity would receive a sentinel for some attribute, that is
    # usually a mistake (e.g. a typo'd attribute name) -- warn the user
    prompt = "Continue? [Y\\n]: "
    for attr in orig_attrs:
        if attr_update_counts[attr] == len(entities):
            message = "WARNING: no {0}s with attribute '{1}'\n".format(
                args.entity_type, attr
            )
            if not (args.yes or _confirm_prompt(message, prompt)):
                return
    # Nothing to do when no sentinels are necessary anywhere
    if all(count == 0 for count in itervalues(attr_update_counts)):
        print("No null sentinels required, exiting...")
        return 0
    if args.to_loadfile:
        # Write a loadfile instead of updating FireCloud directly
        print("Saving loadfile to " + args.to_loadfile)
        with open(args.to_loadfile, "w") as f:
            f.write(header + '\n')
            f.write("\n".join(entity_data))
        return 0
    updates_table = " count attribute\n"
    for attr in sorted(attr_update_counts):
        updates_table += "{0:>10} {1}\n".format(attr_update_counts[attr], attr)
    message = "WARNING: This will insert null sentinels for " \
              "these attributes:\n" + updates_table
    if not (args.yes or _confirm_prompt(message)):
        return 0
    # Chunk the entities into batches of 500, and upload to FC
    print("Batching " + str(len(entity_data)) + " updates to Firecloud...")
    chunk_len = 500
    total = int(len(entity_data) / chunk_len) + 1
    for batch, start in enumerate(range(0, len(entity_data), chunk_len), 1):
        print("Updating samples {0}-{1}, batch {2}/{3}".format(
            start+1, min(start+chunk_len, len(entity_data)), batch, total
        ))
        this_data = header + '\n' + '\n'.join(entity_data[start:start+chunk_len])
        # Now push the entity data back to firecloud
        r = fapi.upload_entities(args.project, args.workspace, this_data)
        fapi._check_response_code(r, 200)
    return 0
def health(args):
    """Query the FireCloud server's health endpoint and return its body."""
    response = fapi.health()
    fapi._check_response_code(response, 200)
    return response.content
def mop(args):
    '''Clean up unreferenced data in a workspace'''
    # Fetch the workspace metadata to learn its bucket
    if args.verbose:
        print("Retrieving workspace information...")
    r = fapi.get_workspace(args.project, args.workspace)
    fapi._check_response_code(r, 200)
    workspace = r.json()
    bucket = workspace['workspace']['bucketName']
    bucket_prefix = 'gs://' + bucket
    workspace_name = workspace['workspace']['name']
    if args.verbose:
        print("{0} -- {1}".format(workspace_name, bucket_prefix))
    # Accumulate every bucket file referenced by workspace attributes
    referenced_files = set()
    for value in workspace['workspace']['attributes'].values():
        if isinstance(value, string_types) and value.startswith(bucket_prefix):
            referenced_files.add(value)
    # TODO: Make this more efficient with a native api call?
    # Now run a gsutil ls to list files present in the bucket
    try:
        gsutil_args = ['gsutil', 'ls', 'gs://' + bucket + '/**']
        if args.verbose:
            print(' '.join(gsutil_args))
        bucket_files = subprocess.check_output(gsutil_args, stderr=subprocess.PIPE)
        # check_output returns str on Py2 but bytes on Py3; normalize to str
        if type(bucket_files) == bytes:
            bucket_files = bucket_files.decode()
    except subprocess.CalledProcessError as e:
        eprint("Error retrieving files from bucket: " + str(e))
        return 1
    bucket_files = set(bucket_files.strip().split('\n'))
    if args.verbose:
        print("Found {0} files in bucket {1}".format(len(bucket_files), bucket))
    # Now extend referenced_files with bucket files referenced by entities:
    # 1. Get a list of the entity types in the workspace
    r = fapi.list_entity_types(args.project, args.workspace)
    fapi._check_response_code(r, 200)
    # 2. For each entity type, request all the entities
    for etype in r.json().keys():
        if args.verbose:
            print("Getting annotations for " + etype + " entities...")
        # use the paginated version of the query
        entities = _entity_paginator(args.project, args.workspace, etype,
                                     page_size=1000, filter_terms=None,
                                     sort_direction="asc")
        for entity in entities:
            for value in entity['attributes'].values():
                if isinstance(value, string_types) and value.startswith(bucket_prefix):
                    # 'value' is a file in this bucket
                    referenced_files.add(value)
    if args.verbose:
        print("Found {0} referenced files in workspace {1}".format(
            len(referenced_files), workspace_name))
    # Set difference shows files in bucket that aren't referenced
    unreferenced_files = bucket_files - referenced_files
    def can_delete(f):
        '''Return True if mop may delete this file; logs (.log), job return
           codes (-rc.txt) and tool exec.sh scripts are always kept.'''
        return not (f.endswith('.log') or f.endswith('-rc.txt')
                    or f.endswith('exec.sh'))
    deleteable_files = [f for f in unreferenced_files if can_delete(f)]
    if not deleteable_files:
        if args.verbose:
            print("No files to mop in " + workspace['workspace']['name'])
        return 0
    if args.verbose or args.dry_run:
        print("Found {0} files to delete:\n".format(len(deleteable_files))
              + "\n".join(deleteable_files) + '\n')
    message = "WARNING: Delete {0} files in {1} ({2})".format(
        len(deleteable_files), bucket_prefix, workspace['workspace']['name'])
    if args.dry_run or (not args.yes and not _confirm_prompt(message)):
        return 0
    # Pipe the deleteable_files into gsutil rm to remove them
    if args.verbose:
        print("Deleting files with gsutil...")
    gsrm_proc = subprocess.Popen(['gsutil', '-m', 'rm', '-I'],
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT)
    result = gsrm_proc.communicate(
        input='\n'.join(deleteable_files).encode())[0]
    if args.verbose:
        if type(result) == bytes:
            result = result.decode()
        print(result.rstrip())
    return 0
def sset_loop(args):
    '''Loop over all sample sets in a workspace, performing a func'''
    # Resolve the requested action to a valid fiss_cmd function
    fiss_func = __cmd_to_func(args.action)
    if not fiss_func:
        eprint("invalid FISS cmd '" + args.action + "'")
        return 1
    # Gather the names of every sample set in the workspace
    r = fapi.get_entities(args.project, args.workspace, "sample_set")
    fapi._check_response_code(r, 200)
    sample_sets = [entity['name'] for entity in r.json()]
    args.entity_type = "sample_set"
    for sset in sample_sets:
        print('\n# {0}::{1}/{2} {3}'.format(args.project, args.workspace, sset,
                                            args.action))
        args.entity = sset
        # Note how this code is similar to how args.func is called in
        # main so it may make sense to try to a common method for both
        try:
            result = fiss_func(args)
        except Exception as e:
            status = __pretty_print_fc_exception(e)
            if not args.keep_going:
                return status
        printToCLI(result)
    return 0
def monitor(args):
    """Retrieve status of jobs submitted from a given workspace.

    Returns a list of TSV lines (header first), sorted by descending order
    of job submission date; returns an empty list when the workspace has no
    submissions.
    """
    r = fapi.list_submissions(args.project, args.workspace)
    fapi._check_response_code(r, 200)
    statuses = sorted(r.json(), key=lambda k: k['submissionDate'], reverse=True)
    # BUGFIX: previously indexed statuses[0] unconditionally, raising
    # IndexError for workspaces with no submissions.
    if not statuses:
        return []
    header = '\t'.join(list(statuses[0].keys()))
    # BUGFIX: removed a dead "expander = lambda v: ..." assignment that was
    # immediately shadowed by the def below.
    def expander(thing):
        # Render entity references as "type:name"; everything else via str()
        if isinstance(thing, dict):
            entityType = thing.get("entityType", None)
            if entityType:
                return "{0}:{1}".format(entityType, thing['entityName'])
        return "{0}".format(thing)
    # FIXME: this will generally return different column order between Python 2/3
    return [header] + ['\t'.join(map(expander, v.values())) for v in statuses]
def supervise(args):
    """Run a legacy, Firehose-style workflow of workflows."""
    project, workspace = args.project, args.workspace
    namespace, workflow = args.namespace, args.workflow
    sample_sets = args.sample_sets
    recovery_file = args.json_checkpoint
    # Default to every sample set in the workspace when none were named.
    if not sample_sets:
        r = fapi.get_entities(args.project, args.workspace, "sample_set")
        fapi._check_response_code(r, 200)
        sample_sets = [s['name'] for s in r.json()]
    message = "Sample Sets ({}):\n\t".format(len(sample_sets)) + \
              "\n\t".join(sample_sets)
    prompt = "\nLaunch workflow in " + project + "/" + workspace + \
             " on these sample sets? [Y\\n]: "
    # Ask for confirmation unless --yes was supplied.
    if not args.yes and not _confirm_prompt(message, prompt):
        return
    return supervisor.supervise(project, workspace, namespace, workflow,
                                sample_sets, recovery_file)
def entity_copy(args):
    """Copy entities from one workspace to another."""
    # Destination defaults to the source coordinates when unspecified.
    if not args.to_workspace:
        args.to_workspace = args.workspace
    if not args.to_project:
        args.to_project = args.project
    same_destination = (args.project == args.to_project and
                        args.workspace == args.to_workspace)
    if same_destination:
        eprint("destination project and namespace must differ from"
               " source workspace")
        return 1
    if not args.entities:
        # No explicit list given: copy every entity of the requested type.
        ents = _entity_paginator(args.project, args.workspace, args.entity_type,
                                 page_size=500, filter_terms=None,
                                 sort_direction='asc')
        args.entities = [e['name'] for e in ents]
    prompt = "Copy {0} {1}(s) from {2}/{3} to {4}/{5}?\n[Y\\n]: ".format(
        len(args.entities), args.entity_type, args.project,
        args.workspace, args.to_project, args.to_workspace)
    if not args.yes and not _confirm_prompt("", prompt):
        return
    r = fapi.copy_entities(args.project, args.workspace, args.to_project,
                           args.to_workspace, args.entity_type, args.entities,
                           link_existing_entities=args.link)
    fapi._check_response_code(r, 201)
    return 0
def proj_list(args):
    """Retrieve the list of billing projects accessible to the caller/user,
    and show the level of access granted for each (e.g. Owner, User, ...).

    Returns a list of TSV lines, header first.
    """
    projects = fapi.list_billing_projects()
    fapi._check_response_code(projects, 200)
    projects = sorted(projects.json(), key=lambda d: d['projectName'])
    # BUGFIX: under Python 3, map() returns an iterator and
    # list + iterator raises TypeError; build a real list instead.
    rows = ['{0}\t{1}'.format(p['projectName'], p['role']) for p in projects]
    # FIXME: add username col to output, for when iterating over multiple users
    return ["Project\tRole"] + rows
def config_validate(args):
    ''' Validate a workspace configuration: if an entity was specified (i.e.
    upon which the configuration should operate), then also validate that
    the entity has the necessary attributes'''
    # Ask FireCloud to validate the method configuration itself.
    r = fapi.validate_config(args.project, args.workspace, args.namespace,
                             args.config)
    fapi._check_response_code(r, 200)
    entity_d = None
    config_d = r.json()
    if args.entity:
        # The config's root entity type dictates what kind of entity to fetch.
        entity_type = config_d['methodConfiguration']['rootEntityType']
        entity_r = fapi.get_entity(args.project, args.workspace,
                                   entity_type, args.entity)
        fapi._check_response_code(entity_r, [200,404])
        if entity_r.status_code == 404:
            eprint("Error: No {0} named '{1}'".format(entity_type, args.entity))
            return 2
        else:
            entity_d = entity_r.json()
    # also get the workspace info
    w = fapi.get_workspace(args.project, args.workspace)
    fapi._check_response_code(w, 200)
    workspace_d = w.json()
    # ii/io: invalid input/output expressions; ma: attributes missing on the
    # entity; mwa: attributes missing on the workspace.
    ii, io, ma, mwa = _validate_helper(args, config_d, workspace_d, entity_d)
    ii_msg = "\nInvalid inputs:"
    io_msg = "\nInvalid outputs:"
    ma_msg = "\n{0} {1} doesn't satisfy the following inputs:".format(entity_type, args.entity) if args.entity else ""
    mwa_msg = "\nWorkspace {0}/{1} doesn't satisfy following inputs:".format(args.project, args.workspace)
    # Print each category of problem with the offending expression values.
    for errs, msg in zip([ii, io, ma, mwa], [ii_msg, io_msg, ma_msg, mwa_msg]):
        if errs:
            print(msg)
            for inp, val in errs:
                print("{0} -> {1}".format(inp, val))
    # Non-zero exit when any validation problem was found.
    if ii + io + ma + mwa:
        return 1
def _validate_helper(args, config_d, workspace_d, entity_d=None):
    """ Return FISSFC validation information on config for a certain entity.

    Returns a 4-tuple of lists of (input_name, expression) pairs:
    (invalid inputs, invalid outputs, attributes missing on the entity,
    attributes missing on the workspace).
    """
    # 4 ways to have invalid config:
    invalid_inputs = sorted(config_d["invalidInputs"])
    invalid_outputs = sorted(config_d["invalidOutputs"])
    # Also insert values for invalid i/o
    invalid_inputs = [(i, config_d['methodConfiguration']['inputs'][i]) for i in invalid_inputs]
    invalid_outputs = [(i, config_d['methodConfiguration']['outputs'][i]) for i in invalid_outputs]
    missing_attrs = []
    missing_wksp_attrs = []
    # If an entity was provided, also check to see if that entity has the necessary inputs
    if entity_d:
        entity_type = config_d['methodConfiguration']['rootEntityType']
        # If the attribute is listed here, it has an entry
        entity_attrs = set(entity_d['attributes'])
        # Optimization, only get the workspace attrs if the method config has any
        workspace_attrs = workspace_d['workspace']['attributes']
        # So now iterate over the inputs
        for inp, val in iteritems(config_d['methodConfiguration']['inputs']):
            # Must be an attribute on the entity
            if val.startswith("this."):
                # Normally, the value is of the form 'this.attribute',
                # but for operations on sets, e.g. one can also do
                # 'this.samples.attr'. But even in this case, there must be a
                # 'samples' attribute on the sample set, so checking for the middle
                # value works as expected. Other pathological cases would've been
                # caught above by the validation endpoint
                expected_attr = val.split('.')[1]
                # 'name' is special, it really means '_id', which everything has
                if expected_attr == "name":
                    continue
                if expected_attr not in entity_attrs:
                    missing_attrs.append((inp, val))
            if val.startswith("workspace."):
                # Anything not matching this format will be caught above
                expected_attr = val.split('.')[1]
                if expected_attr not in workspace_attrs:
                    missing_wksp_attrs.append((inp, val))
            # Anything else is a literal
    return invalid_inputs, invalid_outputs, missing_attrs, missing_wksp_attrs
def runnable(args):
    """ Show me what can be run in a given workspace.

    Operates in one of three modes, depending on the arguments given:
      * config + namespace: list the entities the config can/cannot run on
      * entity + entity_type: list the method configs valid for that entity
      * entity_type only: print a config-by-entity validation matrix
    Returns 1 on invalid usage or invalid config, 2 if the entity is absent.
    """
    w = fapi.get_workspace(args.project, args.workspace)
    fapi._check_response_code(w, 200)
    workspace_d = w.json()
    if args.config and args.namespace and not args.entity:
        # See what entities I can run on with this config
        r = fapi.validate_config(args.project, args.workspace, args.namespace,
                                 args.config)
        fapi._check_response_code(r, 200)
        config_d = r.json()
        # First validate without any sample sets
        errs = sum(_validate_helper(args, config_d, workspace_d, None), [])
        if errs:
            print("Configuration contains invalid expressions")
            return 1
        # Now get all the possible entities, and evaluate each
        entity_type = config_d['methodConfiguration']['rootEntityType']
        ent_r = fapi.get_entities(args.project, args.workspace, entity_type)
        # BUGFIX: previously re-checked the stale validate_config response
        # (r) here rather than the get_entities response (ent_r).
        fapi._check_response_code(ent_r, 200)
        entities = ent_r.json()
        can_run_on = []
        cannot_run_on = []
        # Validate every entity
        for entity_d in entities:
            # If there are errors in the validation
            if sum(_validate_helper(args, config_d, workspace_d, entity_d), []):
                cannot_run_on.append(entity_d['name'])
            else:
                can_run_on.append(entity_d['name'])
        # Print what can be run
        if can_run_on:
            print("{0} CAN be run on {1} {2}(s):".format(args.config, len(can_run_on), entity_type))
            print("\n".join(can_run_on)+"\n")
        print("{0} CANNOT be run on {1} {2}(s)".format(args.config, len(cannot_run_on), entity_type))
        #print("\n".join(cannot_run_on))
    # See what method configs are possible for the given sample set
    elif args.entity and args.entity_type and not args.config:
        entity_r = fapi.get_entity(args.project, args.workspace,
                                   args.entity_type, args.entity)
        fapi._check_response_code(entity_r, [200,404])
        if entity_r.status_code == 404:
            print("Error: No {0} named '{1}'".format(args.entity_type, args.entity))
            return 2
        entity_d = entity_r.json()
        # Now get all the method configs in the workspace
        conf_r = fapi.list_workspace_configs(args.project, args.workspace)
        fapi._check_response_code(conf_r, 200)
        # Iterate over configs in the workspace, and validate against them
        for cfg in conf_r.json():
            # If we limit search to a particular namespace, skip ones that don't match
            if args.namespace and cfg['namespace'] != args.namespace:
                continue
            # But we have to get the full description
            r = fapi.validate_config(args.project, args.workspace,
                                     cfg['namespace'], cfg['name'])
            fapi._check_response_code(r, [200, 404])
            if r.status_code == 404:
                # Permission error, continue
                continue
            config_d = r.json()
            errs = sum(_validate_helper(args, config_d, workspace_d, entity_d),[])
            if not errs:
                print(cfg['namespace'] + "/" + cfg['name'])
    elif args.entity_type:
        # Last mode, build a matrix of everything based on the entity type
        # Get all of the entity_type
        ent_r = fapi.get_entities(args.project, args.workspace, args.entity_type)
        fapi._check_response_code(ent_r, 200)
        entities = ent_r.json()
        entity_names = sorted(e['name'] for e in entities)
        conf_r = fapi.list_workspace_configs(args.project, args.workspace)
        fapi._check_response_code(conf_r, 200)
        conf_list = conf_r.json()
        config_names = sorted(c['namespace'] + '/' + c['name'] for c in conf_list)
        mat = {c:dict() for c in config_names}
        # Now iterate over configs, building up the matrix
        # Iterate over configs in the workspace, and validate against them
        for cfg in conf_list:
            # If we limit search to a particular namespace, skip ones that don't match
            if args.namespace and cfg['namespace'] != args.namespace:
                continue
            # But we have to get the full description
            r = fapi.validate_config(args.project, args.workspace,
                                     cfg['namespace'], cfg['name'])
            fapi._check_response_code(r, [200, 404])
            if r.status_code == 404:
                # Permission error, continue
                continue
            config_d = r.json()
            # Validate against every entity
            for entity_d in entities:
                errs = sum(_validate_helper(args, config_d, workspace_d, entity_d),[])
                #TODO: True/False? Y/N?
                symbol = "X" if not errs else ""
                cfg_name = cfg['namespace'] + '/' + cfg['name']
                mat[cfg_name][entity_d['name']] = symbol
        # Now print the validation matrix
        # headers
        print("Namespace/Method Config\t" + "\t".join(entity_names))
        for conf in config_names:
            print(conf + "\t" + "\t".join(mat[conf][e] for e in entity_names))
    else:
        print("runnable requires a namespace+configuration or entity type")
        return 1
def _confirm_prompt(message, prompt="\nAre you sure? [y/yes (default: no)]: ",
                    affirmations=("Y", "Yes", "yes", "y")):
    """Show message followed by a confirmation prompt, and return True
    only when the user's response is one of the accepted affirmations.
    """
    response = input(message + prompt)
    return response in affirmations
def _nonempty_project(string):
    """
    Argparse validator for ensuring a billing project is provided.

    (Doc fix: the prior docstring said "workspace", but this validator is
    used for the project argument, as the error message shows.)

    Raises argparse.ArgumentTypeError when the value is empty.
    """
    value = str(string)
    if not value:
        msg = "No project provided and no default project configured"
        raise argparse.ArgumentTypeError(msg)
    return value
def _entity_paginator(namespace, workspace, etype, page_size=500,
                      filter_terms=None, sort_direction="asc"):
    """Fetch every entity of the given type by paging through the
    get_entities_query endpoint, so large workspaces don't require one
    giant request.
    """
    all_entities = []
    page = 1
    total_pages = 1
    while page <= total_pages:
        r = fapi.get_entities_query(namespace, workspace, etype, page=page,
                                    page_size=page_size,
                                    sort_direction=sort_direction,
                                    filter_terms=filter_terms)
        fapi._check_response_code(r, 200)
        body = r.json()
        if page == 1:
            # The page count only needs to be read from the first response.
            total_pages = body['resultMetadata']['filteredPageCount']
        all_entities.extend(body['results'])
        page += 1
    return all_entities
def __cmd_to_func(cmd):
    """Map a FISS command name to its function object in this module.

    Returns None when cmd does not name a function tagged as a fiss_cmd.
    """
    this_module = sys.modules[__name__]
    func = getattr(this_module, cmd, None)
    # Only attributes explicitly marked as FISS commands count.
    if func and not hasattr(func, 'fiss_cmd'):
        return None
    return func
def _valid_headerline(l):
    """Return True if the given string is a valid loadfile header line."""
    if not l:
        return False
    columns = l.split('\t')
    parts = columns[0].split(':')
    if len(parts) != 2:
        return False
    kind, id_name = parts
    if kind in ('entity', 'update'):
        return id_name in ('participant_id', 'participant_set_id',
                           'sample_id', 'sample_set_id',
                           'pair_id', 'pair_set_id')
    if kind == 'membership':
        # e.g. "membership:sample_set_id<TAB>sample_id"
        return len(columns) >= 2 and id_name.replace('set_', '') == columns[1]
    return False
def _batch_load(project, workspace, headerline, entity_data, chunk_size=500):
    """ Submit a large number of entity updates in batches of chunk_size.

    headerline is the loadfile header (e.g. "entity:sample_id\t...") and
    entity_data the remaining TSV rows.  Returns 0 on success, 1 when the
    header is invalid.
    """
    if fcconfig.verbosity:
        print("Batching " + str(len(entity_data)) + " updates to Firecloud...")
    # Parse the entity type from the first cell, e.g. "entity:sample_id"
    # First check that the header is valid
    if not _valid_headerline(headerline):
        eprint("Invalid loadfile header:\n" + headerline)
        return 1
    update_type = "membership" if headerline.startswith("membership") else "entitie"
    etype = headerline.split('\t')[0].split(':')[1].replace("_id", "")
    # BUGFIX: ceiling division; the old int(len/chunk)+1 over-reported the
    # batch count whenever len(entity_data) was an exact multiple of
    # chunk_size.
    total = (len(entity_data) + chunk_size - 1) // chunk_size
    batch = 0
    for i in range(0, len(entity_data), chunk_size):
        batch += 1
        if fcconfig.verbosity:
            print("Updating {0} {1}s {2}-{3}, batch {4}/{5}".format(
                etype, update_type, i+1, min(i+chunk_size, len(entity_data)),
                batch, total))
        this_data = headerline + '\n' + '\n'.join(entity_data[i:i+chunk_size])
        # Now push the entity data to firecloud
        r = fapi.upload_entities(project, workspace, this_data)
        fapi._check_response_code(r, 200)
    return 0
def main_as_cli(argv=None):
    '''Use this entry point to call HL fiss funcs as though from the UNIX CLI.
    (see firecloud/tests/highlevel_tests.py:call_cli for usage examples)'''
    try:
        result = main(argv)
    except Exception as e:
        # Turn any error into a printable status/message instead of a traceback
        result = __pretty_print_fc_exception(e)
    # FIXME: we should invert True/False return values to 0/1 here, to comply
    # with UNIX exit code semantics (and avoid problems with make, scripts, etc)
    return printToCLI(result)
def create_payload(entities):
    """Create a TSV payload describing entities.

    The payload consists of one header row naming the entity type and the
    attribute columns, followed by one row per entity: its id and attribute
    values separated by tabs.  The result can be uploaded to the workspace
    via firecloud.api.upload_entities().

    Raises ValueError unless all entities share a single type.
    """
    # All entities must share one type; the header encodes it once.
    etypes = {e.etype for e in entities}
    if len(etypes) != 1:
        raise ValueError("Can't create payload with " +
                         str(len(etypes)) + " types")
    # Union of attribute names seen on any entity.
    attr_names = set()
    for entity in entities:
        attr_names.update(entity.attrs.keys())
    attr_names = list(attr_names)
    header_cell = "entity:" + entities[0].etype + "_id"
    rows = ['\t'.join([header_cell] + attr_names)]
    for entity in entities:
        cells = [entity.entity_id] + [entity.attrs.get(a, "")
                                      for a in attr_names]
        rows.append('\t'.join(cells))
    return '\n'.join(rows) + '\n'
def create_loadfile(entities, f):
    """Create a TSV payload for entities (via Entity.create_payload) and
    save it to the file path f."""
    with open(f, 'w') as out:
        out.write(Entity.create_payload(entities))
def needs_gcloud(self):
    """Return True when gcloud is needed for authentication but cannot be
    found: not running under Google App Engine, the default Cloud SDK
    install location is absent from PATH, and no gcloud executable is on
    PATH."""
    sdk_bin = ['google-cloud-sdk', 'bin']
    if platform.system() == "Windows":
        default_install = os.path.join(os.environ['LOCALAPPDATA'],
                                       'Google', 'Cloud SDK', *sdk_bin)
    else:
        default_install = os.path.join(os.path.expanduser('~'), *sdk_bin)
    on_app_engine = os.getenv('SERVER_SOFTWARE',
                              '').startswith('Google App Engine/')
    sdk_on_path = default_install in os.environ["PATH"].split(os.pathsep)
    return not on_app_engine and not sdk_on_path and which('gcloud') is None
def action(arguments):
    """
    Run mogrify. Most of the action is in convert, this just creates a temp
    file for the output of each input file.
    """
    for input_file in arguments.input_files:
        logging.info(input_file)
        # Generate a temporary file
        # NOTE(review): common.atomic_write presumably swaps the temp file
        # into input_file.name on clean exit -- confirm in common module.
        with common.atomic_write(
                input_file.name, file_factory=common.FileType('wt')) as tf:
            convert.transform_file(input_file, tf, arguments)
        # Some inputs may not be real file handles, so close() is optional
        if hasattr(input_file, 'close'):
            input_file.close()
def all_unambiguous(sequence_str):
    """Expand sequence_str into every unambiguous version of it, using
    _AMBIGUOUS_MAP to enumerate the possibilities for each base."""
    expansions = [[]]
    for base in sequence_str:
        # A base not in the map stands only for itself.
        choices = _AMBIGUOUS_MAP.get(base, base)
        expansions = [prefix + [choice]
                      for prefix in expansions
                      for choice in choices]
    return [''.join(expansion) for expansion in expansions]
def build_parser(parser):
    """
    Generate a subparser: attach the quality-filter arguments (I/O,
    reporting, quality/length thresholds, sliding-window options,
    ambiguous-base handling, and barcode/primer options) to parser.
    """
    parser.add_argument(
        'sequence_file',
        type=FileType('r'),
        help="""Input fastq file. A fasta-format file may also be provided
        if --input-qual is also specified.""")
    parser.add_argument(
        '--input-qual',
        type=FileType('r'),
        help="""The quality scores associated with the input file. Only
        used if input file is fasta.""")
    parser.add_argument(
        'output_file',
        type=FileType('w'),
        help="""Output file. Format determined from extension.""")
    output_group = parser.add_argument_group("Output")
    output_group.add_argument(
        '--report-out',
        type=FileType('w'),
        default=sys.stdout,
        help="""Output file for report [default:
        stdout]""")
    output_group.add_argument(
        '--details-out',
        type=FileType('w'),
        help="""Output file to report fate of each sequence""")
    output_group.add_argument(
        '--no-details-comment',
        action='store_false',
        default=True,
        dest='details_comment',
        help="""Do not write comment
        lines with version and call to start --details-out""")
    parser.add_argument(
        '--min-mean-quality',
        metavar='QUALITY',
        type=float,
        default=DEFAULT_MEAN_SCORE,
        help="""Minimum mean quality score for
        each read [default: %(default)s]""")
    parser.add_argument(
        '--min-length',
        metavar='LENGTH',
        type=int,
        default=200,
        help="""Minimum length to keep sequence [default:
        %(default)s]""")
    parser.add_argument(
        '--max-length',
        metavar='LENGTH',
        type=int,
        default=1000,
        help="""Maximum length to keep before truncating
        [default: %(default)s]. This operation occurs before
        --max-ambiguous""")
    window_group = parser.add_argument_group('Quality window options')
    window_group.add_argument(
        '--quality-window-mean-qual',
        type=float,
        help="""Minimum quality score within the window defined by
        --quality-window. [default: same as --min-mean-quality]""")
    window_group.add_argument(
        '--quality-window-prop',
        help="""Proportion of
        reads within quality window that must pass filter. Floats are [default:
        %(default).1f]""",
        default=1.0,
        type=typed_range(float, 0.0, 1.0))
    window_group.add_argument(
        '--quality-window',
        type=int,
        metavar='WINDOW_SIZE',
        default=0,
        help="""Window size for truncating sequences. When set
        to a non-zero value, sequences are truncated where the mean mean
        quality within the window drops below --min-mean-quality.
        [default: %(default)s]""")
    parser.add_argument(
        '--ambiguous-action',
        choices=('truncate', 'drop'),
        help="""Action to take on ambiguous base in sequence (N's).
        [default: no action]""")
    parser.add_argument(
        '--max-ambiguous',
        default=None,
        help="""Maximum number
        of ambiguous bases in a sequence. Sequences exceeding this count
        will be removed.""",
        type=int)
    parser.add_argument(
        '--pct-ambiguous',
        help="""Maximum percent of
        ambiguous bases in a sequence. Sequences exceeding this percent
        will be removed.""",
        type=float)
    barcode_group = parser.add_argument_group('Barcode/Primer')
    primer_group = barcode_group.add_mutually_exclusive_group()
    primer_group.add_argument(
        '--primer', help="""IUPAC ambiguous primer to
        require""")
    primer_group.add_argument(
        '--no-primer',
        help="""Do not use a primer.""",
        action='store_const',
        const='',
        dest='primer')
    barcode_group.add_argument(
        '--barcode-file',
        help="""CSV file containing
        sample_id,barcode[,primer] in the rows. A single primer for all
        sequences may be specified with `--primer`, or `--no-primer` may be
        used to indicate barcodes should be used without a primer
        check.""",
        type=FileType('r'))
    barcode_group.add_argument(
        '--barcode-header',
        action='store_true',
        default=False,
        help="""Barcodes have a header row [default:
        %(default)s]""")
    barcode_group.add_argument(
        '--map-out',
        help="""Path to write
        sequence_id,sample_id pairs""",
        type=FileType('w'),
        metavar='SAMPLE_MAP')
    barcode_group.add_argument(
        '--quoting',
        help="""A string naming an
        attribute of the csv module defining the quoting behavior for
        `SAMPLE_MAP`. [default: %(default)s]""",
        default='QUOTE_MINIMAL',
        choices=[s for s in dir(csv) if s.startswith('QUOTE_')])
def moving_average(iterable, n):
    """Yield successive means over a sliding window of size n.

    From the Python collections module documentation:
    moving_average([40, 30, 50, 46, 39, 44]) --> 40.0 42.0 45.0 43.0
    """
    source = iter(iterable)
    # Prime the window with the first n-1 values, padded with a leading 0
    # so the first subtraction below is a no-op.
    window = collections.deque(itertools.islice(source, n - 1))
    window.appendleft(0)
    total = sum(window)
    for value in source:
        total += value - window.popleft()
        window.append(value)
        yield total / float(n)
def parse_barcode_file(fp, primer=None, header=False):
    """
    Load label, barcode, primer records from a CSV file.

    Returns a trie mapping each unambiguous barcode+primer sequence to its
    specimen label.  If primer is given it is used for every row; otherwise
    each row's third column supplies the primer.  Any additional columns
    are ignored.  Raises ValueError when two rows expand to the same
    sequence.
    """
    tr = trie.trie()
    reader = csv.reader(fp)
    if header:
        # Skip header
        next(reader)
    # Skip blank rows
    records = (record for record in reader if record)
    for record in records:
        specimen, barcode = record[:2]
        if primer is not None:
            pr = primer
        else:
            pr = record[2]
        for sequence in all_unambiguous(barcode + pr):
            if sequence in tr:
                # BUGFIX: the message was previously never formatted -- the
                # format arguments were passed to ValueError itself.
                raise ValueError(
                    "Duplicate sample: {0}, {1} both have {2}".format(
                        specimen, tr[sequence], sequence))
            logging.info('%s->%s', sequence, specimen)
            tr[sequence] = specimen
    return tr
def action(arguments):
    """
    Given parsed arguments, filter input files.

    Builds a chain of quality/length/ambiguity/barcode filters, streams the
    input sequences through them, writes survivors to the output file, and
    emits a per-filter summary report (plus optional per-record details and
    a barcode->sample map).
    """
    if arguments.quality_window_mean_qual and not arguments.quality_window:
        raise ValueError("--quality-window-mean-qual specified without "
                         "--quality-window")
    if trie is None or triefind is None:
        raise ValueError(
            'Missing Bio.trie and/or Bio.triefind modules. Cannot continue')
    filters = []
    input_type = fileformat.from_handle(arguments.sequence_file)
    output_type = fileformat.from_handle(arguments.output_file)
    with arguments.sequence_file as fp:
        if arguments.input_qual:
            # fasta input paired with a separate .qual file
            sequences = QualityIO.PairedFastaQualIterator(
                fp, arguments.input_qual)
        else:
            sequences = SeqIO.parse(fp, input_type)
        listener = RecordEventListener()
        if arguments.details_out:
            rh = RecordReportHandler(arguments.details_out, arguments.argv,
                                     arguments.details_comment)
            rh.register_with(listener)
        # Track read sequences
        sequences = listener.iterable_hook('read', sequences)
        # Add filters
        if arguments.min_mean_quality and input_type == 'fastq':
            qfilter = QualityScoreFilter(arguments.min_mean_quality)
            filters.append(qfilter)
        if arguments.max_length:
            max_length_filter = MaxLengthFilter(arguments.max_length)
            filters.append(max_length_filter)
        if arguments.min_length:
            min_length_filter = MinLengthFilter(arguments.min_length)
            filters.append(min_length_filter)
        if arguments.max_ambiguous is not None:
            max_ambig_filter = MaxAmbiguousFilter(arguments.max_ambiguous)
            filters.append(max_ambig_filter)
        if arguments.pct_ambiguous is not None:
            pct_ambig_filter = PctAmbiguousFilter(arguments.pct_ambiguous)
            filters.append(pct_ambig_filter)
        if arguments.ambiguous_action:
            ambiguous_filter = AmbiguousBaseFilter(arguments.ambiguous_action)
            filters.append(ambiguous_filter)
        if arguments.quality_window:
            min_qual = (arguments.quality_window_mean_qual or
                        arguments.min_mean_quality)
            # Window truncation runs first, before any other filter
            window_filter = WindowQualityScoreFilter(arguments.quality_window,
                                                     min_qual)
            filters.insert(0, window_filter)
        if arguments.barcode_file:
            with arguments.barcode_file:
                tr = parse_barcode_file(arguments.barcode_file,
                                        arguments.primer,
                                        arguments.barcode_header)
            f = PrimerBarcodeFilter(tr)
            filters.append(f)
            if arguments.map_out:
                barcode_writer = csv.writer(
                    arguments.map_out,
                    quoting=getattr(csv, arguments.quoting),
                    lineterminator='\n')
                # Record (sequence id, sample id) whenever a barcode matches
                def barcode_handler(record, sample, barcode=None):
                    barcode_writer.writerow((record.id, sample))
                listener.register_handler('found_barcode', barcode_handler)
        # Chain every filter onto the sequence stream, in order
        for f in filters:
            f.listener = listener
            sequences = f.filter_records(sequences)
        # Track sequences which passed all filters
        sequences = listener.iterable_hook('write', sequences)
        with arguments.output_file:
            SeqIO.write(sequences, arguments.output_file, output_type)
    rpt_rows = (f.report_dict() for f in filters)
    # Write report
    with arguments.report_out as fp:
        writer = csv.DictWriter(
            fp, BaseFilter.report_fields, lineterminator='\n', delimiter='\t')
        writer.writeheader()
        writer.writerows(rpt_rows)
def iterable_hook(self, name, iterable):
    """Yield each item of iterable unchanged, firing an event called
    ``name`` (via self(name, item)) just before each item is yielded."""
    for item in iterable:
        self(name, item)
        yield item
def _found_barcode(self, record, sample, barcode=None):
    """Hook called when barcode is found; records the sample on the record
    currently being reported.  The barcode argument is accepted to match
    the event signature but is unused."""
    assert record.id == self.current_record['sequence_name']
    self.current_record['sample'] = sample
def filter_records(self, records):
    """Apply this filter to each record, yielding those that pass.

    Updates the pass/fail counters on self, and notifies self.listener
    (when set) about each record that fails the filter.
    """
    for record in records:
        try:
            result = self.filter_record(record)
            assert (result)
            # Quick check of whether the filter modified the sequence
            if result.seq == record.seq:
                self.passed_unchanged += 1
            else:
                self.passed_changed += 1
            yield result
        except FailedFilter as e:
            self.failed += 1
            v = e.value
            if self.listener:
                self.listener('failed_filter', record,
                              filter_name=self.name, value=v)
def filter_record(self, record):
    """Pass the record iff its mean phred quality meets the minimum;
    otherwise raise FailedFilter carrying the mean score."""
    scores = record.letter_annotations['phred_quality']
    avg = mean(scores)
    if avg < self.min_mean_score:
        raise FailedFilter(avg)
    return record
def filter_record(self, record):
    """
    Filter a single record using a sliding quality window.

    If the whole record fits inside one window, pass or fail it on its
    mean quality alone.  Otherwise keep the longest prefix whose every
    window has acceptable mean quality, truncating at the first window
    that drops below the minimum; fail outright when even the first
    window is below the minimum.
    """
    quality_scores = record.letter_annotations['phred_quality']
    # Simple case - window covers whole sequence
    if len(record) <= self.window_size:
        mean_score = mean(quality_scores)
        if mean_score >= self.min_mean_score:
            return record
        else:
            raise FailedFilter(mean_score)
    # Find the right clipping point. Start clipping at the beginning of the
    # sequence, then extend the window to include regions with acceptable
    # mean quality scores.
    clip_right = 0
    for i, a in enumerate(
            moving_average(quality_scores, self.window_size)):
        if a >= self.min_mean_score:
            # Window starting at i passed; keep the sequence through its end
            clip_right = i + self.window_size
        else:
            break
    if clip_right:
        return record[:clip_right]
    else:
        # First window failed - record fails
        raise FailedFilter()
def filter_record(self, record):
    """Handle the first ambiguous base ('N') in the record: truncate the
    record there or drop it entirely, according to self.action."""
    nloc = record.seq.find('N')
    if nloc < 0:
        # No ambiguous base present; pass through unchanged.
        return record
    if self.action == 'truncate':
        return record[:nloc]
    if self.action == 'drop':
        raise FailedFilter()
    assert False
def filter_record(self, record):
    """Pass the record iff it meets the minimum length; otherwise raise
    FailedFilter carrying the offending length."""
    if len(record) < self.min_length:
        raise FailedFilter(len(record))
    return record
def filter_record(self, record):
    """Truncate the record to max_length; shorter records pass unchanged."""
    if len(record) < self.max_length:
        return record
    return record[:self.max_length]
def summarize_sequence_file(source_file, file_type=None):
    """
    Summarizes a sequence file, returning a tuple containing the name,
    whether the file is an alignment ("TRUE"/"FALSE"), minimum sequence
    length, maximum sequence length, average length, number of sequences.
    """
    is_alignment = True
    avg_length = None
    min_length = sys.maxsize
    max_length = 0
    sequence_count = 0
    # Get an iterator and analyze the data.
    with common.FileType('rt')(source_file) as fp:
        if not file_type:
            # Infer the format (fasta/fastq/...) from the handle when absent
            file_type = fileformat.from_handle(fp)
        for record in SeqIO.parse(fp, file_type):
            sequence_count += 1
            sequence_length = len(record)
            if max_length != 0:
                # If even one sequence is not the same length as the others,
                # we don't consider this an alignment.
                if sequence_length != max_length:
                    is_alignment = False
            # Lengths
            if sequence_length > max_length:
                max_length = sequence_length
            if sequence_length < min_length:
                min_length = sequence_length
            # Average length: online (running) mean so lengths need not be
            # stored for the whole file
            if sequence_count == 1:
                avg_length = float(sequence_length)
            else:
                avg_length = avg_length + ((sequence_length - avg_length) /
                                           sequence_count)
    # Handle an empty file:
    if avg_length is None:
        min_length = max_length = avg_length = 0
    # Zero or one sequence is never considered an alignment
    if sequence_count <= 1:
        is_alignment = False
    return (source_file, str(is_alignment).upper(), min_length,
            max_length, avg_length, sequence_count)
def action(arguments):
    """
    Given one or more sequence files, determine if the file is an alignment,
    the maximum sequence length and the total number of sequences. Provides
    different output formats including tab (tab-delimited), csv and align
    (aligned as if part of a borderless table).
    """
    # Ignore SIGPIPE, for head support
    common.exit_on_sigpipe()
    common.exit_on_sigint()
    handle = arguments.destination_file
    output_format = arguments.output_format
    if not output_format:
        # Default: human-readable table when writing to a TTY, tab-delimited
        # otherwise (e.g. when piped); handles without isatty() get 'tab'.
        try:
            output_format = 'align' if handle.isatty() else 'tab'
        except AttributeError:
            output_format = 'tab'
    writer_cls = _WRITERS[output_format]
    ssf = partial(summarize_sequence_file, file_type = arguments.input_format)
    # if only one thread, do not use the multithreading so parent process
    # can be terminated using ctrl+c
    if arguments.threads > 1:
        pool = multiprocessing.Pool(processes=arguments.threads)
        summary = pool.imap(ssf, arguments.source_files)
    else:
        summary = (ssf(f) for f in arguments.source_files)
    with handle:
        writer = writer_cls(arguments.source_files, summary, handle)
        writer.write() | Given one more more sequence files, determine if the file is an alignment,
the maximum sequence length and the total number of sequences. Provides
different output formats including tab (tab-delimited), csv and align
(aligned as if part of a borderless table). | entailment |
def _record_buffer(records, buffer_size=DEFAULT_BUFFER_SIZE):
    """
    Buffer for transform functions which require multiple passes through data.
    Value returned by context manager is a function which returns an iterator
    through records.

    NOTE(review): callers use this as ``with _record_buffer(...) as r`` —
    presumably a ``@contextlib.contextmanager`` decorator is applied at the
    definition site; confirm it was not lost.
    """
    # Spool pickled records to memory, overflowing to disk past buffer_size.
    with tempfile.SpooledTemporaryFile(buffer_size, mode='wb+') as tf:
        pickler = pickle.Pickler(tf)
        for record in records:
            pickler.dump(record)
        def record_iter():
            # Rewind and replay every pickled record, one full pass per call.
            tf.seek(0)
            unpickler = pickle.Unpickler(tf)
            while True:
                try:
                    yield unpickler.load()
                except EOFError:
                    break
        yield record_iter | Buffer for transform functions which require multiple passes through data.
Value returned by context manager is a function which returns an iterator
through records. | entailment |
def dashes_cleanup(records, prune_chars='.:?~'):
    """
    Take an alignment and convert any undesirable characters such as ? or ~ to
    -.
    """
    logging.info(
        "Applying _dashes_cleanup: converting any of '{}' to '-'.".format(prune_chars))
    # One-shot translation table: every prune character maps to a gap.
    table = str.maketrans(prune_chars, '-' * len(prune_chars))
    for record in records:
        record.seq = Seq(str(record.seq).translate(table),
                         record.seq.alphabet)
        yield record
def deduplicate_sequences(records, out_file):
    """
    Remove any duplicate records with identical sequences, keep the first
    instance seen and discard additional occurrences.

    When ``out_file`` is given, one line per unique sequence is written
    listing all record IDs that shared it.
    """
    logging.info('Applying _deduplicate_sequences generator: '
                 'removing any duplicate records with identical sequences.')
    checksum_sequences = collections.defaultdict(list)
    for record in records:
        ids_for_checksum = checksum_sequences[seguid(record.seq)]
        # First time this sequence checksum is seen: keep the record.
        if not ids_for_checksum:
            yield record
        ids_for_checksum.append(record.id)
    if out_file is not None:
        with out_file:
            for ids_for_checksum in checksum_sequences.values():
                out_file.write('%s\n' % (' '.join(ids_for_checksum),))
def deduplicate_taxa(records):
    """
    Remove any duplicate records with identical IDs, keep the first
    instance seen and discard additional occurrences.

    IDs of the form ``<taxid>|...`` are deduplicated on the integer taxid;
    anything else is deduplicated on the full ID string.
    """
    logging.info('Applying _deduplicate_taxa generator: '
                 'removing any duplicate records with identical IDs.')
    taxa = set()
    for record in records:
        # Default to full ID, split if | is found.
        taxid = record.id
        if '|' in record.id:
            try:
                taxid = int(record.id.split("|")[0])
            except ValueError:
                # If we couldn't parse an integer from the ID, just fall back
                # on the ID
                logging.warning("Unable to parse integer taxid from %s",
                                taxid)
        if taxid in taxa:
            continue
        taxa.add(taxid)
        yield record
def first_name_capture(records):
    """
    Take only the first whitespace-delimited word as the name of the sequence.
    Essentially removes any extra text from the sequence's description.
    """
    logging.info('Applying _first_name_capture generator: '
                 'making sure ID only contains the first whitespace-delimited '
                 'word.')
    whitespace = re.compile(r'\s+')
    for record in records:
        if whitespace.search(record.description):
            # Rebuild with an empty description; record.id already holds the
            # first word for FASTA-style parsing.
            yield SeqRecord(record.seq, id=record.id,
                            description="")
        else:
            yield record | Take only the first whitespace-delimited word as the name of the sequence.
Essentially removes any extra text from the sequence's description. | entailment |
def include_from_file(records, handle):
    """
    Filter the records, keeping only sequences whose ID is contained in the
    handle.

    ``handle`` is any iterable of lines; each line holds one ID and is
    stripped of surrounding whitespace before comparison.
    """
    wanted = {line.strip() for line in handle}
    for record in records:
        if record.id.strip() in wanted:
            yield record
def isolate_region(sequences, start, end, gap_char='-'):
    """
    Replace regions before and after start:end with gap chars

    :param start: 0-based inclusive start column of the region to keep
    :param end: 0-based exclusive end column of the region to keep
    :raises ValueError: if ``end <= start``
    """
    # Check arguments
    if end <= start:
        raise ValueError("start of slice must precede end ({0} !> {1})".format(
            end, start))
    for sequence in sequences:
        seq = sequence.seq
        # Pad both sides with gaps so output length equals input length.
        start_gap = gap_char * start
        end_gap = gap_char * (len(seq) - end)
        seq = Seq(start_gap + str(seq[start:end]) + end_gap,
                  alphabet=seq.alphabet)
        sequence.seq = seq
        yield sequence | Replace regions before and after start:end with gap chars | entailment |
def drop_columns(records, slices):
    """
    Drop all columns present in ``slices`` from records

    :param slices: iterable of ``slice`` objects describing alignment
        column ranges to remove
    """
    for record in records:
        # Generate a set of indices to remove
        drop = set(i for slice in slices
                   for i in range(*slice.indices(len(record))))
        # Boolean keep-mask over every column, fed to itertools.compress.
        keep = [i not in drop for i in range(len(record))]
        record.seq = Seq(''.join(itertools.compress(record.seq, keep)), record.seq.alphabet)
        yield record | Drop all columns present in ``slices`` from records | entailment |
def cut_sequences_relative(records, slices, record_id):
    """
    Cuts records to slices, indexed by non-gap positions in record_id

    :raises ValueError: if no record with id ``record_id`` exists
    """
    # Two passes are needed (locate the anchor record, then cut everything),
    # so spool the records through a buffer.
    with _record_buffer(records) as r:
        try:
            record = next(i for i in r() if i.id == record_id)
        except StopIteration:
            raise ValueError("Record with id {0} not found.".format(record_id))
        # Translate the anchor's gap-free coordinates into alignment columns.
        new_slices = _update_slices(record, slices)
        for record in multi_cut_sequences(r(), new_slices):
            yield record | Cuts records to slices, indexed by non-gap positions in record_id | entailment |
def multi_mask_sequences(records, slices):
    """
    Replace characters sliced by slices with gap characters.
    """
    for record in records:
        record_indices = list(range(len(record)))
        # Start from the full index set and subtract every sliced range;
        # whatever remains keeps its original character.
        keep_indices = reduce(lambda i, s: i - frozenset(record_indices[s]),
                              slices, frozenset(record_indices))
        seq = ''.join(b if i in keep_indices else '-'
                      for i, b in enumerate(str(record.seq)))
        # NOTE(review): unlike sibling transforms, the alphabet is not
        # propagated to the new Seq here — confirm whether that is intended.
        record.seq = Seq(seq)
        yield record | Replace characters sliced by slices with gap characters. | entailment |
def prune_empty(records):
    """
    Remove any sequences which are entirely gaps ('-')

    Note: zero-length sequences are also dropped (they contain no
    non-gap character).
    """
    for record in records:
        if any(c != '-' for c in str(record.seq)):
            yield record
def _reverse_annotations(old_record, new_record):
"""
Copy annotations form old_record to new_record, reversing any
lists / tuples / strings.
"""
# Copy the annotations over
for k, v in list(old_record.annotations.items()):
# Trim if appropriate
if isinstance(v, (tuple, list)) and len(v) == len(old_record):
assert len(v) == len(old_record)
v = v[::-1]
new_record.annotations[k] = v
# Letter annotations must be lists / tuples / strings of the same
# length as the sequence
for k, v in list(old_record.letter_annotations.items()):
assert len(v) == len(old_record)
new_record.letter_annotations[k] = v[::-1] | Copy annotations form old_record to new_record, reversing any
lists / tuples / strings. | entailment |
def reverse_sequences(records):
    """
    Reverse the order of sites in sequences.
    """
    logging.info('Applying _reverse_sequences generator: '
                 'reversing the order of sites in sequences.')
    for record in records:
        rev_record = SeqRecord(record.seq[::-1], id=record.id,
                               name=record.name,
                               description=record.description)
        # Copy the annotations over (per-site annotations are reversed too).
        _reverse_annotations(record, rev_record)
        yield rev_record | Reverse the order of sites in sequences. | entailment |
def reverse_complement_sequences(records):
    """
    Transform sequences into reverse complements.
    """
    logging.info('Applying _reverse_complement_sequences generator: '
                 'transforming sequences into reverse complements.')
    for record in records:
        rev_record = SeqRecord(record.seq.reverse_complement(),
                               id=record.id, name=record.name,
                               description=record.description)
        # Copy the annotations over (per-site annotations are reversed too).
        _reverse_annotations(record, rev_record)
        yield rev_record | Transform sequences into reverse complements. | entailment |
def ungap_sequences(records, gap_chars=GAP_TABLE):
    """
    Remove gaps from sequences, given an alignment.

    :param gap_chars: translation table of gap characters to strip
        (defaults to the module-level GAP_TABLE)
    """
    logging.info('Applying _ungap_sequences generator: removing all gap characters')
    for record in records:
        yield ungap_all(record, gap_chars) | Remove gaps from sequences, given an alignment. | entailment |
def _update_id(record, new_id):
"""
Update a record id to new_id, also modifying the ID in record.description
"""
old_id = record.id
record.id = new_id
# At least for FASTA, record ID starts the description
record.description = re.sub('^' + re.escape(old_id), new_id, record.description)
return record | Update a record id to new_id, also modifying the ID in record.description | entailment |
def name_append_suffix(records, suffix):
    """
    Given a set of sequences, append a suffix for each sequence's name.
    """
    logging.info('Applying _name_append_suffix generator: '
                 'Appending suffix ' + suffix + ' to all '
                 'sequence IDs.')
    for record in records:
        # _update_id mutates and returns the same record object.
        yield _update_id(record, record.id + suffix)
def name_insert_prefix(records, prefix):
    """
    Given a set of sequences, insert a prefix for each sequence's name.
    """
    logging.info('Applying _name_insert_prefix generator: '
                 'Inserting prefix ' + prefix + ' for all '
                 'sequence IDs.')
    for record in records:
        # _update_id mutates and returns the same record object.
        yield _update_id(record, prefix + record.id)
def name_include(records, filter_regex):
    """
    Keep only records whose ID or description matches ``filter_regex``.

    NOTE(review): earlier documentation claimed the match ignores case, but
    no re.IGNORECASE flag is set — the search is case-sensitive.
    """
    logging.info('Applying _name_include generator: '
                 'including only IDs matching ' + filter_regex +
                 ' in results.')
    pattern = re.compile(filter_regex)
    for record in records:
        if any(pattern.search(field)
               for field in (record.id, record.description)):
            yield record
def name_replace(records, search_regex, replace_pattern):
    """
    Given a set of sequences, replace all occurrences of search_regex
    with replace_pattern.

    If the ID and the first word of the description match, assume the
    description is FASTA-like and apply the transform to the entire
    description, then set the ID from the first word.  Otherwise apply
    the transform to the ID and the description individually.

    NOTE(review): earlier documentation claimed the match ignores case,
    but no re.IGNORECASE flag is set — the search is case-sensitive.
    """
    regex = re.compile(search_regex)
    for record in records:
        # An empty/whitespace-only description has no first word; treat it
        # as not matching the ID (previously raised IndexError).
        words = record.description.split(None, 1)
        maybe_id = words[0] if words else ''
        if words and maybe_id == record.id:
            record.description = regex.sub(replace_pattern, record.description)
            record.id = record.description.split(None, 1)[0]
        else:
            record.id = regex.sub(replace_pattern, record.id)
            record.description = regex.sub(replace_pattern, record.description)
        yield record
def seq_include(records, filter_regex):
    """
    Keep only records whose sequence matches ``filter_regex``.

    NOTE(review): earlier documentation claimed the match ignores case, but
    no re.IGNORECASE flag is set — the search is case-sensitive.
    """
    pattern = re.compile(filter_regex)
    for record in records:
        if pattern.search(str(record.seq)):
            yield record
def sample(records, k, random_seed=None):
    """Choose a length-``k`` subset of ``records`` via reservoir sampling
    (Algorithm R), keeping elements in reservoir-slot order. If
    k > len(records), all are returned. If an integer ``random_seed`` is
    provided, sets ``random.seed()``.
    """
    if random_seed is not None:
        random.seed(random_seed)
    reservoir = []
    for seen, record in enumerate(records):
        if len(reservoir) < k:
            reservoir.append(record)
            continue
        # Keep this record with probability k/(seen+1), evicting a random slot.
        slot = random.randint(0, seen)
        if slot < k:
            reservoir[slot] = record
    return reservoir
def head(records, head):
    """
    Limit results to the top N records.
    With the leading `-', print all but the last N records.
    """
    logging.info('Applying _head generator: '
                 'limiting results to top ' + head + ' records.')
    if head == '-0':
        # '-0' means "all but the last zero", i.e. everything.
        yield from records
    elif '-' in head:
        # Negative count: need the total first, so buffer the records.
        with _record_buffer(records) as r:
            total = sum(1 for _ in r())
            stop = max(total + int(head), 0)
            yield from itertools.islice(r(), stop)
    else:
        yield from itertools.islice(records, int(head))
def tail(records, tail):
    """
    Limit results to the bottom N records.
    Use +N to output records starting with the Nth.
    """
    # Fixed log message: this generator keeps the *bottom* N records.
    logging.info('Applying _tail generator: '
                 'limiting results to bottom ' + tail + ' records.')
    if tail == '+0':
        # '+0' (like tail(1) '+1'-style addressing) means everything.
        for record in records:
            yield record
    elif '+' in tail:
        # '+N': start emitting with the Nth record (1-indexed).
        start = int(tail) - 1
        for record in itertools.islice(records, start, None):
            yield record
    else:
        # Plain N: need the total count first, so buffer the records.
        with _record_buffer(records) as r:
            record_count = sum(1 for record in r())
            start_index = max(record_count - int(tail), 0)
            for record in itertools.islice(r(), start_index, None):
                yield record
def gap_proportion(sequences, gap_chars='-'):
    """
    Generates a list with the proportion of gaps by index in a set of
    sequences.

    :raises ValueError: if sequences differ in length (not an alignment)
    """
    aln_len = None
    gaps = []
    sequence_count = 0
    for sequence in sequences:
        if aln_len is None:
            # First sequence fixes the expected alignment width.
            aln_len = len(sequence)
            gaps = [0] * aln_len
        elif len(sequence) != aln_len:
            raise ValueError(("Unexpected sequence length {0}. Is this "
                              "an alignment?").format(len(sequence)))
        # Tally gap characters column by column.
        for position, residue in enumerate(sequence.seq):
            if residue in gap_chars:
                gaps[position] += 1
        sequence_count += 1
    denominator = float(sequence_count)
    # Empty input yields [] (the comprehension never divides).
    return [gap_count / denominator for gap_count in gaps]
def squeeze(records, gap_threshold=1.0):
    """
    Remove any gaps that are present in the same position across all sequences
    in an alignment. Takes a second sequence iterator for determining gap
    positions.

    :param gap_threshold: drop a column when its gap proportion is >= this
        value (1.0 = only all-gap columns are removed)
    """
    # Two passes (measure gap proportions, then rewrite records): buffer.
    with _record_buffer(records) as r:
        gap_proportions = gap_proportion(r())
        keep_columns = [g < gap_threshold for g in gap_proportions]
        for record in r():
            sequence = str(record.seq)
            # Trim
            squeezed = itertools.compress(sequence, keep_columns)
            yield SeqRecord(Seq(''.join(squeezed)), id=record.id,
                            description=record.description) | Remove any gaps that are present in the same position across all sequences
in an alignment. Takes a second sequence iterator for determining gap
positions. | entailment |
def strip_range(records):
    """
    Cut off trailing /<start>-<stop> ranges from IDs. Ranges must be 1-indexed and
    the stop integer must not be less than the start integer.
    """
    logging.info('Applying _strip_range generator: '
                 'removing /<start>-<stop> ranges from IDs')
    # Split up and be greedy.
    cut_regex = re.compile(r"(?P<id>.*)\/(?P<start>\d+)\-(?P<stop>\d+)")
    for record in records:
        name = record.id
        match = cut_regex.match(str(record.id))
        if match:
            sequence_id = match.group('id')
            start = int(match.group('start'))
            stop = int(match.group('stop'))
            # Only strip well-formed ranges: 1-indexed and start <= stop.
            if start > 0 and start <= stop:
                name = sequence_id
        yield SeqRecord(record.seq, id=name,
                        description='') | Cut off trailing /<start>-<stop> ranges from IDs. Ranges must be 1-indexed and
the stop integer must not be less than the start integer. | entailment |
def transcribe(records, transcribe):
    """
    Perform transcription or back-transcription.
    transcribe must be one of the following:
        dna2rna
        rna2dna

    NOTE(review): any other value silently drops every record — confirm
    argument validation happens upstream (e.g. argparse choices).
    """
    logging.info('Applying _transcribe generator: '
                 'operation to perform is ' + transcribe + '.')
    for record in records:
        sequence = str(record.seq)
        description = record.description
        name = record.id
        if transcribe == 'dna2rna':
            dna = Seq(sequence, IUPAC.ambiguous_dna)
            rna = dna.transcribe()
            yield SeqRecord(rna, id=name, description=description)
        elif transcribe == 'rna2dna':
            rna = Seq(sequence, IUPAC.ambiguous_rna)
            dna = rna.back_transcribe()
            yield SeqRecord(dna, id=name, description=description) | Perform transcription or back-transcription.
transcribe must be one of the following:
dna2rna
rna2dna | entailment |
def translate(records, translate):
    """
    Perform translation from generic DNA/RNA to proteins. Bio.Seq
    does not perform back-translation because the codons would
    more-or-less be arbitrary. Option to translate only up until
    reaching a stop codon. translate must be one of the following:
        dna2protein
        dna2proteinstop
        rna2protein
        rna2proteinstop
    """
    logging.info('Applying translation generator: '
                 'operation to perform is ' + translate + '.')
    # A trailing 'stop' selects translation up to the first stop codon.
    to_stop = translate.endswith('stop')
    # First three characters pick the source alphabet ('dna' or 'rna').
    source_type = translate[:3]
    alphabet = {'dna': IUPAC.ambiguous_dna, 'rna': IUPAC.ambiguous_rna}[source_type]
    # Get a translation table
    table = {'dna': CodonTable.ambiguous_dna_by_name["Standard"],
             'rna': CodonTable.ambiguous_rna_by_name["Standard"]}[source_type]
    # Handle ambiguities by replacing ambiguous codons with 'X'
    # TODO: this copy operation causes infinite recursion with python3.6 -
    # not sure why it was here to begin with.
    # table = copy.deepcopy(table)
    # NOTE(review): wrapping forward_table mutates the shared CodonTable
    # instance in place, affecting every caller in the process — confirm.
    table.forward_table = CodonWarningTable(table.forward_table)
    for record in records:
        sequence = str(record.seq)
        seq = Seq(sequence, alphabet)
        protein = seq.translate(table, to_stop=to_stop)
        yield SeqRecord(protein, id=record.id, description=record.description) | Perform translation from generic DNA/RNA to proteins. Bio.Seq
does not perform back-translation because the codons would
more-or-less be arbitrary. Option to translate only up until
reaching a stop codon. translate must be one of the following:
dna2protein
dna2proteinstop
rna2protein
rna2proteinstop | entailment |
def max_length_discard(records, max_length):
    """
    Discard any records that are longer than max_length.
    """
    # Fixed log message: the original dropped the limit value entirely
    # ("longer than '.'"); use lazy %-args like min_length_discard does.
    logging.info('Applying _max_length_discard generator: '
                 'discarding records longer than %d.', max_length)
    for record in records:
        if len(record) > max_length:
            # Discard
            logging.debug('Discarding long sequence: %s, length=%d',
                          record.id, len(record))
        else:
            yield record
def min_length_discard(records, min_length):
    """
    Discard any records that are shorter than min_length.
    """
    logging.info('Applying _min_length_discard generator: '
                 'discarding records shorter than %d.', min_length)
    for record in records:
        if len(record) >= min_length:
            yield record
            continue
        logging.debug('Discarding short sequence: %s, length=%d',
                      record.id, len(record))
def min_ungap_length_discard(records, min_length):
    """
    Discard any records that are shorter than min_length after removing gaps.
    """
    for record in records:
        degapped = ungap_all(record)
        if len(degapped) >= min_length:
            yield record
def sort_length(source_file, source_file_type, direction=1):
    """
    Sort sequences by length. 1 is ascending (default) and 0 is descending.

    Yields records from ``source_file`` in length order; ties fall back to
    ID order (the sort key is ``(length, id)``).
    """
    direction_text = 'ascending' if direction == 1 else 'descending'
    logging.info('Indexing sequences by length: %s', direction_text)
    # Adapted from the Biopython tutorial example.
    # Get the lengths and ids, and sort on length
    len_and_ids = sorted((len(rec), rec.id)
                         for rec in SeqIO.parse(source_file, source_file_type))
    if direction == 0:
        ids = reversed([seq_id for (length, seq_id) in len_and_ids])
    else:
        ids = [seq_id for (length, seq_id) in len_and_ids]
    del len_and_ids  # free this memory
    # SeqIO.index does not handle gzip instances
    if isinstance(source_file, gzip.GzipFile):
        # Decompress into a named temp file so SeqIO.index can reopen by name.
        tmpfile = tempfile.NamedTemporaryFile()
        source_file.seek(0)
        tmpfile.write(source_file.read())
        tmpfile.seek(0)
        source_file = tmpfile
    # Random-access index lets us emit records in sorted-ID order lazily.
    record_index = SeqIO.index(source_file.name, source_file_type)
    for seq_id in ids:
        yield record_index[seq_id] | Sort sequences by length. 1 is ascending (default) and 0 is descending. | entailment |
def batch(iterable, chunk_size):
    """
    Return items from iterable in chunk_size bits.
    If len(iterable) % chunk_size > 0, the last item returned will be shorter.
    """
    source = iter(iterable)
    while True:
        chunk = list(itertools.islice(source, chunk_size))
        if not chunk:
            return
        yield chunk
def action(arguments):
    """
    Run the back-translation alignment: map an aligned protein file onto
    the corresponding nucleotide sequences and write the codon alignment
    to ``arguments.out_file``.
    """
    # Ignore SIGPIPE, for head support
    common.exit_on_sigpipe()
    logging.basicConfig()
    prot_sequences = SeqIO.parse(arguments.protein_align,
                                 fileformat.from_handle(arguments.protein_align))
    nucl_sequences = SeqIO.parse(arguments.nucl_align,
                                 fileformat.from_handle(arguments.nucl_align))
    instance = AlignmentMapper(TRANSLATION_TABLES[arguments.translation_table],
                               arguments.fail_action)
    SeqIO.write(instance.map_all(prot_sequences, nucl_sequences),
                arguments.out_file, fileformat.from_filename(arguments.out_file.name)) | Run | entailment |
def _validate_translation(self, aligned_prot, aligned_nucl):
    """
    Given a seq for protein and nucleotide, ensure that the translation holds

    :raises ValueError: when a codon's translation disagrees with the
        aligned amino acid, or on an unknown codon not mapped to 'X' when
        ``self.unknown_action`` is 'fail'
    """
    # Split the nucleotide string into its codon triplets.
    codons = [''.join(i) for i in batch(str(aligned_nucl), 3)]
    for codon, aa in zip(codons, str(aligned_prot)):
        # Check gaps
        if codon == '---' and aa == '-':
            continue
        try:
            trans = self.translation_table.forward_table[codon]
            if not trans == aa:
                raise ValueError("Codon {0} translates to {1}, not {2}".format(
                    codon, trans, aa))
        except (KeyError, CodonTable.TranslationError):
            # Unknown/ambiguous codon: only an 'X' amino acid is accepted
            # silently; otherwise behaviour follows self.unknown_action.
            if aa != 'X':
                if self.unknown_action == 'fail':
                    raise ValueError("Unknown codon: {0} mapped to {1}".format(
                        codon, aa))
                elif self.unknown_action == 'warn':
                    logging.warn('Cannot verify that unknown codon %s '
                                 'maps to %s', codon, aa)
    return True | Given a seq for protein and nucleotide, ensure that the translation holds | entailment |
def map_alignment(self, prot_seq, nucl_seq):
    """
    Use aligned prot_seq to align nucl_seq

    :raises ValueError: when the ungapped nucleotide sequence does not
        contain exactly three bases per ungapped amino acid
    """
    if prot_seq.id != nucl_seq.id:
        logging.warning(
            'ID mismatch: %s != %s. Are the sequences in the same order?',
            prot_seq.id, nucl_seq.id)
    # Ungap nucleotides
    codons = batch(str(nucl_seq.seq.ungap('-')), 3)
    codons = [''.join(i) for i in codons]
    codon_iter = iter(codons)
    ungapped_prot = str(prot_seq.seq).replace('-', '')
    if len(ungapped_prot) != len(codons):
        # Build a three-row debug view (protein / codons / translated
        # codons) for the error message.
        table = self.translation_table.forward_table
        prot_str = ' '.join(' ' + p + ' ' for p in ungapped_prot)
        codon_str = ' '.join(codons)
        trans_str = ' '.join(' ' + table.get(codon, 'X') + ' '
                             for codon in codons)
        raise ValueError("""Length of codon sequence ({0}) does not match \
length of protein sequence ({1}) for {2}
Protein: {3}
Codons: {4}
Trans. Codons: {5}""".format(len(codons), len(ungapped_prot), nucl_seq.id, prot_str,
                             codon_str, trans_str))
    try:
        # Each protein gap becomes a codon gap; residues consume one codon.
        nucl_align = ['---' if p == '-' else next(codon_iter)
                      for p in str(prot_seq.seq)]
    except StopIteration:
        assert False  # Should be checked above
    result = SeqRecord(Seq(''.join(nucl_align)), id=nucl_seq.id,
                       description=nucl_seq.description)
    # Validate
    self._validate_translation(prot_seq.seq.upper(), result.seq.upper())
    return result | Use aligned prot_seq to align nucl_seq | entailment |
def map_all(self, prot_alignment, nucl_sequences):
    """
    Convert protein sequences to nucleotide alignment

    The two inputs must be equal-length, order-matched iterables; a
    ValueError is raised as soon as one runs out before the other.
    """
    for prot, nucl in itertools.zip_longest(prot_alignment, nucl_sequences):
        if prot is None:
            raise ValueError("Exhausted protein sequences")
        if nucl is None:
            raise ValueError("Exhausted nucleotide sequences")
        yield self.map_alignment(prot, nucl)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.