_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
q36600 | ApiServiceSetupInfo.add_role_type_info | train | def add_role_type_info(self, role_type, config):
"""
Add a role type setup info.
@param role_type: Role type
@param config: A dictionary of role type configuration
"""
rt_config = config_to_api_list(config)
rt_config['roleType'] = role_type
if self.config is None:
self.config = { }
if not self.config.has_key(ROLETYPES_CFG_KEY):
self.config[ROLETYPES_CFG_KEY] = [ ]
self.config[ROLETYPES_CFG_KEY].append(rt_config) | python | {
"resource": ""
} |
q36601 | ApiServiceSetupInfo.add_role_info | train | def add_role_info(self, role_name, role_type, host_id, config=None):
"""
Add a role info. The role will be created along with the service setup.
@param role_name: Role name
@param role_type: Role type
@param host_id: The host where the role should run
@param config: (Optional) A dictionary of role config values
"""
if self.roles is None:
self.roles = [ ]
api_config_list = config is not None and config_to_api_list(config) or None
self.roles.append({
'name' : role_name,
'type' : role_type,
'hostRef' : { 'hostId' : host_id },
'config' : api_config_list }) | python | {
"resource": ""
} |
q36602 | create_host | train | def create_host(resource_root, host_id, name, ipaddr, rack_id=None):
"""
Create a host
@param resource_root: The root Resource object.
@param host_id: Host id
@param name: Host name
@param ipaddr: IP address
@param rack_id: Rack id. Default None
@return: An ApiHost object
"""
apihost = ApiHost(resource_root, host_id, name, ipaddr, rack_id)
return call(resource_root.post, HOSTS_PATH, ApiHost, True, data=[apihost])[0] | python | {
"resource": ""
} |
q36603 | get_all_hosts | train | def get_all_hosts(resource_root, view=None):
"""
Get all hosts
@param resource_root: The root Resource object.
@return: A list of ApiHost objects.
"""
return call(resource_root.get, HOSTS_PATH, ApiHost, True,
params=view and dict(view=view) or None) | python | {
"resource": ""
} |
q36604 | ApiHost.enter_maintenance_mode | train | def enter_maintenance_mode(self):
    """
    Put the host in maintenance mode.
    @return: Reference to the completed command.
    @since: API v2
    """
    cmd = self._cmd('enterMaintenanceMode')
    # On success, re-fetch this host from the server and refresh our local
    # fields so callers immediately see the updated maintenance state.
    if cmd.success:
        self._update(get_host(self._get_resource_root(), self.hostId))
    return cmd | python | {
  "resource": ""
} |
q36605 | ApiHost.migrate_roles | train | def migrate_roles(self, role_names_to_migrate, destination_host_id,
                  clear_stale_role_data):
    """
    Migrate roles from this host to a different host.
    Currently, this command applies only to HDFS NameNode, JournalNode,
    and Failover Controller roles. In order to migrate these roles:
    - HDFS High Availability must be enabled, using quorum-based storage.
    - HDFS must not be configured to use a federated nameservice.
    I{B{Migrating a NameNode role requires cluster downtime.}} HDFS, along
    with all of its dependent services, will be stopped at the beginning
    of the migration process, and restarted at its conclusion.
    If the active NameNode is selected for migration, a manual failover
    will be performed before the role is migrated. The role will remain in
    standby mode after the migration is complete.
    When migrating a NameNode role, the co-located Failover Controller
    role must be migrated as well. The Failover Controller role name must
    be included in the list of role names to migrate specified in the
    arguments to this command (it will not be included implicitly). This
    command does not allow a Failover Controller role to be moved by itself,
    although it is possible to move a JournalNode independently.
    @param role_names_to_migrate: list of role names to migrate.
    @param destination_host_id: the id of the host to which the roles
                                should be migrated.
    @param clear_stale_role_data: true to delete existing stale role data,
                                  if any. For example, when migrating a
                                  NameNode, if the destination host has
                                  stale data in the NameNode data
                                  directories (possibly because a NameNode
                                  role was previously located there), this
                                  stale data will be deleted before migrating
                                  the role.
    @return: Reference to the submitted command.
    @since: API v10
    """
    # Keys mirror the JSON field names expected by the migrateRoles endpoint.
    args = dict(
        roleNamesToMigrate = role_names_to_migrate,
        destinationHostId = destination_host_id,
        clearStaleRoleData = clear_stale_role_data)
    return self._cmd('migrateRoles', data=args, api_version=10) | python | {
  "resource": ""
} |
q36606 | get_host_map | train | def get_host_map(root):
''' Gets a mapping between CM hostId and Nagios host information
The key is the CM hostId
The value is an object containing the Nagios hostname and host address
'''
hosts_map = {}
for host in root.get_all_hosts():
hosts_map[host.hostId] = {"hostname": NAGIOS_HOSTNAME_FORMAT % (host.hostname,),
"address": host.ipAddress}
''' Also define "virtual hosts" for the CM clusters- they will be the hosts
to which CM services are mapped
'''
for cluster in root.get_all_clusters():
hosts_map[cluster.name] = {"hostname": cluster.name,
"address": quote(cluster.name)}
hosts_map[CM_DUMMY_HOST] = {"hostname": CM_DUMMY_HOST,
"address": CM_DUMMY_HOST}
return hosts_map | python | {
"resource": ""
} |
q36607 | get_services | train | def get_services(root, hosts_map, view=None):
''' Gets a list of objects representing the Nagios services.
Each object contains the Nagios hostname, service name, service display
name, and service health summary.
'''
services_list = []
mgmt_service = root.get_cloudera_manager().get_service()
services_list.append({"hostname": CM_DUMMY_HOST,
"name": mgmt_service.name,
"display_name": "CM Managed Service: %s" % (mgmt_service.name,),
"status": get_status(mgmt_service),
"url": mgmt_service.serviceUrl,
"health_summary": mgmt_service.healthSummary})
for cm_role in root.get_cloudera_manager().get_service().get_all_roles(view):
services_list.append({"hostname": hosts_map[cm_role.hostRef.hostId]["hostname"],
"name": cm_role.name,
"display_name": "CM Management Service: %s" % (cm_role.name,),
"status": get_status(cm_role),
"url": cm_role.roleUrl,
"health_summary": cm_role.healthSummary})
for cm_host in root.get_all_hosts(view):
services_list.append({"hostname": hosts_map[cm_host.hostId]["hostname"],
"name": "cm-host-%s" % (cm_host.hostname,),
"display_name": "CM Managed Host: %s" % (cm_host.hostname,),
"status": get_status(cm_host),
"url": cm_host.hostUrl,
"health_summary": cm_host.healthSummary})
for cluster in root.get_all_clusters(view):
for service in cluster.get_all_services(view):
services_list.append({"hostname": cluster.name,
"name": service.name,
"display_name": "CM Managed Service: %s" % (service.name,),
"status": get_status(service),
"url": service.serviceUrl,
"health_summary": service.healthSummary})
for role in service.get_all_roles(view):
services_list.append({"hostname": hosts_map[role.hostRef.hostId]["hostname"],
"name": role.name,
"display_name": "%s:%s" % (cluster.name, role.name,),
"status": get_status(role),
"url": role.roleUrl,
"health_summary": role.healthSummary})
return services_list | python | {
"resource": ""
} |
q36608 | submit_status_external_cmd | train | def submit_status_external_cmd(cmd_file, status_file):
''' Submits the status lines in the status_file to Nagios' external cmd file.
'''
try:
with open(cmd_file, 'a') as cmd_file:
cmd_file.write(status_file.read())
except IOError:
exit("Fatal error: Unable to write to Nagios external command file '%s'.\n"
"Make sure that the file exists and is writable." % (cmd_file,)) | python | {
"resource": ""
} |
q36609 | Resource.invoke | train | def invoke(self, method, relpath=None, params=None, data=None, headers=None):
    """
    Invoke an API method.
    @param method: HTTP method, passed straight through to the transport
                   client's execute().
    @param relpath: Path relative to this resource's base URI.
    @param params: Query parameters.
    @param data: Request body.
    @param headers: Extra HTTP headers.
    @return: Raw body or JSON dictionary (if response content type is JSON).
    """
    path = self._join_uri(relpath)
    resp = self._client.execute(method,
                                path,
                                params=params,
                                data=data,
                                headers=headers)
    # Reading the response can itself fail; wrap so the error names the call.
    # (Python 2 'except E, ex' syntax is used throughout this module.)
    try:
        body = resp.read()
    except Exception, ex:
        raise Exception("Command '%s %s' failed: %s" %
                        (method, path, ex))
    # Log only a 32-byte prefix to keep debug output bounded.
    self._client.logger.debug(
        "%s Got response: %s%s" %
        (method, body[:32], len(body) > 32 and "..." or ""))
    # Is the response application/json?
    if len(body) != 0 and \
        resp.info().getmaintype() == "application" and \
        resp.info().getsubtype() == "json":
        try:
            json_dict = json.loads(body)
            return json_dict
        except Exception, ex:
            # Log the full (undecodable) body before re-raising.
            self._client.logger.exception('JSON decode error: %s' % (body,))
            raise ex
    else:
        return body | python | {
  "resource": ""
} |
q36610 | create_host_template | train | def create_host_template(resource_root, name, cluster_name):
"""
Create a host template.
@param resource_root: The root Resource object.
@param name: Host template name
@param cluster_name: Cluster name
@return: An ApiHostTemplate object for the created host template.
@since: API v3
"""
apitemplate = ApiHostTemplate(resource_root, name, [])
return call(resource_root.post,
HOST_TEMPLATES_PATH % (cluster_name,),
ApiHostTemplate, True, data=[apitemplate], api_version=3)[0] | python | {
"resource": ""
} |
q36611 | get_host_template | train | def get_host_template(resource_root, name, cluster_name):
"""
Lookup a host template by name in the specified cluster.
@param resource_root: The root Resource object.
@param name: Host template name.
@param cluster_name: Cluster name.
@return: An ApiHostTemplate object.
@since: API v3
"""
return call(resource_root.get,
HOST_TEMPLATE_PATH % (cluster_name, name),
ApiHostTemplate, api_version=3) | python | {
"resource": ""
} |
q36612 | get_all_host_templates | train | def get_all_host_templates(resource_root, cluster_name="default"):
    """
    Get all host templates in a cluster.
    @param resource_root: The root Resource object.
    @param cluster_name: Cluster name.
    @return: ApiList of ApiHostTemplate objects for all host templates in a cluster.
    @since: API v3
    """
    return call(resource_root.get,
                HOST_TEMPLATES_PATH % (cluster_name,),
                ApiHostTemplate, True, api_version=3) | python | {
  "resource": ""
} |
q36613 | delete_host_template | train | def delete_host_template(resource_root, name, cluster_name):
"""
Delete a host template identified by name in the specified cluster.
@param resource_root: The root Resource object.
@param name: Host template name.
@param cluster_name: Cluster name.
@return: The deleted ApiHostTemplate object.
@since: API v3
"""
return call(resource_root.delete,
HOST_TEMPLATE_PATH % (cluster_name, name),
ApiHostTemplate, api_version=3) | python | {
"resource": ""
} |
q36614 | update_host_template | train | def update_host_template(resource_root, name, cluster_name, api_host_template):
"""
Update a host template identified by name in the specified cluster.
@param resource_root: The root Resource object.
@param name: Host template name.
@param cluster_name: Cluster name.
@param api_host_template: The updated host template.
@return: The updated ApiHostTemplate.
@since: API v3
"""
return call(resource_root.put,
HOST_TEMPLATE_PATH % (cluster_name, name),
ApiHostTemplate, data=api_host_template, api_version=3) | python | {
"resource": ""
} |
q36615 | ApiHostTemplate.rename | train | def rename(self, new_name):
    """
    Rename a host template.
    @param new_name: New host template name.
    @return: An ApiHostTemplate object.
    """
    # Shallow-copy first so the update payload is built on a clone and this
    # object is not mutated before the server call succeeds.
    update = copy.copy(self)
    update.name = new_name
    return self._do_update(update) | python | {
  "resource": ""
} |
q36616 | ApiHostTemplate.set_role_config_groups | train | def set_role_config_groups(self, role_config_group_refs):
    """
    Updates the role config groups in a host template.
    @param role_config_group_refs: List of role config group refs.
    @return: An ApiHostTemplate object.
    """
    # Same clone-then-update pattern as rename(): never mutate self before
    # the server confirms the change.
    update = copy.copy(self)
    update.roleConfigGroupRefs = role_config_group_refs
    return self._do_update(update) | python | {
  "resource": ""
} |
q36617 | list_supported_categories | train | def list_supported_categories():
"""
Prints a list of supported external account category names.
For example, "AWS" is a supported external account category name.
"""
categories = get_supported_categories(api)
category_names = [category.name for category in categories]
print ("Supported account categories by name: {0}".format(
COMMA_WITH_SPACE.join(map(str, category_names)))) | python | {
"resource": ""
} |
q36618 | list_supported_types | train | def list_supported_types(category_name):
"""
Prints a list of supported external account type names for the given
category_name. For example, "AWS_ACCESS_KEY_AUTH" is a supported external
account type name for external account category "AWS".
"""
types = get_supported_types(api, category_name)
type_names = [type.name for type in types]
print ("Supported account types by name for '{0}': [{1}]".format(
category_name, COMMA_WITH_SPACE.join(map(str, type_names)))) | python | {
"resource": ""
} |
q36619 | list_credentials_by_name | train | def list_credentials_by_name(type_name):
"""
Prints a list of available credential names for the given type_name.
"""
accounts = get_all_external_accounts(api, type_name)
account_names = [account.name for account in accounts]
print ("List of credential names for '{0}': [{1}]".format(
type_name, COMMA_WITH_SPACE.join(map(str, account_names)))) | python | {
"resource": ""
} |
q36620 | call_s3guard_prune | train | def call_s3guard_prune(credential_name):
    """
    Runs S3Guard prune command on external account associated with the
    given credential_name.
    """
    # Get the AWS credential account associated with the credential
    account = get_external_account(api, credential_name)
    # Invoke the prune command for the account by its name
    cmd = account.external_account_cmd_by_name('S3GuardPrune')
    print ("Issued '{0}' command with id '{1}'".format(cmd.name, cmd.id))
    print ("Waiting for command {0} to finish...".format(cmd.id))
    # Block until the server reports the command has completed.
    cmd = cmd.wait()
    print ("Command succeeded: {0}".format(cmd.success)) | python | {
  "resource": ""
} |
q36621 | initialize_api | train | def initialize_api(args):
    """
    Initializes the global API instance using the given arguments.
    @param args: arguments provided to the script.
    """
    # Rebinds the module-level 'api' used by the other helpers in this script.
    global api
    api = ApiResource(server_host=args.hostname, server_port=args.port,
                      username=args.username, password=args.password,
                      version=args.api_version, use_tls=args.use_tls) | python | {
  "resource": ""
} |
q36622 | validate_api_compatibility | train | def validate_api_compatibility(args):
    """
    Validates the API version.
    @param args: arguments provided to the script.
    """
    # NOTE(review): this only prints the error; it does not abort, so the
    # caller continues with the unsupported version — confirm this is intended.
    if args.api_version and args.api_version < MINIMUM_SUPPORTED_API_VERSION:
        print("ERROR: Given API version: {0}. Minimum supported API version: {1}"
              .format(args.api_version, MINIMUM_SUPPORTED_API_VERSION)) | python | {
  "resource": ""
} |
q36623 | get_login_credentials | train | def get_login_credentials(args):
    """
    Gets the login credentials from the user, if not specified while invoking
    the script.
    @param args: arguments provided to the script.
    """
    # raw_input is Python 2; getpass hides the password while typing.
    if not args.username:
        args.username = raw_input("Enter Username: ")
    if not args.password:
        args.password = getpass.getpass("Enter Password: ") | python | {
  "resource": ""
} |
q36624 | main | train | def main():
    """
    The "main" entry that controls the flow of the script based
    on the provided arguments.
    """
    setup_logging(logging.INFO)
    # Parse arguments
    parser = argparse.ArgumentParser(
        description="A utility to interact with AWS using Cloudera Manager.")
    parser.add_argument('-H', '--hostname', action='store', dest='hostname',
                        required=True,
                        help='The hostname of the Cloudera Manager server.')
    parser.add_argument('-p', action='store', dest='port', type=int,
                        help='The port of the Cloudera Manager server. Defaults '
                             'to 7180 (http) or 7183 (https).')
    parser.add_argument('-u', '--username', action='store', dest='username',
                        help='Login name.')
    parser.add_argument('--password', action='store', dest='password',
                        help='Login password.')
    parser.add_argument('--api-version', action='store', dest='api_version',
                        type=int,
                        default=MINIMUM_SUPPORTED_API_VERSION,
                        help='API version to be used. Defaults to {0}.'.format(
                            MINIMUM_SUPPORTED_API_VERSION))
    parser.add_argument('--tls', action='store_const', dest='use_tls',
                        const=True, default=False,
                        help='Whether to use tls (https).')
    parser.add_argument('-c', '--show-categories', action='store_true',
                        default=False, dest='show_categories',
                        help='Prints a list of supported external account '
                             'category names. For example, "AWS" is a supported '
                             'external account category name.')
    parser.add_argument('-t', '--show-types', action='store',
                        dest='category_name',
                        help='Prints a list of supported external account type '
                             'names for the given CATEGORY_NAME. For example, '
                             '"AWS_ACCESS_KEY_AUTH" is a supported external '
                             'account type name for external account category '
                             '"AWS".')
    parser.add_argument('-n', '--show-credentials', action='store',
                        dest='type_name',
                        help='Prints a list of available credential names for '
                             'the given TYPE_NAME.')
    parser.add_argument('--prune', action='store', dest='credential_name',
                        help='Runs S3Guard prune command on external account '
                             'associated with the given CREDENTIAL_NAME.')
    parser.add_argument('--version', action='version', version='%(prog)s 1.0')
    args = parser.parse_args()
    # Use the default port if required.
    if not args.port:
        if args.use_tls:
            args.port = DEFAULT_HTTPS_PORT
        else:
            args.port = DEFAULT_HTTP_PORT
    validate_api_compatibility(args)
    get_login_credentials(args)
    initialize_api(args)
    # Perform the AWS operation based on the input arguments.
    # Exactly one operation runs; the first matching flag wins.
    if args.show_categories:
        list_supported_categories()
    elif args.category_name:
        list_supported_types(args.category_name)
    elif args.type_name:
        list_credentials_by_name(args.type_name)
    elif args.credential_name:
        call_s3guard_prune(args.credential_name)
    else:
        # No operation flag given: show usage and fail.
        print ("ERROR: No arguments given to perform any AWS operation.")
        parser.print_help()
        sys.exit(1) | python | {
  "resource": ""
} |
q36625 | get_root_resource | train | def get_root_resource(server_host, server_port=None,
username="admin", password="admin",
use_tls=False, version=API_CURRENT_VERSION):
"""
See ApiResource.
"""
return ApiResource(server_host, server_port, username, password, use_tls,
version) | python | {
"resource": ""
} |
q36626 | ApiResource.create_cluster | train | def create_cluster(self, name, version=None, fullVersion=None):
"""
Create a new cluster.
@param name: Cluster name.
@param version: Cluster major CDH version, e.g. 'CDH5'. Ignored if
fullVersion is specified.
@param fullVersion: Complete CDH version, e.g. '5.1.2'. Overrides major
version if both specified.
@return: The created cluster.
"""
return clusters.create_cluster(self, name, version, fullVersion) | python | {
"resource": ""
} |
q36627 | ApiResource.create_host | train | def create_host(self, host_id, name, ipaddr, rack_id = None):
"""
Create a host.
@param host_id: The host id.
@param name: Host name
@param ipaddr: IP address
@param rack_id: Rack id. Default None.
@return: An ApiHost object
"""
return hosts.create_host(self, host_id, name, ipaddr, rack_id) | python | {
"resource": ""
} |
q36628 | ApiResource.get_metrics | train | def get_metrics(self, path, from_time, to_time, metrics, view, params=None):
"""
Generic function for querying metrics.
@param from_time: A datetime; start of the period to query (optional).
@param to_time: A datetime; end of the period to query (default = now).
@param metrics: List of metrics to query (default = all).
@param view: View to materialize ('full' or 'summary')
@param params: Other query parameters.
@return: List of metrics and their readings.
"""
if not params:
params = { }
if from_time:
params['from'] = from_time.isoformat()
if to_time:
params['to'] = to_time.isoformat()
if metrics:
params['metrics'] = metrics
if view:
params['view'] = view
resp = self.get(path, params=params)
return types.ApiList.from_json_dict(resp, self, types.ApiMetric) | python | {
"resource": ""
} |
q36629 | ApiResource.query_timeseries | train | def query_timeseries(self, query, from_time=None, to_time=None, by_post=False):
"""
Query time series.
@param query: Query string.
@param from_time: Start of the period to query (optional).
@param to_time: End of the period to query (default = now).
@return: A list of ApiTimeSeriesResponse.
"""
return timeseries.query_timeseries(self, query, from_time, to_time, by_post=by_post) | python | {
"resource": ""
} |
q36630 | echo | train | def echo(root_resource, message):
"""Have the server echo our message back."""
params = dict(message=message)
return root_resource.get(ECHO_PATH, params) | python | {
"resource": ""
} |
q36631 | echo_error | train | def echo_error(root_resource, message):
"""Generate an error, but we get to set the error message."""
params = dict(message=message)
return root_resource.get(ECHO_ERROR_PATH, params) | python | {
"resource": ""
} |
q36632 | ClouderaShell.service_action | train | def service_action(self, service, action):
"Perform given action on service for the selected cluster"
try:
service = api.get_cluster(self.cluster).get_service(service)
except ApiException:
print("Service not found")
return None
if action == "start":
service.start()
if action == "restart":
service.restart()
if action == "stop":
service.stop()
return True | python | {
"resource": ""
} |
q36633 | ClouderaShell.cluster_autocomplete | train | def cluster_autocomplete(self, text, line, start_index, end_index):
"autocomplete for the use command, obtain list of clusters first"
if not self.CACHED_CLUSTERS:
clusters = [cluster.name for cluster in api.get_all_clusters()]
self.CACHED_CLUSTERS = clusters
if text:
return [cluster for cluster in self.CACHED_CLUSTERS if cluster.startswith(text)]
else:
return self.CACHED_CLUSTERS | python | {
"resource": ""
} |
q36634 | ClouderaShell.roles_autocomplete | train | def roles_autocomplete(self, text, line, start_index, end_index):
    "Return full list of roles"
    if '-' not in line:
        # Append a dash to each service, makes for faster autocompletion of
        # roles
        return [s + '-' for s in self.services_autocomplete(text, line, start_index, end_index)]
    else:
        # Token looks like "<service>-<partial role>"; split once on the
        # first dash so role names containing dashes still work.
        key, role = line.split()[1].split('-', 1)
        # Cache roles per service to avoid repeated server round-trips.
        if key not in self.CACHED_ROLES:
            service = api.get_cluster(self.cluster).get_service(key)
            roles = []
            for t in service.get_role_types():
                for r in service.get_roles_by_type(t):
                    roles.append(r.name)
            self.CACHED_ROLES[key] = roles
        if not role:
            return self.CACHED_ROLES[key]
        else:
            # Match against the full "<service>-<partial>" token.
            return [r for r in self.CACHED_ROLES[key] if r.startswith(line.split()[1])] | python | {
  "resource": ""
} |
q36635 | query_events | train | def query_events(resource_root, query_str=None):
"""
Search for events.
@param query_str: Query string.
@return: A list of ApiEvent.
"""
params = None
if query_str:
params = dict(query=query_str)
return call(resource_root.get, EVENTS_PATH, ApiEventQueryResult,
params=params) | python | {
"resource": ""
} |
q36636 | configure | train | def configure(config=None, bind_in_runtime=True):
    """Create an injector with a callable config or raise an exception when already configured."""
    global _INJECTOR
    # Serialize access to the module-level singleton so concurrent
    # configure() calls cannot both create an injector.
    with _INJECTOR_LOCK:
        if _INJECTOR:
            raise InjectorException('Injector is already configured')
        _INJECTOR = Injector(config, bind_in_runtime=bind_in_runtime)
        logger.debug('Created and configured an injector, config=%s', config)
        return _INJECTOR | python | {
  "resource": ""
} |
q36637 | configure_once | train | def configure_once(config=None, bind_in_runtime=True):
    """Create an injector with a callable config if not present, otherwise, do nothing."""
    with _INJECTOR_LOCK:
        if _INJECTOR:
            return _INJECTOR
        # NOTE(review): configure() re-acquires _INJECTOR_LOCK while we hold
        # it — this assumes the lock is reentrant (e.g. threading.RLock); confirm.
        return configure(config, bind_in_runtime=bind_in_runtime) | python | {
  "resource": ""
} |
q36638 | clear_and_configure | train | def clear_and_configure(config=None, bind_in_runtime=True):
    """Clear an existing injector and create another one with a callable config."""
    with _INJECTOR_LOCK:
        # Clear-then-configure under one lock acquisition so no other thread
        # can observe the intermediate (cleared) state. Assumes the lock is
        # reentrant, since configure() acquires it again — confirm.
        clear()
        return configure(config, bind_in_runtime=bind_in_runtime) | python | {
  "resource": ""
} |
q36639 | autoparams | train | def autoparams(*selected_args):
    """Return a decorator that will inject args into a function using type annotations, Python >= 3.5 only.
    For example::
    @inject.autoparams()
    def refresh_cache(cache: RedisCache, db: DbInterface):
    pass
    There is an option to specify which arguments we want to inject without attempts of injecting everything:
    For example::
    @inject.autoparams('cache', 'db')
    def sign_up(name, email, cache, db):
    pass
    """
    def autoparams_decorator(func):
        # Annotations require Python 3; getfullargspec exists from 3.x only.
        if sys.version_info[:2] < (3, 5):
            raise InjectorException('autoparams are supported from Python 3.5 onwards')
        full_args_spec = inspect.getfullargspec(func)
        annotations_items = full_args_spec.annotations.items()
        # Consider both positional and keyword-only parameters.
        all_arg_names = frozenset(full_args_spec.args + full_args_spec.kwonlyargs)
        # An empty selection (no names passed) is falsy, so 'or' falls back
        # to injecting every annotated argument.
        args_to_check = frozenset(selected_args) or all_arg_names
        args_annotated_types = {
            arg_name: annotated_type for arg_name, annotated_type in annotations_items
            if arg_name in args_to_check
        }
        return _ParametersInjection(**args_annotated_types)(func)
    return autoparams_decorator | python | {
  "resource": ""
} |
q36640 | Binder.bind | train | def bind(self, cls, instance):
    """Bind a class to an instance."""
    self._check_class(cls)
    # Wrap the instance in a zero-arg lambda so every binding stored in
    # _bindings has the same callable shape.
    self._bindings[cls] = lambda: instance
    logger.debug('Bound %s to an instance %s', cls, instance)
    # Return self to allow chaining bind calls.
    return self | python | {
  "resource": ""
} |
q36641 | Binder.bind_to_constructor | train | def bind_to_constructor(self, cls, constructor):
    """Bind a class to a callable singleton constructor."""
    self._check_class(cls)
    if constructor is None:
        raise InjectorException('Constructor cannot be None, key=%s' % cls)
    # _ConstructorBinding wraps the constructor; the instance is created on
    # first use (see that class for the laziness details).
    self._bindings[cls] = _ConstructorBinding(constructor)
    logger.debug('Bound %s to a constructor %s', cls, constructor)
    return self | python | {
  "resource": ""
} |
q36642 | Binder.bind_to_provider | train | def bind_to_provider(self, cls, provider):
    """Bind a class to a callable instance provider executed for each injection."""
    self._check_class(cls)
    if provider is None:
        raise InjectorException('Provider cannot be None, key=%s' % cls)
    # The provider itself is stored; it is invoked anew on every injection
    # (unlike a constructor binding, no caching happens here).
    self._bindings[cls] = provider
    logger.debug('Bound %s to a provider %s', cls, provider)
    return self | python | {
  "resource": ""
} |
"""Return an instance for a class."""
binding = self._bindings.get(cls)
if binding:
return binding()
# Try to create a runtime binding.
with _BINDING_LOCK:
binding = self._bindings.get(cls)
if binding:
return binding()
if not self._bind_in_runtime:
raise InjectorException('No binding was found for key=%s' % cls)
if not callable(cls):
raise InjectorException(
'Cannot create a runtime binding, the key is not callable, key=%s' % cls)
instance = cls()
self._bindings[cls] = lambda: instance
logger.debug('Created a runtime binding for key=%s, instance=%s', cls, instance)
return instance | python | {
"resource": ""
} |
q36644 | read_csv | train | def read_csv(
filename: Union[PathLike, Iterator[str]],
delimiter: Optional[str]=',',
first_column_names: Optional[bool]=None,
dtype: str='float32',
) -> AnnData:
"""Read ``.csv`` file.
Same as :func:`~anndata.read_text` but with default delimiter ``','``.
Parameters
----------
filename
Data file.
delimiter
Delimiter that separates data within text file. If ``None``, will split at
arbitrary number of white spaces, which is different from enforcing
splitting at single white space ``' '``.
first_column_names
Assume the first column stores row names.
dtype
Numpy data type.
"""
return read_text(filename, delimiter, first_column_names, dtype) | python | {
"resource": ""
} |
q36645 | read_umi_tools | train | def read_umi_tools(filename: PathLike, dtype: str='float32') -> AnnData:
"""Read a gzipped condensed count matrix from umi_tools.
Parameters
----------
filename
File name to read from.
"""
# import pandas for conversion of a dict of dicts into a matrix
# import gzip to read a gzipped file :-)
import gzip
from pandas import DataFrame
dod = {} # this will contain basically everything
fh = gzip.open(fspath(filename))
header = fh.readline() # read the first line
for line in fh:
t = line.decode('ascii').split('\t') # gzip read bytes, hence the decoding
try:
dod[t[1]].update({t[0]:int(t[2])})
except KeyError:
dod[t[1]] = {t[0]:int(t[2])}
df = DataFrame.from_dict(dod, orient='index') # build the matrix
df.fillna(value=0., inplace=True) # many NaN, replace with zeros
return AnnData(np.array(df), {'obs_names': df.index}, {'var_names': df.columns}, dtype=dtype) | python | {
"resource": ""
} |
q36646 | read_loom | train | def read_loom(filename: PathLike, sparse: bool = True, cleanup: bool = False, X_name: str = 'spliced',
              obs_names: str = 'CellID', var_names: str = 'Gene', dtype: str='float32', **kwargs) -> AnnData:
    """Read ``.loom``-formatted hdf5 file.
    This reads the whole file into memory.
    Beware that you have to explicitly state when you want to read the file as
    sparse data.
    Parameters
    ----------
    filename
        The filename.
    sparse
        Whether to read the data matrix as sparse.
    cleanup:
        Whether to remove all obs/var keys that do not store more than one unique value.
    X_name:
        Loompy key where the data matrix is stored.
    obs_names:
        Loompy key where the observation/cell names are stored.
    var_names:
        Loompy key where the variable/gene names are stored.
    **kwargs:
        Arguments to loompy.connect
    """
    filename = fspath(filename)  # allow passing pathlib.Path objects
    from loompy import connect
    with connect(filename, 'r', **kwargs) as lc:
        # Fall back to loompy's main (unnamed) layer if X_name is absent.
        if X_name not in lc.layers.keys(): X_name = ''
        # Loom stores genes x cells; transpose to AnnData's cells x genes.
        X = lc.layers[X_name].sparse().T.tocsr() if sparse else lc.layers[X_name][()].T
        layers = OrderedDict()
        if X_name != '': layers['matrix'] = lc.layers[''].sparse().T.tocsr() if sparse else lc.layers[''][()].T
        for key in lc.layers.keys():
            if key != '': layers[key] = lc.layers[key].sparse().T.tocsr() if sparse else lc.layers[key][()].T
        obs = dict(lc.col_attrs)
        if obs_names in obs.keys(): obs['obs_names'] = obs.pop(obs_names)
        # Multi-column attributes become obsm/varm entries, not obs/var columns.
        obsm_attrs = [k for k, v in obs.items() if v.ndim > 1 and v.shape[1] > 1]
        obsm = {}
        for key in obsm_attrs:
            obsm[key] = obs.pop(key)
        var = dict(lc.row_attrs)
        if var_names in var.keys(): var['var_names'] = var.pop(var_names)
        varm_attrs = [k for k, v in var.items() if v.ndim > 1 and v.shape[1] > 1]
        varm = {}
        for key in varm_attrs:
            varm[key] = var.pop(key)
        if cleanup:
            # Drop constant (single-unique-value) annotation columns.
            for key in list(obs.keys()):
                if len(set(obs[key])) == 1:
                    del obs[key]
            for key in list(var.keys()):
                if len(set(var[key])) == 1:
                    del var[key]
        adata = AnnData(
            X,
            obs=obs,  # not ideal: make the generator a dict...
            var=var,
            layers=layers,
            obsm=obsm if obsm else None,
            varm=varm if varm else None,
            dtype=dtype)
    return adata | python | {
  "resource": ""
} |
q36647 | read_mtx | train | def read_mtx(filename: PathLike, dtype: str='float32') -> AnnData:
"""Read ``.mtx`` file.
Parameters
----------
filename
The filename.
dtype
Numpy data type.
"""
from scipy.io import mmread
# could be rewritten accounting for dtype to be more performant
X = mmread(fspath(filename)).astype(dtype)
from scipy.sparse import csr_matrix
X = csr_matrix(X)
return AnnData(X, dtype=dtype) | python | {
"resource": ""
} |
q36648 | iter_lines | train | def iter_lines(file_like: Iterable[str]) -> Generator[str, None, None]:
""" Helper for iterating only nonempty lines without line breaks"""
for line in file_like:
line = line.rstrip('\r\n')
if line:
yield line | python | {
"resource": ""
} |
q36649 | read_zarr | train | def read_zarr(store):
    """Read from a hierarchical Zarr array store.
    Parameters
    ----------
    store
        The filename, a :class:`~typing.MutableMapping`, or a Zarr storage class.
    """
    if isinstance(store, Path):
        store = str(store)  # zarr.open takes a plain string path
    import zarr
    f = zarr.open(store, mode='r')
    d = {}
    # Collect each top-level key of the store into d via the project helper.
    for key in f.keys():
        _read_key_value_from_zarr(f, d, key)
    return AnnData(*AnnData._args_from_dict(d)) | python | {
  "resource": ""
} |
def read_h5ad(filename, backed: Optional[str] = None, chunk_size: int = 6000):
    """Read ``.h5ad``-formatted hdf5 file.

    Parameters
    ----------
    filename
        File name of data file.
    backed : {``None``, ``'r'``, ``'r+'``}
        If ``'r'``, load :class:`~anndata.AnnData` in ``backed`` mode instead
        of fully loading it into memory (`memory` mode). If you want to modify
        backed attributes of the AnnData object, you need to choose ``'r+'``.
    chunk_size
        Used only when loading sparse dataset that is stored as dense.
        Loading iterates through chunks of the dataset of this row size
        until it reads the whole dataset.
        Higher size means higher memory consumption and higher loading speed.
    """
    if isinstance(backed, bool):
        # We pass `None`s through to h5py.File, and its default is “a”
        # (=“r+”, but create the file if it doesn’t exist)
        backed = 'r+' if backed else None
        warnings.warn(
            "In a future version, read_h5ad will no longer explicitly support "
            "boolean arguments. Specify the read mode, or leave `backed=None`.",
            DeprecationWarning,
        )
    if backed:
        # open in backed-mode
        return AnnData(filename=filename, filemode=backed)
    # load everything into memory
    # BUGFIX: the file was previously parsed twice (once to peek at the dtype,
    # once for the constructor) — reuse the first parse
    constructor_args = _read_args_from_h5ad(filename=filename, chunk_size=chunk_size)
    X = constructor_args[0]
    # maintain dtype, since 0.7
    dtype = X.dtype.name if X is not None else None
    return AnnData(*constructor_args, dtype=dtype)
def _read_args_from_h5ad(
        adata: AnnData = None,
        filename: Optional[PathLike] = None,
        mode: Optional[str] = None,
        chunk_size: int = 6000
):
    """Return a tuple with the parameters for initializing AnnData.

    Parameters
    ----------
    adata
        Backed AnnData object to read from; required when ``filename`` is ``None``.
    filename
        Defaults to the objects filename if ``None``.
    mode
        h5py file mode; a non-``None`` value means "backed".
    chunk_size
        Row-chunk size used when loading dense-stored sparse data.
    """
    if filename is None and (adata is None or adata.filename is None):
        raise ValueError('Need either a filename or an AnnData object with file backing')
    # we need to be able to call the function without reference to self when
    # not reading in backed mode
    backed = mode is not None
    if filename is None and not backed:
        filename = adata.filename
    d = {}
    if backed:
        f = adata.file._file
    else:
        f = h5py.File(filename, 'r')
    for key in f.keys():
        if backed and key in AnnData._BACKED_ATTRS:
            # backed attrs stay on disk; placeholder only
            d[key] = None
        else:
            _read_key_value_from_h5(f, d, key, chunk_size=chunk_size)
    # backwards compat: save X with the correct name
    if 'X' not in d:
        # BUGFIX: was `backed == 'r+'`, comparing a bool to a string — always
        # False, so the rename never ran; the writable-mode check is on `mode`
        if mode == 'r+':
            for key in AnnData._H5_ALIASES['X']:
                if key in d:
                    del f[key]
                    f.create_dataset('X', data=d[key])
                    break
    # backwards compat: store sparse matrices properly
    csr_keys = [key.replace('_csr_data', '')
                for key in d if '_csr_data' in key]
    for key in csr_keys:
        d = load_sparse_csr(d, key=key)
    if not backed:
        f.close()
    return AnnData._args_from_dict(d)
def make_index_unique(index: pd.Index, join: str = '-'):
    """Makes the index unique by appending '1', '2', etc.

    The first occurance of a non-unique value is ignored.

    Parameters
    ----------
    join
         The connecting string between name and integer.

    Examples
    --------
    >>> adata1 = sc.AnnData(np.ones((3, 2)), {'obs_names': ['a', 'b', 'c']})
    >>> adata2 = sc.AnnData(np.zeros((3, 2)), {'obs_names': ['d', 'b', 'b']})
    >>> adata = adata1.concatenate(adata2)
    >>> adata.obs_names
    Index(['a', 'b', 'c', 'd', 'b', 'b'], dtype='object')
    >>> adata.obs_names_make_unique()
    >>> adata.obs_names
    Index(['a', 'b', 'c', 'd', 'b-1', 'b-2'], dtype='object')
    """
    if index.is_unique:
        return index
    from collections import defaultdict
    # BUGFIX: `Index.values` can expose the index's underlying array; mutating
    # it in place silently modified the caller's index — work on a copy
    values = index.values.copy()
    indices_dup = index.duplicated(keep='first')
    values_dup = values[indices_dup]
    counter = defaultdict(int)
    for i, v in enumerate(values_dup):
        counter[v] += 1
        values_dup[i] += join + str(counter[v])
    values[indices_dup] = values_dup
    return pd.Index(values)
q36653 | _find_corresponding_multicol_key | train | def _find_corresponding_multicol_key(key, keys_multicol):
"""Find the corresponding multicolumn key."""
for mk in keys_multicol:
if key.startswith(mk) and 'of' in key:
return mk
return None | python | {
"resource": ""
} |
q36654 | _gen_keys_from_multicol_key | train | def _gen_keys_from_multicol_key(key_multicol, n_keys):
"""Generates single-column keys from multicolumn key."""
keys = [('{}{:03}of{:03}')
.format(key_multicol, i+1, n_keys) for i in range(n_keys)]
return keys | python | {
"resource": ""
} |
q36655 | _check_2d_shape | train | def _check_2d_shape(X):
"""Check shape of array or sparse matrix.
Assure that X is always 2D: Unlike numpy we always deal with 2D arrays.
"""
if X.dtype.names is None and len(X.shape) != 2:
raise ValueError('X needs to be 2-dimensional, not '
'{}-dimensional.'.format(len(X.shape))) | python | {
"resource": ""
} |
def to_df(self) -> pd.DataFrame:
    """Convert to pandas dataframe."""
    frame = pd.DataFrame(index=RangeIndex(0, self.shape[0], name=None))
    for key in self.keys():
        # one dataframe column per column of the multicolumn array,
        # named key1, key2, ...
        for pos, col in enumerate(self[key].T, start=1):
            frame['{}{}'.format(key, pos)] = col
    return frame
def isopen(self) -> bool:
    """State of backing file."""
    f = self._file
    # h5py exposes a falsy `.id` once the file has been closed
    return f is not None and bool(f.id)
def transpose(self) -> 'AnnData':
    """Transpose whole object.

    Data matrix is transposed, observations and variables are interchanged.
    """
    X = self.file['X'] if self.isbacked else self._X
    if self.isview:
        raise ValueError(
            'You\'re trying to transpose a view of an `AnnData`, which is currently not implemented. '
            'Call `.copy()` before transposing.')
    # transpose every layer; keep CSR layout for CSR inputs
    layers = {
        name: (mat.T.tocsr() if sparse.isspmatrix_csr(mat) else mat.T)
        for name, mat in self.layers.items(copy=False)
    }
    new_X = X.T.tocsr() if sparse.isspmatrix_csr(X) else X.T
    # obs/var and obsm/varm swap roles under transposition
    return AnnData(new_X, self._var, self._obs, self._uns,
                   self._varm.flipped(), self._obsm.flipped(),
                   filename=self.filename, layers=layers, dtype=self.X.dtype.name)
def copy(self, filename: Optional[PathLike] = None) -> 'AnnData':
    """Full copy, optionally on disk."""
    if not self.isbacked:
        X = None if self._X is None else self._X.copy()
        dtype = 'float32' if self._X is None else self._X.dtype.name
        # deepcopy on DictView does not work and is unnecessary
        # as uns was copied already before
        uns = self._uns.copy() if isinstance(self._uns, DictView) else deepcopy(self._uns)
        raw = None if self._raw is None else self._raw.copy()
        return AnnData(X, self._obs.copy(), self._var.copy(), uns,
                       self._obsm.copy(), self._varm.copy(),
                       raw=raw, layers=self.layers.as_dict(), dtype=dtype)
    if filename is None:
        raise ValueError(
            'To copy an AnnData object in backed mode, '
            'pass a filename: `.copy(filename=\'myfilename.h5ad\')`.')
    if self.isview:
        # a backed view must be materialized through a write
        self.write(filename)
    else:
        from shutil import copyfile
        copyfile(self.filename, filename)
    return AnnData(filename=filename)
def write_h5ad(
    self,
    filename: Optional[PathLike] = None,
    compression: Optional[str] = None,
    compression_opts: Union[int, Any] = None,
    force_dense: Optional[bool] = None
):
    """Write ``.h5ad``-formatted hdf5 file.

    .. note::
       Setting compression to ``'gzip'`` can save disk space but
       will slow down writing and subsequent reading. Prior to
       v0.6.16, this was the default for parameter
       ``compression``.

    Generally, if you have sparse data that are stored as a dense
    matrix, you can dramatically improve performance and reduce
    disk space by converting to a :class:`~scipy.sparse.csr_matrix`::

        from scipy.sparse import csr_matrix
        adata.X = csr_matrix(adata.X)

    Parameters
    ----------
    filename
        Filename of data file. Defaults to backing file.
    compression : ``None``, {``'gzip'``, ``'lzf'``} (default: ``None``)
        See the h5py :ref:`dataset_compression`.
    compression_opts
        See the h5py :ref:`dataset_compression`.
    force_dense
        Write sparse data as a dense matrix. Defaults to ``True`` if object is
        backed, otherwise to ``False``.
    """
    from .readwrite.write import _write_h5ad
    if filename is None:
        if not self.isbacked:
            raise ValueError('Provide a filename!')
        filename = self.filename
    if force_dense is None:
        force_dense = self.isbacked
    _write_h5ad(filename, self, compression=compression,
                compression_opts=compression_opts, force_dense=force_dense)
    if self.isbacked:
        self.file.close()
def write_csvs(self, dirname: PathLike, skip_data: bool = True, sep: str = ','):
    """Write annotation to ``.csv`` files.

    It is not possible to recover the full :class:`~anndata.AnnData` from the
    output of this function. Use :meth:`~anndata.AnnData.write` for this.

    Parameters
    ----------
    dirname
        Name of directory to which to export.
    skip_data
        Skip the data matrix :attr:`X`.
    sep
        Separator for the data.
    """
    # alias avoids shadowing this method's name
    from .readwrite.write import write_csvs as _write_csvs
    _write_csvs(dirname, self, skip_data=skip_data, sep=sep)
def write_loom(self, filename: PathLike, write_obsm_varm: bool = False):
    """Write ``.loom``-formatted hdf5 file.

    Parameters
    ----------
    filename
        The filename.
    """
    # alias avoids shadowing this method's name
    from .readwrite.write import write_loom as _write_loom
    _write_loom(filename, self, write_obsm_varm=write_obsm_varm)
def write_zarr(
    self,
    store: Union[MutableMapping, PathLike],
    chunks: Union[bool, int, Tuple[int, ...]],
):
    """Write a hierarchical Zarr array store.

    Parameters
    ----------
    store
        The filename, a :class:`~typing.MutableMapping`, or a Zarr storage class.
    chunks
        Chunk shape.
    """
    # alias avoids shadowing this method's name
    from .readwrite.write import write_zarr as _write_zarr
    _write_zarr(store, self, chunks=chunks)
def _to_dict_fixed_width_arrays(self, var_len_str=True):
    """A dict of arrays that stores data and annotation.

    It is sufficient for reconstructing the object.
    """
    self.strings_to_categoricals()
    obs_rec, uns_obs = df_to_records_fixed_width(self._obs, var_len_str)
    var_rec, uns_var = df_to_records_fixed_width(self._var, var_len_str)
    d = {
        'X': self._X,
        'obs': obs_rec,
        'var': var_rec,
        'obsm': self._obsm,
        'varm': self._varm,
        'layers': self.layers.as_dict(),
        # add the categories to the unstructured annotation
        'uns': {**self._uns, **uns_obs, **uns_var},
    }
    if self.raw is not None:
        self.strings_to_categoricals(self.raw._var)
        raw_var_rec, raw_uns_var = df_to_records_fixed_width(self.raw._var, var_len_str)
        d.update({
            'raw.X': self.raw.X,
            'raw.var': raw_var_rec,
            'raw.varm': self.raw.varm,
            'raw.cat': raw_uns_var,
        })
    return d
def _expose_rule_functions(self, expose_all_rules=False):
    """add parse functions for public grammar rules

    Defines a function for each public grammar rule, based on
    introspecting the grammar. For example, the `c_interval` rule
    is exposed as a method `parse_c_interval` and used like this::

        Parser.parse_c_interval('26+2_57-3') -> Interval(...)
    """

    def make_parse_rule_function(rule_name):
        "builds a wrapper function that parses a string with the specified rule"

        def rule_fxn(s):
            try:
                return self._grammar(s).__getattr__(rule_name)()
            except ometa.runtime.ParseError as exc:
                raise HGVSParseError("{s}: char {exc.position}: {reason}".format(
                    s=s, exc=exc, reason=exc.formatReason()))

        rule_fxn.__doc__ = "parse string s using `%s' rule" % rule_name
        return rule_fxn

    exposed_rule_re = re.compile(r"hgvs_(variant|position)|(c|g|m|n|p|r)"
                                 r"_(edit|hgvs_position|interval|pos|posedit|variant)")
    # all grammar rules, with the "rule_" marker removed
    candidate_rules = [
        attr.replace("rule_", "")
        for attr in dir(self._grammar._grammarClass)
        if attr.startswith("rule_")
    ]
    if expose_all_rules:
        exposed_rules = candidate_rules
    else:
        exposed_rules = [name for name in candidate_rules if exposed_rule_re.match(name)]
    for rule_name in exposed_rules:
        setattr(self, "parse_" + rule_name, make_parse_rule_function(rule_name))
    self._logger.debug("Exposed {n} rules ({rules})".format(
        n=len(exposed_rules), rules=", ".join(exposed_rules)))
def format_sequence(seq, start=None, end=None, group_size=3):
    """print seq from [start, end) in groups of size

         3   6   9  12  15
         |   |   |   |   |
    2001 AAA BBB CCC DDD EEE
    """
    width = 100
    loc_width = 9
    sep = " "
    body_sep = " : "
    if not start:
        start = 0
    if not end:
        end = len(seq)
    body_width = width - loc_width - len(body_sep)
    assert group_size <= body_width, "group size must be less than available line width"
    # groups per line, rounded down to a multiple of 5 when large
    groups_per_line = int((body_width + len(sep)) / (group_size + len(sep)))
    if groups_per_line > 20:
        groups_per_line = int(groups_per_line / 5) * 5
    residues_per_line = group_size * groups_per_line
    line_fmt = "{{l:>{lw}s}}{body_sep}{{body}}".format(lw=loc_width, body_sep=body_sep)
    ge_fmt = "{{ge:>{gs}}}".format(gs=group_size)
    lines = []
    for line_start in range(start, end, residues_per_line):
        line_end = line_start + residues_per_line
        # header row: trailing digits of each group's end coordinate
        headers = [
            ge_fmt.format(ge=str(gs + group_size)[-group_size + 1:])
            for gs in range(line_start, line_end, group_size)
        ]
        lines.append(line_fmt.format(l="", body=sep.join(headers)) + "\n")
        # sequence row, 1-based location in the left margin
        chunks = [seq[gs:min(gs + group_size, end)]
                  for gs in range(line_start, line_end, group_size)]
        lines.append(line_fmt.format(l=str(line_start + 1), body=sep.join(chunks)) + "\n")
        lines.append("\n")
    return lines
q36667 | _stage_from_version | train | def _stage_from_version(version):
"""return "prd", "stg", or "dev" for the given version string. A value is always returned"""
if version:
m = re.match(r"^(?P<xyz>\d+\.\d+\.\d+)(?P<extra>.*)", version)
if m:
return "stg" if m.group("extra") else "prd"
return "dev" | python | {
"resource": ""
} |
q36668 | _get_ncbi_db_url | train | def _get_ncbi_db_url():
"""returns NCBI DB URL based on environment variables and code version
* if NCBI_DB_URL is set, use that
* Otherwise, if _NCBI_URL_KEY is set, use that as the name of a
config file entry and use the corresponding URL
* Otherwise,
"""
if "NCBI_DB_URL" in os.environ:
return os.environ["NCBI_DB_URL"]
if "_NCBI_URL_KEY" in os.environ:
url_key = os.environ["_NCBI_URL_KEY"]
else:
sdlc = _stage_from_version(hgvs.__version__)
url_key = "public_{sdlc}".format(sdlc=sdlc)
return hgvs.global_config['NCBI'][url_key] | python | {
"resource": ""
} |
def _get_cursor(self, n_retries=1):
    """Returns a context manager for obtained from a single or pooled
    connection, and sets the PostgreSQL search_path to the schema
    specified in the connection URL.

    Although *connections* are threadsafe, *cursors* are bound to
    connections and are *not* threadsafe. Do not share cursors
    across threads.

    Use this funciton like this::

        with hdp._get_cursor() as cur:
            # your code

    Do not call this function outside a contextmanager.
    """
    # generator body: intended to be wrapped by contextlib.contextmanager
    # (decorator presumably applied at the definition site — confirm)
    n_tries_rem = n_retries + 1
    while n_tries_rem > 0:
        try:
            # pooled mode borrows a connection; otherwise reuse the single one
            conn = self._pool.getconn() if self.pooling else self._conn
            # autocommit=True obviates closing explicitly
            conn.autocommit = True
            cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
            cur.execute("set search_path = {self.url.schema};".format(self=self))
            yield cur
            # contextmanager executes these when context exits
            cur.close()
            if self.pooling:
                self._pool.putconn(conn)
            break
        except psycopg2.OperationalError:
            # lost connection: tear down the pool (if any), reconnect, retry
            _logger.warning(
                "Lost connection to {url}; attempting reconnect".format(url=self.url))
            if self.pooling:
                self._pool.closeall()
            self._connect()
            _logger.warning("Reconnected to {url}".format(url=self.url))
            n_tries_rem -= 1
    else:
        # while-else: reached only when every retry failed (loop ended without break)
        # N.B. Probably never reached
        raise HGVSError("Permanently lost connection to {url} ({n} retries)".format(
            url=self.url, n=n_retries))
"""
project c_interval on the source transcript to the
destination transcript
:param c_interval: an :class:`hgvs.interval.Interval` object on the source transcript
:returns: c_interval: an :class:`hgvs.interval.Interval` object on the destination transcript
"""
return self.dst_tm.g_to_c(self.src_tm.c_to_g(c_interval)) | python | {
"resource": ""
} |
def project_interval_backward(self, c_interval):
    """
    project c_interval on the destination transcript to the
    source transcript

    :param c_interval: an :class:`hgvs.interval.Interval` object on the destination transcript
    :returns: c_interval: an :class:`hgvs.interval.Interval` object on the source transcript
    """
    g_interval = self.dst_tm.c_to_g(c_interval)
    return self.src_tm.g_to_c(g_interval)
"""
Convert an edit from one type to another, based on the stand and type
"""
if isinstance(edit_in, hgvs.edit.NARefAlt):
if strand == 1:
edit_out = copy.deepcopy(edit_in)
else:
try:
# if smells like an int, do nothing
# TODO: should use ref_n, right?
int(edit_in.ref)
ref = edit_in.ref
except (ValueError, TypeError):
ref = reverse_complement(edit_in.ref)
edit_out = hgvs.edit.NARefAlt(
ref=ref,
alt=reverse_complement(edit_in.alt),
)
elif isinstance(edit_in, hgvs.edit.Dup):
if strand == 1:
edit_out = copy.deepcopy(edit_in)
else:
edit_out = hgvs.edit.Dup(ref=reverse_complement(edit_in.ref))
elif isinstance(edit_in, hgvs.edit.Inv):
if strand == 1:
edit_out = copy.deepcopy(edit_in)
else:
try:
int(edit_in.ref)
ref = edit_in.ref
except (ValueError, TypeError):
ref = reverse_complement(edit_in.ref)
edit_out = hgvs.edit.Inv(ref=ref)
else:
raise NotImplementedError("Only NARefAlt/Dup/Inv types are currently implemented")
return edit_out | python | {
"resource": ""
} |
def t_to_p(self, var_t):
    """Return a protein variant, or "non-coding" for non-coding variant types

    CAUTION: Unlike other x_to_y methods that always return
    SequenceVariant instances, this method returns a string when
    the variant type is ``n``. This is intended as a convenience,
    particularly when looping over ``relevant_transcripts``,
    projecting with ``g_to_t``, then desiring a protein
    representation for coding transcripts.
    """
    kind = var_t.type
    if kind == "c":
        return self.c_to_p(var_t)
    if kind == "n":
        return "non-coding"
    raise HGVSInvalidVariantError("Expected a coding (c.) or non-coding (n.) variant; got " +
                                  str(var_t))
def _fetch_AlignmentMapper(self, tx_ac, alt_ac=None, alt_aln_method=None):
    """convenience version of VariantMapper._fetch_AlignmentMapper that
    derives alt_ac from transcript, assembly, and alt_aln_method
    used to instantiate the AssemblyMapper instance
    """
    resolved_alt_ac = self._alt_ac_for_tx_ac(tx_ac) if alt_ac is None else alt_ac
    resolved_method = self.alt_aln_method if alt_aln_method is None else alt_aln_method
    return super(AssemblyMapper, self)._fetch_AlignmentMapper(
        tx_ac, resolved_alt_ac, resolved_method)
q36675 | AssemblyMapper._maybe_normalize | train | def _maybe_normalize(self, var):
"""normalize variant if requested, and ignore HGVSUnsupportedOperationError
This is better than checking whether the variant is intronic because
future UTAs will support LRG, which will enable checking intronic variants.
"""
if self.normalize:
try:
return self._norm.normalize(var)
except HGVSUnsupportedOperationError as e:
_logger.warning(str(e) + "; returning unnormalized variant")
# fall through to return unnormalized variant
return var | python | {
"resource": ""
} |
def _parse_cigar(self, cigar):
    """For a given CIGAR string, return the start positions of
    each aligned segment in ref and tgt, and a list of CIGAR operators.
    """
    elems = [m.groupdict() for m in cigar_re.finditer(cigar)]
    ref_pos, tgt_pos, cigar_op = [], [], []
    ref_cur = tgt_cur = 0
    for elem in elems:
        ref_pos.append(ref_cur)
        tgt_pos.append(tgt_cur)
        op = elem["op"]
        cigar_op.append(op)
        step = int(elem["len"])
        # =/M/X advance both; I/N advance only ref; D advances only tgt
        if op in "=MINX":
            ref_cur += step
        if op in "=MDX":
            tgt_cur += step
    # trailing sentinel entries hold the total consumed lengths
    ref_pos.append(ref_cur)
    tgt_pos.append(tgt_cur)
    return ref_pos, tgt_pos, cigar_op
def _map(self, from_pos, to_pos, pos, base):
    """Map position between aligned sequences

    Positions in this function are 0-based.
    """
    # locate the CIGAR segment containing pos: from_pos has one sentinel
    # entry beyond the last segment, so from_pos[pos_i+1] is always valid here
    pos_i = -1
    while pos_i < len(self.cigar_op) and pos >= from_pos[pos_i + 1]:
        pos_i += 1
    if pos_i == -1 or pos_i == len(self.cigar_op):
        raise HGVSInvalidIntervalError("Position is beyond the bounds of transcript record")
    if self.cigar_op[pos_i] in "=MX":
        # aligned segment: direct offset arithmetic
        mapped_pos = to_pos[pos_i] + (pos - from_pos[pos_i])
        mapped_pos_offset = 0
    elif self.cigar_op[pos_i] in "DI":
        # gap: snap to the nearer boundary according to interval end
        # NOTE(review): `base` other than "start"/"end" would leave mapped_pos
        # unset (UnboundLocalError) — presumably callers only pass those two
        if base == "start":
            mapped_pos = to_pos[pos_i] - 1
        elif base == "end":
            mapped_pos = to_pos[pos_i]
        mapped_pos_offset = 0
    elif self.cigar_op[pos_i] == "N":
        # intron: anchor to the closer exon edge and record the intronic offset
        if pos - from_pos[pos_i] + 1 <= from_pos[pos_i + 1] - pos:
            mapped_pos = to_pos[pos_i] - 1
            mapped_pos_offset = pos - from_pos[pos_i] + 1
        else:
            mapped_pos = to_pos[pos_i]
            mapped_pos_offset = -(from_pos[pos_i + 1] - pos)
    return mapped_pos, mapped_pos_offset, self.cigar_op[pos_i]
def build_tx_cigar(exons, strand):
    """builds a single CIGAR string representing an alignment of the
    transcript sequence to a reference sequence, including introns.

    The input exons are expected to be in transcript order, and the
    resulting CIGAR is also in transcript order.

    >>> build_tx_cigar([], 1) is None
    True
    """
    if not exons:
        return None
    cigarelem_re = re.compile(r"\d+[=DIMNX]")

    def _flip(cigar):
        # reverse the order of CIGAR elements (each element stays intact)
        return ''.join(reversed(cigarelem_re.findall(cigar)))

    if strand == -1:
        cigars = [_flip(exon["cigar"]) for exon in exons]
    else:
        cigars = [exon["cigar"] for exon in exons]
    pieces = [cigars[0]]  # exon 1
    for idx in range(1, len(cigars)):  # and intron + exon pairs thereafter
        gap = exons[idx]["alt_start_i"] - exons[idx - 1]["alt_end_i"]
        pieces.append(str(gap) + "N")
        pieces.append(cigars[idx])
    return "".join(pieces)
q36679 | AltSeqToHgvsp._check_if_ins_is_dup | train | def _check_if_ins_is_dup(self, start, insertion):
"""Helper to identify an insertion as a duplicate
:param start: 1-based insertion start
:type start: int
:param insertion: sequence
:type insertion: str
:return (is duplicate, variant start)
:rtype (bool, int)
"""
is_dup = False # assume no
variant_start = None
dup_candidate_start = start - len(insertion) - 1
dup_candidate = self._ref_seq[dup_candidate_start:dup_candidate_start + len(insertion)]
if insertion == dup_candidate:
is_dup = True
variant_start = dup_candidate_start + 1
return is_dup, variant_start | python | {
"resource": ""
} |
q36680 | AltSeqToHgvsp._create_variant | train | def _create_variant(self,
start,
end,
ref,
alt,
fsext_len=None,
is_dup=False,
acc=None,
is_ambiguous=False,
is_sub=False,
is_ext=False,
is_no_protein=False,
is_init_met=False):
"""Creates a SequenceVariant object"""
if is_init_met:
posedit = AARefAlt(ref=ref, alt=alt, init_met=True)
elif is_ambiguous:
posedit = None
else:
interval = Interval(start=start, end=end)
# Note - order matters
if is_no_protein:
edit = '0'
elif is_sub:
edit = AASub(ref=ref, alt=alt)
elif is_ext:
edit = AAExt(ref=ref, alt=alt, aaterm='*', length=fsext_len)
elif self._is_frameshift:
edit = AAFs(ref=ref, alt=alt, length=fsext_len)
elif is_dup:
edit = Dup()
elif ref == alt == '':
edit = AARefAlt(ref='', alt='')
else:
edit = AARefAlt(ref=ref, alt=alt)
posedit = PosEdit(
pos=interval,
edit=edit,
uncertain=hgvs.global_config.mapping.inferred_p_is_uncertain)
var_p = hgvs.sequencevariant.SequenceVariant(acc, 'p', posedit)
return var_p | python | {
"resource": ""
} |
def connect(db_url=None,
            pooling=hgvs.global_config.uta.pooling,
            application_name=None,
            mode=None,
            cache=None):
    """Connect to a UTA database instance and return a UTA interface instance.

    :param db_url: URL for database connection
    :type db_url: string
    :param pooling: whether to use connection pooling (postgresql only)
    :type pooling: bool
    :param application_name: log application name in connection (useful for debugging; PostgreSQL only)
    :type application_name: str

    When called with an explicit db_url argument, that db_url is used for connecting.

    When called without an explicit argument, the function default is
    determined by the environment variable UTA_DB_URL if it exists, or
    hgvs.datainterface.uta.public_db_url otherwise.

    >>> hdp = connect()
    >>> hdp.schema_version()
    '1.1'

    The format of the db_url is driver://user:pass@host/database/schema (the same
    as that used by SQLAlchemy).  Examples:

    A remote public postgresql database:
        postgresql://anonymous:anonymous@uta.biocommons.org/uta/uta_20170707'

    A local postgresql database:
        postgresql://localhost/uta_dev/uta_20170707

    For postgresql db_urls, pooling=True causes connect to use a
    psycopg2.pool.ThreadedConnectionPool.
    """
    _logger.debug('connecting to ' + str(db_url) + '...')
    if db_url is None:
        db_url = _get_uta_db_url()
    url = _parse_url(db_url)
    scheme = url.scheme
    if scheme == 'sqlite':
        interface = UTA_sqlite(url, mode, cache)
    elif scheme == 'postgresql':
        interface = UTA_postgresql(
            url=url, pooling=pooling, application_name=application_name, mode=mode, cache=cache)
    else:
        # fell through connection scheme cases
        raise RuntimeError("{url.scheme} in {url} is not currently supported".format(url=url))
    _logger.info('connected to ' + str(db_url) + '...')
    return interface
def get_tx_for_region(self, alt_ac, alt_aln_method, start_i, end_i):
    """
    return transcripts that overlap given region

    :param str alt_ac: reference sequence (e.g., NC_000007.13)
    :param str alt_aln_method: alignment method (e.g., splign)
    :param int start_i: 5' bound of region
    :param int end_i: 3' bound of region
    """
    params = [alt_ac, alt_aln_method, start_i, end_i]
    return self._fetchall(self._queries['tx_for_region'], params)
"""returns features associated with a single transcript.
:param tx_ac: transcript accession with version (e.g., 'NM_199425.2')
:type tx_ac: str
# database output
-[ RECORD 1 ]--+-------------
tx_ac | NM_199425.2
alt_ac | NM_199425.2
alt_aln_method | transcript
cds_start_i | 283
cds_end_i | 1003
lengths | {707,79,410}
hgnc | VSX1
"""
rows = self._fetchall(self._queries['tx_identity_info'], [tx_ac])
if len(rows) == 0:
raise HGVSDataNotAvailableError(
"No transcript definition for (tx_ac={tx_ac})".format(tx_ac=tx_ac))
return rows[0] | python | {
"resource": ""
} |
q36684 | UTABase.get_similar_transcripts | train | def get_similar_transcripts(self, tx_ac):
"""Return a list of transcripts that are similar to the given
transcript, with relevant similarity criteria.
>> sim_tx = hdp.get_similar_transcripts('NM_001285829.1')
>> dict(sim_tx[0])
{ 'cds_eq': False,
'cds_es_fp_eq': False,
'es_fp_eq': True,
'tx_ac1': 'NM_001285829.1',
'tx_ac2': 'ENST00000498907' }
where:
* cds_eq means that the CDS sequences are identical
* es_fp_eq means that the full exon structures are identical
(i.e., incl. UTR)
* cds_es_fp_eq means that the cds-clipped portions of the exon
structures are identical (i.e., ecluding. UTR)
* Hint: "es" = "exon set", "fp" = "fingerprint", "eq" = "equal"
"exon structure" refers to the start and end coordinates on a
specified reference sequence. Thus, having the same exon
structure means that the transcripts are defined on the same
reference sequence and have the same exon spans on that
sequence.
"""
rows = self._fetchall(self._queries['tx_similar'], [tx_ac])
return rows | python | {
"resource": ""
} |
def _make_key(func,
              args,
              kwds,
              typed,
              kwd_mark=(object(), ),
              fasttypes={int, str, frozenset, type(None)},
              sorted=sorted,
              tuple=tuple,
              type=type,
              len=len):
    'Make a cache key from optionally typed positional and keyword arguments'
    # positional args, a unique separator, then the function itself
    key = args + kwd_mark + ('__func__', func)
    sorted_items = None
    if kwds:
        sorted_items = sorted(kwds.items())
        for item in sorted_items:
            key += item
    if typed:
        # also discriminate on argument types
        key += tuple(type(v) for v in args)
        if kwds:
            key += tuple(type(v) for k, v in sorted_items)
    elif len(key) == 1 and type(key[0]) in fasttypes:
        # fast path for a single hashable scalar (NOTE: key always carries
        # the marker+func entries here, so this branch cannot trigger)
        return key[0]
    return _HashedSeq(key)
def _get_boundary(self, var):
    """Get the position of exon-intron boundary for current variant

    Returns a half-open interval (left, right) of transcript positions
    within which normalization may shuffle the variant without crossing
    an exon/intron or UTR/CDS boundary.
    """
    if var.type == "r" or var.type == "n":
        if self.cross_boundaries:
            # boundary-crossing allowed: no restriction
            return 0, float("inf")
        else:
            # Get genomic sequence access number for this transcript
            map_info = self.hdp.get_tx_mapping_options(var.ac)
            if not map_info:
                raise HGVSDataNotAvailableError(
                    "No mapping info available for {ac}".format(ac=var.ac))
            # keep only mappings produced by the configured alignment method
            map_info = [
                item for item in map_info if item["alt_aln_method"] == self.alt_aln_method
            ]
            alt_ac = map_info[0]["alt_ac"]

            # Get tx info
            tx_info = self.hdp.get_tx_info(var.ac, alt_ac, self.alt_aln_method)
            cds_start = tx_info["cds_start_i"]
            cds_end = tx_info["cds_end_i"]

            # Get exon info
            exon_info = self.hdp.get_tx_exons(var.ac, alt_ac, self.alt_aln_method)
            exon_starts = [exon["tx_start_i"] for exon in exon_info]
            exon_ends = [exon["tx_end_i"] for exon in exon_info]
            exon_starts.sort()
            exon_ends.sort()
            # sentinel "exon" covering everything after the last exon end
            exon_starts.append(exon_ends[-1])
            exon_ends.append(float("inf"))

            # Find the end pos of the exon where the var locates
            left = 0
            right = float("inf")

            # TODO: #242: implement methods to find tx regions
            # locate exon index containing the variant start (i) and end (j);
            # positions are converted from 1-based to 0-based with -1
            for i, _ in enumerate(exon_starts):
                if (var.posedit.pos.start.base - 1 >= exon_starts[i]
                        and var.posedit.pos.start.base - 1 < exon_ends[i]):
                    break

            for j, _ in enumerate(exon_starts):
                if (var.posedit.pos.end.base - 1 >= exon_starts[j]
                        and var.posedit.pos.end.base - 1 < exon_ends[j]):
                    break

            if i != j:
                raise HGVSUnsupportedOperationError(
                    "Unsupported normalization of variants spanning the exon-intron boundary ({var})"
                    .format(var=var))

            left = exon_starts[i]
            right = exon_ends[i]

            # clip the interval so it does not span the 5' UTR/CDS boundary
            if cds_start is None:
                pass
            elif var.posedit.pos.end.base - 1 < cds_start:
                right = min(right, cds_start)
            elif var.posedit.pos.start.base - 1 >= cds_start:
                left = max(left, cds_start)
            else:
                raise HGVSUnsupportedOperationError(
                    "Unsupported normalization of variants spanning the UTR-exon boundary ({var})"
                    .format(var=var))

            # clip the interval so it does not span the CDS/3' UTR boundary
            if cds_end is None:
                pass
            elif var.posedit.pos.start.base - 1 >= cds_end:
                left = max(left, cds_end)
            elif var.posedit.pos.end.base - 1 < cds_end:
                right = min(right, cds_end)
            else:
                raise HGVSUnsupportedOperationError(
                    "Unsupported normalization of variants spanning the exon-UTR boundary ({var})"
                    .format(var=var))

            return left, right
    else:
        # For variant type of g and m etc.
        return 0, float("inf")
"""Get the total length of the whole reference sequence
"""
if var.type == "g" or var.type == "m":
return float("inf")
else:
# Get genomic sequence access number for this transcript
identity_info = self.hdp.get_tx_identity_info(var.ac)
if not identity_info:
raise HGVSDataNotAvailableError(
"No identity info available for {ac}".format(ac=var.ac))
tgt_len = sum(identity_info["lengths"])
return tgt_len | python | {
"resource": ""
} |
q36688 | Normalizer._fetch_bounded_seq | train | def _fetch_bounded_seq(self, var, start, end, window_size, boundary):
"""Fetch reference sequence from hgvs data provider.
The start position is 0 and the interval is half open
"""
var_len = end - start - window_size
start = start if start >= boundary[0] else boundary[0]
end = end if end <= boundary[1] else boundary[1]
if start >= end:
return ""
seq = self.hdp.get_seq(var.ac, start, end)
if len(seq) < end - start and len(seq) < var_len:
raise HGVSInvalidVariantError(
"Variant span is outside sequence bounds ({var})".format(var=var))
return seq | python | {
"resource": ""
} |
def _get_ref_alt(self, var, boundary):
    """Get reference allele and alternative allele of the variant

    :param var: variant whose posedit is inspected
    :param boundary: (start, end) bounds passed to ``_fetch_bounded_seq``
    :returns: tuple ``(ref, alt)`` of allele strings
    """
    # Get reference allele
    if var.posedit.edit.type == "ins" or var.posedit.edit.type == "dup":
        # Insertions and dups consume no reference bases.
        ref = ""
    else:
        # For NARefAlt and Inv
        if var.posedit.edit.ref_s is None or var.posedit.edit.ref == "":
            # Reference bases not spelled out on the edit; fetch them.
            ref = self._fetch_bounded_seq(var, var.posedit.pos.start.base - 1,
                                          var.posedit.pos.end.base, 0, boundary)
        else:
            ref = var.posedit.edit.ref
    # Get alternative allele
    if var.posedit.edit.type == "sub" or var.posedit.edit.type == "delins" or var.posedit.edit.type == "ins":
        alt = var.posedit.edit.alt
    elif var.posedit.edit.type == "del":
        alt = ""
    elif var.posedit.edit.type == "dup":
        # Prefer duplicated bases recorded on the edit; otherwise fetch them.
        alt = var.posedit.edit.ref or self._fetch_bounded_seq(
            var, var.posedit.pos.start.base - 1, var.posedit.pos.end.base, 0, boundary)
    elif var.posedit.edit.type == "inv":
        alt = reverse_complement(ref)
    elif var.posedit.edit.type == "identity":
        alt = ref
    # NOTE(review): an edit type outside the branches above would leave
    # `alt` unbound and raise UnboundLocalError — confirm all edit types
    # reaching this method are covered.
    return ref, alt
"resource": ""
} |
def trim_common_suffixes(strs, min_len=0):
    """
    trim common suffixes

    >>> trim_common_suffixes('A', 1)
    (0, 'A')
    """
    # Nothing shared between fewer than two strings.
    if len(strs) < 2:
        return 0, strs
    # Reverse each string and reuse the prefix-trimming logic.
    reversed_strs = [s[::-1] for s in strs]
    trimmed, reversed_strs = trim_common_prefixes(reversed_strs, min_len)
    if trimmed:
        strs = [s[::-1] for s in reversed_strs]
    return trimmed, strs
"resource": ""
} |
def trim_common_prefixes(strs, min_len=0):
    """trim common prefixes"""
    # Fewer than two strings share nothing by definition.
    if len(strs) <= 1:
        return 0, strs
    # The lexicographic min and max bound every string, so their common
    # prefix is the common prefix of the whole collection.
    lo, hi = min(strs), max(strs)
    shared = 0
    for idx in range(len(lo) - min_len):
        if lo[idx] != hi[idx]:
            break
        shared = idx + 1
    if shared > 0:
        strs = [s[shared:] for s in strs]
    return shared, strs
"resource": ""
} |
def normalize_alleles_left(ref, start, stop, alleles, bound, ref_step, shuffle=True):
    """
    Normalize loci by removing extraneous reference padding

    :param ref: reference sequence string indexed by `start`/`stop`
    :param start: 0-based start of the allele interval on `ref`
    :param stop: end of the allele interval
    :param alleles: sequence of allele strings to normalize together
    :param bound: leftmost position the shuffle may reach
    :param ref_step: number of reference bases prepended per shuffle step
    :param shuffle: when False, only trim; do not left-shuffle
    :returns: namedtuple ``(start, stop, alleles)``
    >>> normalize_alleles_left('A', 1, 2, 'A', 1, 2)
    shuffled_alleles(start=1, stop=2, alleles='A')
    """
    # NOTE(review): namedtuple type is re-created on every call; harmless
    # but could be hoisted to module level.
    normalized_alleles = namedtuple('shuffled_alleles', 'start stop alleles')
    if len(alleles) < 2:
        # Early return hands back `alleles` unconverted (not a tuple),
        # unlike the final return below.
        return normalized_alleles(start, stop, alleles)
    # STEP 1: Trim common suffix
    trimmed, alleles = trim_common_suffixes(alleles)
    stop -= trimmed
    # STEP 2: Trim common prefix
    trimmed, alleles = trim_common_prefixes(alleles)
    start += trimmed
    # assert bound <= start,'start={:d}, left bound={:d}'.format(start, bound)
    # STEP 3: While a null allele exists, left shuffle by prepending alleles
    # with reference and trimming common suffixes
    while shuffle and '' in alleles and start > bound:
        # Never step past the left bound.
        step = min(ref_step, start - bound)
        r = ref[start - step:start].upper()
        new_alleles = [r + a for a in alleles]
        trimmed, new_alleles = trim_common_suffixes(new_alleles)
        if not trimmed:
            break
        start -= trimmed
        stop -= trimmed
        if trimmed == step:
            # Whole step absorbed; keep shuffling left.
            alleles = new_alleles
        else:
            # Partial absorption: drop the un-trimmed prepended bases and stop.
            left = step - trimmed
            alleles = [a[left:] for a in new_alleles]
            break
    return normalized_alleles(start, stop, tuple(alleles))
"resource": ""
} |
def validate_type_ac_pair(type, ac):
    """validate that accession is correct for variant type AND that
    accession is fully specified.
    """
    assert type in valid_pairs, "Unknown variant type " + type
    # Classify the accession against the known-good and known-bad patterns
    # for this variant type.
    if valid_pairs[type].match(ac):
        level = ValidationLevel.VALID
        message = "Accession ({ac}) is compatible with variant type {type}".format(ac=ac, type=type)
    elif invalid_pairs[type].match(ac):
        level = ValidationLevel.ERROR
        message = "Accession ({ac}) is not compatible with variant type {type}".format(
            ac=ac, type=type)
    else:
        # Neither pattern matched: compatibility is simply unknown.
        level = ValidationLevel.WARNING
        message = "Accession ({ac}) is not known to be compatible with variant type {type}".format(
            ac=ac, type=type)
    return (level, message)
"resource": ""
} |
def build_altseq(self):
    """given a variant and a sequence, incorporate the variant and return the new sequence
    Data structure returned is analogous to the data structure used to return the variant sequence,
    but with an additional parameter denoting the start of a frameshift that should affect all bases
    downstream.
    :returns: variant sequence data
    :rtype: list of AltTranscriptData (one entry per allele; currently a single entry)
    """
    # Sentinel "edit types" for cases not driven by the edit object itself.
    NOT_CDS = "not_cds_variant"
    WHOLE_GENE_DELETED = "whole_gene_deleted"
    # Dispatch table: edit type (class or sentinel) -> bound handler.
    type_map = {
        NARefAlt: self._incorporate_delins,
        Dup: self._incorporate_dup,
        Inv: self._incorporate_inv,
        Repeat: self._incorporate_repeat,
        NOT_CDS: self._create_alt_equals_ref_noncds,
        WHOLE_GENE_DELETED: self._create_no_protein
    }
    # should loop over each allele rather than assume only 1 variant; return a list for now
    alt_data = []
    variant_location = self._get_variant_region()
    if variant_location == self.EXON:
        # Exonic variants dispatch on the concrete edit class.
        edit_type = type(self._var_c.posedit.edit)
    elif variant_location == self.INTRON:
        edit_type = NOT_CDS
    elif variant_location == self.T_UTR:
        edit_type = NOT_CDS
    elif variant_location == self.F_UTR:
        # TODO: handle case where variant introduces a Met (new start)
        edit_type = NOT_CDS
    elif variant_location == self.WHOLE_GENE:
        if self._var_c.posedit.edit.type == "del":
            edit_type = WHOLE_GENE_DELETED
        elif self._var_c.posedit.edit.type == "dup":
            _logger.warning(
                "Whole-gene duplication; consequence assumed to not affect protein product")
            edit_type = NOT_CDS
        elif self._var_c.posedit.edit.type == "inv":
            _logger.warning(
                "Whole-gene inversion; consequence assumed to not affect protein product")
            edit_type = NOT_CDS
        else:
            edit_type = NOT_CDS
    else:  # should never get here
        raise ValueError("value_location = {}".format(variant_location))
    try:
        this_alt_data = type_map[edit_type]()
    except KeyError:
        # Edit class with no registered handler (e.g. an unsupported type).
        raise NotImplementedError("c to p translation unsupported for {} type {}".format(
            self._var_c, edit_type))
    # get the start of the "terminal" frameshift (i.e. one never "cancelled out")
    this_alt_data = self._get_frameshift_start(this_alt_data)
    alt_data.append(this_alt_data)
    if DBG:
        print(this_alt_data.transcript_sequence)
    return alt_data
"resource": ""
} |
def _incorporate_dup(self):
    """Incorporate dup into sequence"""
    seq, cds_start, cds_stop, start, end = self._setup_incorporate()
    # Duplicate the affected span immediately after itself.
    duplicated = seq[start:end]
    seq[end:end] = duplicated
    # A duplication whose length is not a multiple of 3 shifts the frame.
    frameshifted = len(duplicated) % 3 != 0
    variant_start_aa = int(math.ceil((self._var_c.posedit.pos.end.base + 1) / 3.0))
    return AltTranscriptData(
        seq,
        cds_start,
        cds_stop,
        frameshifted,
        variant_start_aa,
        self._transcript_data.protein_accession,
        is_ambiguous=self._ref_has_multiple_stops)
"resource": ""
} |
def _incorporate_inv(self):
    """Incorporate inv into sequence"""
    seq, cds_start, cds_stop, start, end = self._setup_incorporate()
    # Replace the affected span with its reverse complement in place.
    inverted = reverse_complement(''.join(seq[start:end]))
    seq[start:end] = list(inverted)
    # Inversions never change sequence length, so the frame is preserved.
    variant_start_aa = max(int(math.ceil((self._var_c.posedit.pos.start.base) / 3.0)), 1)
    return AltTranscriptData(
        seq,
        cds_start,
        cds_stop,
        False,
        variant_start_aa,
        self._transcript_data.protein_accession,
        is_ambiguous=self._ref_has_multiple_stops)
"resource": ""
} |
def _create_no_protein(self):
    """Create a no-protein result: empty sequence, no CDS, no frameshift."""
    return AltTranscriptData(
        [],
        None,
        None,
        False,
        None,
        self._transcript_data.protein_accession,
        is_ambiguous=False)
"resource": ""
} |
def prepare_headers(self, headers, metadata, queue_derive=True):
    """Convert a dictionary of metadata into S3 compatible HTTP
    headers, and append headers to ``headers``.

    :type headers: dict
    :param headers: Mutable dict of S3 compatible HTTP headers; the
        generated ``x-archive-*`` headers are added to it in place.
    :type metadata: dict
    :param metadata: Metadata to be converted into S3 HTTP Headers
        and appended to ``headers``.
    :type queue_derive: bool
    :param queue_derive: (optional) Whether Archive.org should queue a
        derive task for the upload (default ``True``).
    """
    # Stamp a default scanner if the caller did not provide one.
    if not metadata.get('scanner'):
        scanner = 'Internet Archive Python library {0}'.format(__version__)
        metadata['scanner'] = scanner
    prepared_metadata = prepare_metadata(metadata)
    headers['x-archive-auto-make-bucket'] = '1'
    if queue_derive is False:
        headers['x-archive-queue-derive'] = '0'
    else:
        headers['x-archive-queue-derive'] = '1'
    for meta_key, meta_value in prepared_metadata.items():
        # Encode arrays into JSON strings because Archive.org does not
        # yet support complex metadata structures in
        # <identifier>_meta.xml.
        if isinstance(meta_value, dict):
            meta_value = json.dumps(meta_value)
        # Convert the metadata value into a list if it is not already
        # iterable.
        if (isinstance(meta_value, six.string_types) or
                not hasattr(meta_value, '__iter__')):
            meta_value = [meta_value]
        # Convert metadata items into HTTP headers and add to
        # ``headers`` dict.  Multi-valued keys become numbered
        # ``x-archive-metaNN-<key>`` headers; falsy values are skipped
        # (note the index still advances for skipped entries).
        for i, value in enumerate(meta_value):
            if not value:
                continue
            header_key = 'x-archive-meta{0:02d}-{1}'.format(i, meta_key)
            if (isinstance(value, six.string_types) and needs_quote(value)):
                if six.PY2 and isinstance(value, six.text_type):
                    value = value.encode('utf-8')
                value = 'uri({0})'.format(urllib.parse.quote(value))
            # because rfc822 http headers disallow _ in names, IA-S3 will
            # translate two hyphens in a row (--) into an underscore (_).
            header_key = header_key.replace('_', '--')
            headers[header_key] = value
    super(S3PreparedRequest, self).prepare_headers(headers)
"resource": ""
} |
def load_ia_module(cmd):
    """Dynamically import ia module."""
    try:
        # Built-in subcommands live under internetarchive.cli; anything
        # else is looked up among installed plugin entry points.
        known_cmds = list(cmd_aliases.keys()) + list(cmd_aliases.values())
        if cmd in known_cmds:
            _module = 'internetarchive.cli.ia_{0}'.format(cmd)
            return __import__(_module, fromlist=['internetarchive.cli'])
        _module = 'ia_{0}'.format(cmd)
        for ep in iter_entry_points('internetarchive.cli.plugins'):
            if ep.name == _module:
                return ep.load()
        # No plugin matched; fall through to the error handler below.
        raise ImportError
    except (ImportError, DistributionNotFound):
        print("error: '{0}' is not an ia command! See 'ia help'".format(cmd),
              file=sys.stderr)
        matches = '\t'.join(difflib.get_close_matches(cmd, cmd_aliases.values()))
        if matches:
            print('\nDid you mean one of these?\n\t{0}'.format(matches))
        sys.exit(127)
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.