sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def _bind_service(package_name, cls_name, binder=Injected):
    """Bind a single service class to the application injector.

    The class is imported dynamically, instantiated through the injector
    and registered as a singleton binding.

    :param package_name: service package
    :type package_name: str
    :param cls_name: service class
    :type cls_name: str
    :param binder: current application binder, injected
    :type binder: Binder
    """
    service_module = importlib.import_module(package_name)
    service_cls = getattr(service_module, cls_name)
    service_instance = binder.injector.create_object(service_cls)
    binder.bind(service_cls, to=service_instance, scope=singleton)
    logging.debug("Created {0} binding.".format(service_cls))
:param package_name: service package
:type package_name: str
:param cls_name: service class
:type cls_name: str
:param binder: current application binder, injected
:type binder: Binder | entailment |
def initialize(config):
    """Initialize method.

    Reads the ``SERVICE_INJECTION`` setting and binds every declared
    service class to the application injector. Entries may be plain
    dotted paths or dicts with ``package`` and ``list`` keys.

    :param config: current application config, injected
    :type config: Config
    """
    entries = config.get('SERVICE_INJECTION', ())
    if not isinstance(entries, (tuple, list)):
        entries = (entries,)
    for entry in entries:
        if isinstance(entry, str):
            # A dotted path: split into package and class name.
            pkg, cls_name = entry.rsplit('.', 1)
            ServiceInitializer._bind_service(pkg, cls_name)
        elif isinstance(entry, dict):
            # A dict: one package holding several service classes.
            services = entry['list']
            pkg_root = entry['package']
            for cls_name in services:
                module_name = camelcase_to_underscore(cls_name)
                pkg = "{0}.{1}".format(pkg_root, module_name)
                ServiceInitializer._bind_service(pkg, cls_name)
:param config: current application config, injected
:type config: Config | entailment |
def background_task_method(task):
    """Decorate an object method as a background task (called with help of
    gearman).

    You have to create a task which will handle the gearman call. The
    method arguments will be encoded as JSON.

    :param task: name of the task
    :type task: str
    :return: decorated function
    """
    # TODO: return a JOB ID as the result so the job can be checked in the background
    # TODO: create a generic background_task decorator, not only for methods
    def decorator_fn(fn):
        # Lazily-created, cached gearman client shared by all calls of this
        # decorated method.
        gearman = None

        @inject(config=Config)
        def gearman_connect(config):
            # type: (Config) -> GearmanService
            if 'GEARMAN' not in config or 'host' not in config['GEARMAN'] or 'GEARMAN_TASK_NAME' not in config:
                raise Exception("Missing gearman settings (trying to use backgorund task)")
            gearman_host = (config['GEARMAN']['host'], config['GEARMAN']['port']) if config['GEARMAN']['port'] \
                else config['GEARMAN']['host']
            gearman_service = GearmanService({'HOST': [gearman_host], 'TASK_NAME': config['GEARMAN_TASK_NAME']})
            gearman_service.set_blocking(False)
            return gearman_service

        def get_gearman_client():
            # type: () -> GearmanService
            # BUG FIX: the cache variable lives in the enclosing decorator_fn
            # scope, so it must be declared `nonlocal`. The original used
            # `global gearman`, which referenced a (non-existent) module-level
            # name instead of the closure variable initialized above.
            nonlocal gearman
            if not gearman:
                gearman = gearman_connect()
            return gearman

        @wraps(fn)
        def background_task_decorator(*args, **kwargs):
            # The first of the args is self; it is skipped because it is not
            # JSON-serializable.
            t = RawTask(task, dict(method=fn.__name__, args=args[1:], kwargs=kwargs))
            t_result = get_gearman_client().call(t, [JsonTask])
            return t_result.result

        # Expose the undecorated method for the worker-side task handler.
        background_task_decorator._background_fn = fn
        return background_task_decorator
    return decorator_fn
gearman).
You have to create a task which will handle the gearman call. The
method arguments will be encoded as JSON.
:param task: name of the task
:type task: str
:return: decorated function | entailment |
def _get_link_pages(page, per_page, count, page_url):
    # type: (int, int, int, str) -> Dict[str, str]
    """Create link header for page metadata.

    :param page: current page
    :param per_page: page limit
    :param count: count of all resources
    :param page_url: url for resources
    :return: dictionary with name of the link as key and its url as value
    """
    current = _page_arg(page)

    def url_for(target_page):
        # Rewrite the current page argument in the URL to point elsewhere.
        return page_url.replace(current, _page_arg(target_page))

    links = {}
    if page > 1:
        links['prev'] = url_for(page - 1)
    if page * per_page < count:
        links['next'] = url_for(page + 1)
    if per_page < count:
        last_page = (count + per_page - 1) // per_page  # ceiling division
        links['first'] = url_for(1)
        links['last'] = url_for(last_page)
    return links
:param page: current page
:param per_page: page limit
:param count: count of all resources
:param page_url: url for resources
:return: dictionary with name of the link as key and its url as value | entailment |
def to_filter(self, query, arg):
    """Json-server filter using the _or_ operator."""
    # Delegate to the generic URL-argument filter, combining terms with OR.
    filtered = filter_from_url_arg(self.model_cls, query, arg, query_operator=or_)
    return filtered
def create(self, *args, **kwargs):
    """Adds created http status response and location link."""
    created = super(JsonServerResource, self).create(*args, **kwargs)
    # 201 Created plus a Location header pointing at the new resource.
    return ResourceResult(
        body=created,
        status=get_http_status_code_value(http.client.CREATED),
        location="{}/{}".format(request.url, created.get_id())
    )
def _create_filter_by(self):
    """Transform the json-server filter arguments to model-resource ones."""
    conditions = []
    for name, values in request.args.copy().lists():  # copy.lists works in py2 and py3
        if name in _SKIPPED_ARGUMENTS:
            continue
        column = _re_column_name.search(name).group(1)
        if column not in self._model_columns:
            continue
        for value in values:
            # Map json-server operator suffixes onto model-resource syntax;
            # the suffix check order matters (_lte/_gte before the fallback).
            if name.endswith('_ne'):
                conditions.append(name[:-3] + '!=' + value)
            elif name.endswith('_lte'):
                conditions.append(name[:-4] + '<=' + value)
            elif name.endswith('_gte'):
                conditions.append(name[:-4] + '>=' + value)
            elif name.endswith('_like'):
                conditions.append(name[:-5] + '::like::%' + value + '%')
            else:
                conditions.append(name.replace('__', '.') + '==' + value)
    conditions += self._create_fulltext_query()
    return ','.join(conditions)
def _create_related(args):
    # type: (Dict) -> None
    """Create related field from `_embed` arguments."""
    if '_embed' not in request.args:
        return
    # Join all _embed values into a single comma-separated related field.
    args['related'] = ','.join(request.args.getlist('_embed'))
    del args['_embed']
def _create_fulltext_query(self):
    """Support the json-server fulltext search with a broad LIKE filter."""
    if 'q' not in request.args:
        return []
    # Search every model column for every supplied q term.
    columns = flat_model(model_tree(self.__class__.__name__, self.model_cls))
    return [
        '{col}::like::%{q}%'.format(col=col, q=q)
        for q in request.args.getlist('q')
        for col in columns
    ]
def _transform_list_args(self, args):
# type: (dict) -> None
"""Transforms all list arguments from json-server to model-resource ones.
This modifies the given arguments.
"""
if '_limit' in args:
args['limit'] = int(args['_limit'])
del args['_limit']
if '_page' in args:
page = int(args['_page'])
if page < 0:
page = 1
args['page'] = page
del args['_page']
if 'limit' not in args:
args['limit'] = 10
if '_end' in args:
end = int(args['_end'])
args['limit'] = end - int(args.get('_start', 0))
if '_start' in args:
args['offset'] = args['_start']
del args['_start']
if '_sort' in args:
args['order_by'] = args['_sort'].replace('__', '.')
del args['_sort']
if args.get('_order', 'ASC') == 'DESC':
args['order_by'] = '-' + args['order_by']
if '_order' in args:
del args['_order']
filter_by = self._create_filter_by()
if filter_by:
args['filter_by'] = filter_by | Transforms all list arguments from json-server to model-resource ones.
This modifies the given arguments. | entailment |
def read(self, params, args, data):
    """Modifies the parameters and adds metadata for read results.

    :param params: positional request parameters (may carry the row id)
    :param args: query-string arguments in json-server format
    :param data: request payload
    :return: ``ResourceResult`` with the body plus optional ``count`` and
        pagination ``links`` metadata, or ``NOT_FOUND`` when no row matches
    """
    result_count = None
    result_links = None
    if params is None:
        params = []
    if args:
        # Work on a copy so the caller's dict is not mutated below.
        args = args.copy()
    else:
        args = {}
    ctx = self._create_context(params, args, data)
    row_id = ctx.get_row_id()
    if not row_id:
        # Collection read: translate the json-server arguments in place.
        self._transform_list_args(args)
        if 'page' in args or 'limit' in args:
            # Re-create the context because args were just rewritten.
            ctx = self._create_context(params, args, data)
            result_count = self._get_collection_count(ctx)
        if 'page' in args:
            result_links = _get_link_pages(
                page=args['page'],
                per_page=int(args['limit']),
                count=result_count,
                page_url=request.url
            )
        if 'limit' not in args:
            args['limit'] = 'unlimited'
    self._create_related(args)
    try:
        return ResourceResult(
            body=super(JsonServerResource, self).read(params, args, data),
            count=result_count,
            links=result_links
        )
    except NoResultFound:
        return NOT_FOUND
def update(self, *args, **kwargs):
    """Modifies the parameters and adds metadata for update results.

    Currently it does not support `PUT` method, which works as replacing
    the resource. This is somehow questionable in relation DB.
    """
    if request.method == 'PUT':
        logging.warning("Called not implemented resource method PUT")
    updated = super(JsonServerResource, self).update(*args, **kwargs)
    # A falsy result means the resource was not found.
    return updated if updated else NOT_FOUND
Currently it does not support `PUT` method, which works as replacing
the resource. This is somehow questionable in relation DB. | entailment |
def delete(self, params, args, data):
    """Supports only singular delete and adds proper http status."""
    ctx = self._create_context(params, args, data)
    row_id = ctx.get_row_id()
    # Collection-wide deletes are not supported; a row id is mandatory.
    if not row_id:
        return NOT_FOUND
    if self._delete_one(row_id, ctx):
        return ResourceResult(body={})
    return NOT_FOUND
def run_task(task_cls, task_data):
    """Instantiate and run the perform method of the given task data.

    :param task_cls: task class
    :param task_data: task data
    :type task_data: TaskData
    :return: task's result
    """
    task_instance = instantiate(task_cls)
    perform = get_callable(task_instance)
    return perform(TaskData(task_data))
:param task_cls: task class
:param task_data: task data
:type task_data: TaskData
:return: task's result | entailment |
def run_task_json(task_cls, task_data):
    """Instantiate and run the perform method of the given task data,
    with JSON skipping enabled on the task data.

    :param task_cls: task class
    :param task_data: task data
    :type task_data: TaskData
    :return: task's result
    """
    # TODO what does set_skipping_json do?
    task_instance = instantiate(task_cls)
    perform = get_callable(task_instance)
    data = TaskData(task_data)
    data.set_skipping_json(True)
    return perform(data)
:param task_cls: task class
:param task_data: task data
:type task_data: TaskData
:return: task's result | entailment |
def _aggregate(data, norm=True, sort_by='value', keys=None):
    '''
    Counts the number of occurrences of each item in 'data'.

    Inputs
      data: a list of values.
      norm: normalize the resulting counts (as percent)
      sort_by: how to sort the returned data. Options are 'value' and 'count'.
      keys: optional iterable of allowed values. When given, values outside
            'keys' are ignored and every key is reported, even with a zero
            count.

    Output
      a non-redundant list of values (from 'data') and a list of counts.
    '''
    if keys:
        # Pre-seed all keys with zero so absent keys still appear.
        vdict = {k: 0 for k in keys}
        for d in data:
            if d in keys:
                vdict[d] += 1
    else:
        vdict = {}
        for d in data:
            vdict[d] = vdict[d] + 1 if d in vdict else 1
    vals = [(k, v) for k, v in vdict.items()]
    if sort_by == 'value':
        vals.sort(key=lambda x: x[0])
    else:
        vals.sort(key=lambda x: x[1])
    xs = [v[0] for v in vals]
    raw_y = [v[1] for v in vals]
    if norm:
        total_y = sum(raw_y)
        # BUG FIX: guard against division by zero, which occurred when 'keys'
        # was given but none of them appeared in 'data' (all counts zero).
        if total_y == 0:
            ys = [0.0 for _ in raw_y]
        else:
            ys = [100. * y / total_y for y in raw_y]
    else:
        ys = raw_y
    return xs, ys
Inputs
data: a list of values.
norm: normalize the resulting counts (as percent)
sort_by: how to sort the retured data. Options are 'value' and 'count'.
Output
a non-redundant list of values (from 'data') and a list of counts. | entailment |
def generate_apiary_doc(task_router):
    """Generate apiary documentation.

    Create a Apiary generator and add application packages to it.

    :param task_router: task router, injected
    :type task_router: TaskRouter
    :return: apiary generator
    :rtype: ApiaryDoc
    """
    generator = ApiaryDoc()
    package_names = task_router.get_task_packages() + get_method_packages()
    for package_name in package_names:
        generator.docmodule(importlib.import_module(package_name))
    return generator
Create a Apiary generator and add application packages to it.
:param task_router: task router, injected
:type task_router: TaskRouter
:return: apiary generator
:rtype: ApiaryDoc | entailment |
def setup_logging(logfile, print_log_location=True, debug=False):
    '''
    Set up logging using the built-in ``logging`` package.

    A stream handler is added to all logs, so that logs at or above
    ``logging.INFO`` level are printed to screen as well as written
    to the log file.

    Arguments:

        logfile (str): Path to the log file. If the parent directory
            does not exist, it will be created. Required.

        print_log_location (bool): If ``True``, the log path will be
            written to the log upon initialization. Default is ``True``.

        debug (bool): If true, the log level will be set to ``logging.DEBUG``.
            If ``False``, the log level will be set to ``logging.INFO``.
            Default is ``False``.
    '''
    # Ensure the log directory exists before the file handler opens the file.
    make_dir(os.path.dirname(logfile))
    logging.basicConfig(filename=logfile,
                        filemode='w',
                        format='[%(levelname)s] %(name)s %(asctime)s %(message)s',
                        level=logging.DEBUG if debug else logging.INFO)
    logger = add_stream_handler(logging.getLogger('log'))
    if print_log_location:
        logger.info('LOG LOCATION: {}'.format(logfile))
A stream handler is added to all logs, so that logs at or above
``logging.INFO`` level are printed to screen as well as written
to the log file.
Arguments:
logfile (str): Path to the log file. If the parent directory
does not exist, it will be created. Required.
print_log_location (bool): If ``True``, the log path will be
written to the log upon initialization. Default is ``True``.
debug (bool): If true, the log level will be set to ``logging.DEBUG``.
If ``False``, the log level will be set to ``logging.INFO``.
Default is ``False``. | entailment |
def get_logger(name=None):
    '''
    Get a logging handle.

    As with ``setup_logging``, a stream handler is added to the
    log handle.

    Arguments:

        name (str): Name of the log handle. Default is ``None``.
    '''
    logger = logging.getLogger(name)
    # Only attach a stream handler once per named logger.
    if not logger.handlers:
        logger = add_stream_handler(logger)
    return logger
As with ``setup_logging``, a stream handler is added to the
log handle.
Arguments:
name (str): Name of the log handle. Default is ``None``. | entailment |
def inject(**bindings):
    """
    Decorator for injecting parameters for ASL objects.

    :param bindings: mapping of parameter name to the type that should be
        injected for it; values are wrapped in ``BindingKey`` when the
        wrapped function is first decorated.
    :return: a decorator that handles both plain functions and methods;
        injected dependencies can always be overridden by explicit
        keyword arguments.
    """
    def outer_wrapper(f):
        def function_wrapper(ff):
            # Wrap the raw binding values once, at decoration time.
            for key, value in viewitems(bindings):
                bindings[key] = BindingKey(value)
            @functools.wraps(ff)
            def _inject(*args, **kwargs):
                # Resolve dependencies from the current application's injector.
                inj = get_current_app().injector
                dependencies = inj.args_to_inject(
                    function=ff,
                    bindings=bindings,
                    owner_key=ff
                )
                # Explicit keyword arguments take precedence over injection.
                dependencies.update(kwargs)
                try:
                    return ff(*args, **dependencies)
                except TypeError as e:
                    # Re-raise with call details to aid debugging.
                    reraise(e, CallError(ff, args, dependencies, e))
            return _inject
        '''
        Just a convenience method - delegates everything to wrapper.
        '''
        def method_or_class_wrapper(*a, **kwargs):
            """
            Properly installs the injector into the object so that the injection can be performed.
            """
            inj = get_current_app().injector
            # Install injector into the self instance if this is a method call.
            inj.install_into(a[0])
            # Use the generic wrapper.
            inject_f = injector.inject(**bindings)
            # Call the method.
            return inject_f(f)(*a, **kwargs)
        if inspect.ismethod(f):
            return method_or_class_wrapper
        else:
            return function_wrapper(f)
    return outer_wrapper
def phylogeny(sequences=None, project_dir=None, name=None, aln_file=None, tree_file=None,
              seq_field=None, name_field=None, aa=False, species='human', unrooted=False, ladderize=True,
              root=None, root_name=None, show_root_name=False, color_dict=None, color_function=None,
              order_dict=None, order_function=None, color_node_labels=False, label_colors=None,
              scale=None, branch_vert_margin=None, fontsize=12, show_names=True, show_scale=False,
              mirror=False, min_order_fraction=0.1, figname_prefix=None, figname_suffix=None,
              # linked_alignment=None, alignment_fontsize=11, alignment_height=50, alignment_width=50,
              compact_alignment=False, scale_factor=1, rename_function=None, linewidth=1.0,
              delete_nodes=None, quiet=True):
    '''
    Generates a lineage phylogeny figure.

    Args:

        sequences (list(Sequence)): A list of ``Sequence`` objects from which a phylogeny
            will be calculated. Strictly speaking, they do not need to be ``Sequence`` objects,
            rather, any object that contains the sequence name as the ``id`` attribute (or
            by dictionary-style lookup using the provided ``name_field``) and contains the
            sequence as the ``sequence`` attribute (or by dictionary-style lookup using the
            provided ``seq_field``).

        project_dir (str): directory into which all phylogeny files will be deposited,
            including alignment, tree and figure files.

        name (str): Name to be used for naming alignment, tree, and phylogeny files. If not
            provided, a random name will be generated.

        aln_file (str): if a multiple sequence alignment has already been calculated,
            passing the path to the alignment file (in FASTA format) will force Lineage.phylogeny()
            to use the supplied msa instead of computing a new one.

        tree_file (str): if a tree file has already been calculated, passing the path
            to the pre-computed tree file will force ``phylogeny()`` to use
            the supplied tree file instead of computing a new one. It is important to note that
            only sequence names will be parsed from the tree_file, so if ``order_function`` or
            ``color_function`` is also provided, ensure that these functions only require the
            sequence ID rather than the entire sequence.

        aa (bool): if True, use amino acid sequences to compute the phylogeny.
            Default is False.

        root (Sequence, str): The root can be provided either as a ``Sequence`` object (if ``sequences``
            are being provided) or as the name of a sequence that can be found either in
            ``sequences`` or in the provided ``aln_file`` or ``tree_file``. Note that if
            either ``aln_file`` or ``tree_file`` are provided, the root must be provided
            as the sequence name, not as a ``Sequence`` object (as the root sequence must
            already be included in either ``aln_file`` or ``tree_file``). If the root is not
            provided, the germline V-gene sequence of the most common V-gene is used.

        color_dict (dict): Dictionary with sequence IDs as keys and colors (hex format) as values. If any
            sequence IDs are not found in the dict, they will be colored black. If neither ``color_dict`` nor
            ``color_function`` is provided, all leaves will be colored black.

        color_function (func): Function that accepts a ``Sequence`` object and returns the color
            (as a hex code). If ``color_dict`` is also provided, ``color_function`` is ignored. Additionally,
            ``color_function`` will only be used if ``sequences`` are provided. If ``sequences`` are not provided
            (instead using ``aln_file`` or ``tree_file``), ``color_dict`` must be used instead of ``color_function``.

        orders: a dictionary with sequence IDs as keys and orders (integers) as values.
            If not provided, only the leaf branches will be colored (if <colors> or
            <color_function> is provided).

        scale (float): passed to ete3.TreeStyle() to set the scale of the tree figure. Increased
            scale results in a wider tree.

        branch_vert_margin (int): passed to ete3.TreeStyle() to set the branch_vertical_margin of
            the tree figure. Increased branch_vert_margin results in a taller tree.

        fontsize: size of the leaf labels. Default is 12.

        show_names: show names of leaf nodes. Options are True (show labels for all leaf nodes),
            False (don't show labels for any leaf nodes) or a list of sequence IDs for which
            labels should be shown. Default is True.

        mirror: flip the orientation of the tree. Default is to draw the tree from left to right.
            Setting mirror to True results in the tree being drawn from right to left.

        min_order_fraction: minimum fraction of downstream leaves required to color a branch.
            When coloring non-leaf nodes, the earliest 'order' with at least <min_order_fraction>
            leaf nodes is used. Default is 0.1 (which corresponds to 10%).

        figname_prefix: by default, figures will be named <lineage_id>.pdf. If prefix='prefix_' and
            the lineage ID is 'ABC123', the figure file will be named 'prefix_ABC123.pdf'.

        figname_suffix: by default, figures will be named <lineage_id>.pdf. If suffix='_suffix' and
            the lineage ID is 'ABC123', the figure file will be named 'ABC123_suffix.pdf'.
    '''
    if project_dir is None:
        print('\nERROR: project_dir is required\n')
        sys.exit(1)
    else:
        project_dir = os.path.abspath(project_dir)
    # make a name if one isn't provided
    if name is None:
        name = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8))
    # if sequences are provided, need to process them
    if sequences is not None and all([arg is None for arg in [aln_file, tree_file]]):
        sequences = deepcopy(sequences)
        root = copy(root)
        # if custom seq_field is specified, copy to the .sequence attribute
        if seq_field is not None:
            if not all([seq_field in list(s.annotations.keys()) for s in sequences]):
                print('\nERROR: {} is not present in all of the supplied sequences.\n'.format(seq_field))
                sys.exit(1)
            for s in sequences:
                s.alignment_sequence = s[seq_field]
        else:
            for s in sequences:
                s.alignment_sequence = s.sequence
        # if custom name_field is specified, copy to the .id attribute
        if name_field is not None:
            if not all([name_field in list(s.annotations.keys()) for s in sequences]):
                print('\nERROR: {} is not present in all of the supplied sequences.\n'.format(name_field))
                sys.exit(1)
            for s in sequences:
                s.alignment_id = s[name_field]
        else:
            for s in sequences:
                s.alignment_id = s.id
        # parse the root sequence
        if unrooted:
            root = None
            root_name = None
        elif root is None:
            if not quiet:
                print('\nRoot sequence was was not provided. Using the germline V-gene.')
            if not all(['v_gene' in list(s.annotations.keys()) for s in sequences]):
                print('\nInput sequences to not appear to be AbStar annotated. Annotating now...')
                sequences = abstar.run(*[(s.id, s.sequence) for s in sequences])
                print('Done.')
            if not all(['full' in list(s['v_gene'].keys()) for s in sequences]):
                print('\nInput sequences to not appear to be AbStar annotated. Annotating now...')
                sequences = abstar.run(*[(s.id, s.sequence) for s in sequences])
                print('Done.')
            # use the germline of the most common V-gene as the root
            top_vgene = sorted(list(Counter([s['v_gene']['full'] for s in sequences]).items()),
                               key=lambda x: x[1],
                               reverse=True)[0][0]
            vgene = get_imgt_germlines(species, 'V', gene=top_vgene)
            if aa:
                root = Sequence(vgene.ungapped_aa_sequence, id=top_vgene)
            else:
                root = Sequence(vgene.ungapped_nt_sequence, id=top_vgene)
            root.alignment_id = root.id
            root.alignment_sequence = root.sequence
            if not quiet:
                print('Top V-gene: {}'.format(root.alignment_id))
                print(root.alignment_sequence)
        elif type(root) in STR_TYPES:
            # BUG FIX: the original indexed [0] before checking for a match,
            # raising IndexError (and losing the root name) when the named
            # root was absent; find matches first, then report.
            root_matches = [s for s in sequences if s.alignment_id == root]
            if not root_matches:
                print('\nERROR: The name of the root sequence ({}) was not found in the list of input sequences.'.format(root))
                print('\n')
                sys.exit(1)
            root = root_matches[0]
            sequences = [s for s in sequences if s.alignment_id != root.alignment_id]
        elif type(root) == Sequence:
            if seq_field is not None:
                # BUG FIX: the original accessed `root.anotations` (typo),
                # which raised AttributeError whenever this branch ran.
                if seq_field not in list(root.annotations.keys()):
                    print('\nERROR: {} is not present in the supplied root sequence.\n'.format(seq_field))
                    sys.exit(1)
                root.alignment_sequence = root[seq_field]
            if name_field is not None:
                # BUG FIX: same `anotations` typo as above.
                if name_field not in list(root.annotations.keys()):
                    print('\nERROR: {} is not present in the supplied root sequence.\n'.format(name_field))
                    sys.exit(1)
                root.alignment_id = root[name_field]
            sequences = [s for s in sequences if s.alignment_id != root.alignment_id]
        else:
            print('\nERROR: If root is provided, it must be the name of a sequence \
found in the supplied list of sequences or it must be a Sequence object.')
            print('\n')
            sys.exit(1)
        if not unrooted:
            if root_name is not None:
                root.alignment_id = root_name
            else:
                root_name = root.alignment_id
            sequences.append(root)
    # parse sequences from aln_file, if provided
    elif aln_file is not None:
        if not unrooted and type(root) not in STR_TYPES:
            print('\nERROR: If providing an aln_file, the name of the root sequence must \
be provided (as a string) using the root keyword argument')
            print('\n')
            sys.exit(1)
        _sequences = []
        _root = None
        for rec in AlignIO.read(open(aln_file), 'fasta'):
            s = str(rec.seq).replace('-', '')
            if rec.id == root:
                _root = Sequence(s, rec.id)
                _root.alignment_id = _root.id
            else:
                _s = Sequence(s, id=rec.id)
                _s.alignment_id = rec.id
                _sequences.append(_s)
        if sequences is None:
            sequences = _sequences
        else:
            sequence_ids = [s.id for s in sequences]
            if any([_s.alignment_id not in sequence_ids for _s in _sequences]):
                print('\nWARNING: Sequences were found in the alignment file that were not included \
in the input sequence list. This may cause problems.')
            for s in sequences:
                s.alignment_id = s.id
                s.alignment_sequence = s.sequence
        if unrooted:
            root = None
            root_name = None
        else:
            if _root is None:
                print('\nERROR: The specified root ({}) was not found in the provided alignment file.'.format(root))
                print('\n')
                sys.exit(1)
            root = _root
            if root_name is not None:
                root.alignment_id = root_name
            else:
                root_name = root.alignment_id
            sequences = [s for s in sequences if all([s.alignment_id != name for name in [root.id, root.alignment_id]])]
            sequences.append(root)
    # parse sequences from tree_file, if provided
    elif tree_file is not None:
        if not unrooted and type(root) not in STR_TYPES:
            print('\nERROR: If providing a tree_file, the name of the root sequence must \
be provided (as a string) using the root keyword argument')
            print('\n')
            sys.exit(1)
        _sequences = []
        _root = None
        tree = Phylo.read(open(tree_file), 'newick')
        for leaf in tree.get_terminals():
            s = ''
            if leaf.name == root:
                _root = Sequence(s, leaf.name)
                _root.alignment_id = _root.id
            else:
                _s = Sequence(s, id=leaf.name)
                _s.alignment_id = leaf.name
                _sequences.append(_s)
        if sequences is None:
            sequences = _sequences
        else:
            sequence_ids = [s.id for s in sequences]
            if any([_s.alignment_id not in sequence_ids for _s in _sequences]):
                print('\nWARNING: Sequences were found in the alignment file that were not included \
in the input sequence list. This may cause problems.')
            for s in sequences:
                s.alignment_id = s.id
                s.alignment_sequence = s.sequence
        if unrooted:
            root = None
            root_name = None
        elif _root is None:
            print('\nERROR: The specified root ({}) was not found in the provided tree file.'.format(root))
            print('\n')
            sys.exit(1)
        else:
            root = _root
            if root_name is not None:
                root.alignment_id = root_name
            else:
                root_name = root.alignment_id
            sequences = [s for s in sequences if all([s.alignment_id != name for name in [root.id, root.alignment_id]])]
            sequences.append(root)
    # set up colors and color ordering
    if order_dict is None:
        if order_function is not None:
            order_dict = {seq.alignment_id: order_function(seq) for seq in sequences}
    if color_dict is None:
        if color_function is not None:
            color_dict = {seq.alignment_id: color_function(seq) for seq in sequences}
    if color_dict is None:
        color_dict = {}
    # make msa (if necessary)
    if all([aln_file is None, tree_file is None]):
        aln_file = os.path.abspath(os.path.join(project_dir, '{}.aln'.format(name)))
        # muscle(seqs, aln_file, as_file=True)
        do_print = False if quiet else True
        if do_print:
            print('\n')
        seqs = [(s.alignment_id, s.alignment_sequence) for s in sequences]
        mafft(seqs, aln_file, as_file=True, print_stdout=do_print, print_stderr=do_print)
    # make treefile (if necessary)
    if tree_file is None:
        tree_file = os.path.abspath(os.path.join(project_dir, '{}.nw'.format(name)))
        fasttree(aln_file, tree_file, is_aa=aa, quiet=quiet)
    # make phylogeny
    prefix = '' if figname_prefix is None else figname_prefix
    suffix = '' if figname_suffix is None else figname_suffix
    fig_file = os.path.join(project_dir, '{}{}{}.pdf'.format(prefix, name, suffix))
    _make_tree_figure(tree_file,
                      fig_file,
                      color_dict,
                      order_dict,
                      None if root is None else root.alignment_id,
                      rename_function=rename_function,
                      show_names=show_names,
                      name_field=name_field,
                      branch_vert_margin=branch_vert_margin,
                      scale=scale,
                      color_node_labels=color_node_labels,
                      label_colors=label_colors,
                      show_root_name=show_root_name,
                      tree_orientation=1 if mirror else 0,
                      fontsize=fontsize,
                      min_order_fraction=min_order_fraction,
                      # linked_alignment=linked_alignment,
                      # alignment_fontsize=alignment_fontsize,
                      # alignment_height=alignment_height,
                      # alignment_width=alignment_width,
                      show_scale=show_scale,
                      compact_alignment=compact_alignment,
                      scale_factor=scale_factor,
                      linewidth=linewidth,
                      ladderize=ladderize,
                      delete_nodes=delete_nodes)
Args:
sequences (list(Sequence)): A list of ``Sequence`` objects from which a phylogeny
will be calculated. Strictly speaking, they do not need to be ``Sequence`` objects,
rather, any object that contains the sequence name as the ``id`` attribute (or
by dictionary-style lookup using the provided ``name_field``) and contains the
sequence as the ``sequence`` attribute (or by dictionary-stype lookup using the
provided ``seq_field``).
project_dir (str): directory into which all phylogeny files will be deposited,
including alignment, tree and figure files.
name (str): Name to be used for naming alignment, tree, and phylogeny files. If not
provided, a random name will be generated.
aln_file (str): if a multiple sequence alignment has already been calculated,
passing the path to the alignment file (in FASTA format) will force Lineage.phylogeny()
to use the supplied msa instead of computing a new one.
tree_file (str): if a tree file has already been calculated, passing the path
to the pre-computed tree file will force ``phylogeny()`` to use
the supplied tree file instead of computing a new one. It is important to note that
only sequence names will be parsed from the tree_file, so if ``order_function`` or
``color_function`` is also provided, ensure that these functions only require the
sequence ID rather than the entire sequence.
aa (bool): if True, use amino acid sequences to compute the phylogeny.
Default is False.
root (Sequence, str: The root can be provided either as a ``Sequence`` object (if ``sequences``
are being provided) or as the name of a sequence that can be found either in
``sequences`` or in the provided ``aln_file`` or ``tree_file``. Note that if
either ``aln_file`` or ``tree_file`` are provided, the root must be provided
as the sequence name, not as a ``Sequence`` object (as the root sequence must
already be included in either ``aln_file`` or ``tree_file``. If the root is not
provided, the germline V-gene sequence of the
color_dict (dict): Dictionary with sequence IDs as keys and colors (hex format) as values. If any
sequence IDs are not found in the dict, they will be colored black. If neither ``color_dict`` nor
``color_function`` is provided, all leaves will be colored black.
color_function (func): Function that that accepts a ``Sequence`` object and returns the color
(as a hex code). If ``color_dict`` is also provided, ``color_function`` is ignored. Additionally,
``color_function`` will only be used if ``sequences`` are provided. If ``sequences`` are not provided
(instead using ``aln_file` or ``tree_file``), ``color_dict`` must be used instead of ``color_function``.
orders: a dictionary with sequence IDs as keys and orders (integers) as values.
If not provided, only the leaf branches will be colored (if <colors> or
<color_function> is provided).
chain: build a phylogeny using the given chain ('heavy' or 'light').
Default is 'heavy'.
filter_function: function used to filter sequences (identity-based clustering, for
example). The function should accept a list of Sequence objects and return
a list of Sequence objects.
just_pairs: if True, compute the phylogeny using only paired sequences.
Default (False) will use all sequences of the appropriate chain, paired or not.
scale (float): passed to ete3.TreeStyle() to set the scale of the tree figure. Increased
scale results in a wider tree.
branch_vert_margin (int): passed to ete3.TreeStyle() to set the branch_vertical_margin of
the tree figure. Increased branch_vert_margin results in a taller tree.
fontsize: size of the leaf labels. Default is 12.
show_names: show names of leaf nodes. Options are True (show labels for all leaf nodes),
False (don't show labels for any leaf nodes) or a list of sequence IDs for which
labels should be shown. Default is True.
mirror: flip the orientation of the tree. Default is to draw the tree from left to right.
Setting mirror to True results in the tree being drawn from right to left.
min_order_fraction: minimum fraction of downstream leaves requried to color a branch.
When coloring non-leaf nodes, the earliest 'order' with at least <min_order_fraction>
leaf nodes is used. Default is 0.1 (which corresponds to 10%).
figname_prefix: by default, figures will be named <lineage_id>.pdf. If prefix='prefix_' and
the lineage ID is 'ABC123', the figure file will be named 'prefix_ABC123.pdf'.
figname_suffix: by default, figures will be named <lineage_id>.pdf. If suffix='_suffix' and
the lineage ID is 'ABC123', the figure file will be named 'ABC123_suffix.pdf'. | entailment |
def igphyml(input_file=None, tree_file=None, root=None, verbose=False):
    '''
    Computes a phylogenetic tree using IgPhyML.

    .. note::
        IgPhyML must be installed. It can be downloaded from
        https://github.com/kbhoehn/IgPhyML.

    Args:
        input_file (str): Path to a Phylip-formatted multiple sequence alignment. Required.
        tree_file (str): Path to the output tree file.
        root (str): Name of the root sequence. Required.
        verbose (bool): If `True`, prints the standard output and standard error
            for each IgPhyML run. Default is `False`.

    Returns:
        str: Path to the IgPhyML output tree file.

    Raises:
        RuntimeError: If the ``igphyml`` executable is not found on PATH.
    '''
    if shutil.which('igphyml') is None:
        raise RuntimeError('It appears that IgPhyML is not installed.\nPlease install and try again.')
    # first, tree topology is estimated with the M0/GY94 model
    # (bugfix: the original referenced an undefined name 'aln_file')
    igphyml_cmd1 = 'igphyml -i {} -m GY -w M0 -t e --run_id gy94'.format(input_file)
    # split the command string: Popen without shell=True expects an argument list
    p1 = sp.Popen(igphyml_cmd1.split(), stdout=sp.PIPE, stderr=sp.PIPE)
    stdout1, stderr1 = p1.communicate()
    if verbose:
        # communicate() returns bytes; decode before concatenating with str
        print(stdout1.decode() + '\n')
        print(stderr1.decode() + '\n\n')
    # IgPhyML names the GY94 topology after the input file plus this suffix
    intermediate = input_file + '_igphyml_tree.txt_gy94'
    # now we fit the HLP17 model once the tree topology is fixed
    # (bugfix: the original format string mixed automatic '{}' with manual
    # '{0}' placeholders, which raises ValueError, and never used the
    # 'intermediate' tree path it had just built)
    igphyml_cmd2 = 'igphyml -i {0} -m HLP17 --root {1} -o lr -u {2} -o {3}'.format(
        input_file, root, intermediate, tree_file)
    p2 = sp.Popen(igphyml_cmd2.split(), stdout=sp.PIPE, stderr=sp.PIPE)
    stdout2, stderr2 = p2.communicate()
    if verbose:
        print(stdout2.decode() + '\n')
        print(stderr2.decode() + '\n')
    return tree_file + '_igphyml_tree.txt'
.. note::
IgPhyML must be installed. It can be downloaded from https://github.com/kbhoehn/IgPhyML.
Args:
input_file (str): Path to a Phylip-formatted multiple sequence alignment. Required.
tree_file (str): Path to the output tree file.
root (str): Name of the root sequence. Required.
verbose (bool): If `True`, prints the standard output and standard error for each IgPhyML run.
Default is `False`. | entailment |
def append_headers(f):
    """
    Decorator appending the standard web headers to a view's response:

    * ZSL version and information,
    * default CORS if not already set up,
    * cache.

    :param f: The decorated view function.
    :return: A wrapper which converts the view's result to a Response
             object and appends the headers.
    """
    @wraps(f)
    def _response_decorator(*args, **kwargs):
        result = f(*args, **kwargs)
        if isinstance(result, Response):
            response = result
        else:
            response = make_response(result)
        append_all(response)
        return response

    return _response_decorator
* ZSL version and information,
* default CORS if not already set up,
* cache.
:param f: The decorated function.
:return: The function which appends the web headers. | entailment |
def integrate_to_file(what, filename, start_line, end_line):
    """Integrate ``what`` into ``filename`` between two marker lines.

    The region between ``start_line`` and ``end_line`` (each compared as a
    whole line, including its trailing newline) is replaced by ``what``; if
    the markers are not present, the markers and content are appended at the
    end of the file. The file is rewritten atomically via a temporary file
    created in the same directory.

    Bugfixes vs. the original ("working every second run"):
      * the remainder of the file after the end marker was written in
        reversed order (the line list was reversed and never restored)
      * the temporary file was opened in binary mode, which breaks with
        ``str`` content on Python 3
      * the temporary file was created in the system temp directory, so
        ``os.rename`` could fail across filesystems

    :param what: content to place between the markers
    :param filename: target file (created if missing)
    :param start_line: exact line marking the start of the managed region
    :param end_line: exact line marking the end of the managed region
    """
    try:
        with open(filename) as f:
            lines = f.readlines()
    except IOError:
        lines = []
    output = []
    i = 0
    # copy everything before the start marker
    while i < len(lines) and lines[i] != start_line:
        output.append(lines[i])
        i += 1
    # insert the managed region
    output.append(start_line)
    output.append(what)
    output.append(end_line)
    # skip the old managed region, up to and including the end marker
    while i < len(lines) and lines[i] != end_line:
        i += 1
    if i < len(lines):
        i += 1
    # copy the rest, in the original order
    output.extend(lines[i:])
    target_dir = os.path.dirname(os.path.abspath(filename))
    tmp_file = tempfile.NamedTemporaryFile('w', dir=target_dir, delete=False)
    with tmp_file:
        tmp_file.writelines(output)
    os.rename(tmp_file.name, filename)
Integrate content into a file withing "line marks" | entailment |
def update_model(raw_model, app_model, forbidden_keys=None, inverse=False):
    """Updates the `raw_model` according to the values in the `app_model`.

    :param raw_model: Raw model which gets updated.
    :param app_model: App model (object or dict) holding the data.
    :param forbidden_keys: Data/attributes which will not be updated.
    :type forbidden_keys: list
    :param inverse: If `True`, every `app_model` attribute which exists on
                    the `raw_model` is copied over. If `False`, every
                    `raw_model` property present in the `app_model` is
                    updated.
    """
    forbidden = forbidden_keys if forbidden_keys is not None else []
    if type(app_model) != dict:
        app_model = app_model.__dict__
    # in inverse mode the app model drives the iteration,
    # otherwise the raw model's own attributes do
    source_keys = app_model if inverse else raw_model.__dict__
    for key in source_keys:
        logging.debug("Considering property {0}.".format(key))
        if key in forbidden:
            continue
        if inverse:
            applicable = hasattr(raw_model, key)
        else:
            applicable = key in app_model
        if applicable:
            logging.debug("Setting property {0} to value '{1}'.".format(key, app_model[key]))
            setattr(raw_model, key, app_model[key])
:param raw_model: Raw model which gets updated.
:param app_model: App model holding the data.
:param forbidden_keys: Data/attributes which will not be updated.
:type forbidden_keys: list
:param inverse: If the value is `True` all `app_model` attributes which are contained in the `raw_model` are
updated. If the value is `False` all `raw_model` properties which are in the `app_model` will be
updated. | entailment |
def progress_bar(finished, total, start_time=None, extra_info=None,
                 autocomplete=True, completion_string='\n\n'):
    '''
    Prints an ASCII progress bar.

    Each call to ``progress_bar`` will update the progress bar. An example
    of tracking the progress of a list of items would look like::

        job_list = [job1, job2, job3, ... jobN]
        total_jobs = len(job_list)
        # initialize the progress bar
        progress_bar(0, total_jobs)
        # do the jobs
        for i, job in enumerate(job_list):
            do_job(job)
            progress_bar(i + 1, total_jobs)

    Args:

        finished (int): Number of finished jobs.

        total (int): Total number of jobs.

        start_time (datetime): Start time, as a ``datetime.datetime`` object.
            Only required if you want to display execution time alongside
            the progress bar. If not provided, execution time is not shown.

        extra_info (str): A string containing extra information to be displayed
            at the end of the progbar string. Examples include the number of failed
            jobs, the name of the job batch currently being processed, etc.

        autocomplete (bool): If `True`, will append `completion_string` to the
            end of the progbar string once all jobs have finished.

        completion_string (str): Will be appended to the progbar string if
            `autocomplete` is `True` and all jobs have finished. The default
            is two newlines (the previous default, the literal two-character
            string '/n', was a typo and did not match the documentation).
    '''
    pct = int(100. * finished / total)
    # the bar itself is 50 characters wide, one tick per 2%
    ticks = int(pct / 2)
    spaces = int(50 - ticks)
    if start_time is not None:
        elapsed = (datetime.now() - start_time).seconds
        # zero-pad minutes and seconds to two digits
        minute_str = str(int(elapsed / 60)).zfill(2)
        second_str = str(int(elapsed % 60)).zfill(2)
        prog_bar = '\r({}/{}) |{}{}| {}% ({}:{}) '.format(
            finished, total, '|' * ticks, ' ' * spaces, pct, minute_str, second_str)
    else:
        prog_bar = '\r({}/{}) |{}{}| {}% '.format(
            finished, total, '|' * ticks, ' ' * spaces, pct)
    if extra_info is not None:
        prog_bar += str(extra_info)
    if autocomplete and finished == total:
        prog_bar += completion_string
    sys.stdout.write(prog_bar)
    sys.stdout.flush()
Each call to ``progress_bar`` will update the progress bar. An example
of tracking the progress of a list of items would look like::
job_list = [job1, job2, job3, ... jobN]
total_jobs = len(job_list)
#initialize the progress bar
progress_bar(0, total_jobs)
# do the jobs
for i, job in enumerate(job_list):
do_job(job)
progress_bar(i + 1, total_jobs)
Args:
finished (int): Number of finished jobs.
total (int): Total number of jobs.
start_time (datetime): Start time, as a ``datetime.datetime`` object.
Only required if you want to display execution time alongside
the progress bar. If not provided, execution time is not shown.
extra_info (str): A string containing extra information to be displayed
at the end of the progbar string. Examples include the number of failed
jobs, the name of the job batch currently being processed, etc.
complete (bool): If `True`, will append `completion_string` to the end
of the progbar string.
completion_string (str): Will be appended to the progbar string if
`complete` is `True`. Default is `'\n\n'`. | entailment |
def _ite(test: str, in1: str, in0: str, output: str = None):
    r"test -> in1 /\ ~test -> in0"
    # the three wires must be distinct
    assert len({test, in0, in1}) == 3
    # test -> in1  ==  ~test | in1
    when_true = bit_flipper([test]) >> or_gate([test, in1], 'true_out')
    # ~test -> in0  ==  test | in0
    when_false = or_gate([test, in0], 'false_out')
    combined = when_true | when_false
    return combined >> and_gate(['true_out', 'false_out'], output)
def update(self, app_model, forbidden_keys=None, inverse=False):
    """
    Updates this raw model from ``app_model``.

    Delegates to `zsl.utils.model_helper.update_model`; consult its
    documentation for the semantics of the arguments.
    """
    keys = [] if forbidden_keys is None else forbidden_keys
    update_model(self, app_model, keys, inverse)
def xml_to_json(element, definition, required=False):
    """Convert XML (ElementTree) to dictionary from a definition schema.

    Definition schema can be a simple string - XPath or @attribute for
    direct extraction or a complex one described by

    * dictionary ``{key: 'xpath or @attribute', second: 'complex definition'}`` \
      required parameters can be marked with * at the end
    * list ``[xpath, [definition]]`` - create a list of all elements found by \
      xpath, parse the parts with given definition if provided as second \
      argument
    * Callable - parse the element by given function, can be handy as a part \
      of complex definition

    :param element: ElementTree element
    :type element: ElementTree.Element
    :param definition: schema for the json
    :type definition: Union[str, tuple, dict, list, Callable]
    :param required: parsed value should be not None
    :type required: bool
    :return: parsed xml
    :rtype: Union[dict, str, list]
    """
    # simple string definition: attribute or XPath
    if isinstance(definition, str) and definition:
        if definition.startswith('@'):
            return element.get(definition[1:])
        found = element.find(definition)
        if found is None:
            if required:
                raise NotCompleteXmlException('Expecting {0} in element {1}'.format(definition, element.tag))
            return None
        return found.text.strip() if found.text else None
    # complex definitions dispatch on the container type
    if isinstance(definition, tuple):
        return _parse_tuple(element, definition, required)
    if isinstance(definition, dict):
        return _parse_dict(element, definition)
    if isinstance(definition, list):
        return _parse_list(element, definition)
    if hasattr(definition, '__call__'):
        return definition(element)
    # default: the element's own text
    return element.text.strip() if element.text else None
Definition schema can be a simple string - XPath or @attribute for
direct extraction or a complex one described by
* dictionary ``{key: 'xpath or @attribute', second: 'complex definition'}`` \
required parameters can be marked with * at the end
* list ``[xpath, [definition]]`` - create a list of all elements found by \
xpath, parse the parts with given definition if provided as second \
argument
* Callable - parse the element by given function, can be handy as a part \
of complex definition
:param element: ElementTree element
:type element: ElementTree.Element
:param definition: schema for the json
:type definition: Union[str, tuple, dict, list, Callable]
:param required: parsed value should be not None
:type required: bool
:return: parsed xml
:rtype: Union[dict, str, list] | entailment |
def _parse_dict(element, definition):
    """Parse xml element by a definition given in dict format.

    Each key may carry a trailing ``*`` to mark the value as required
    (see :func:`_parse_name`).

    :param element: ElementTree element
    :param definition: definition schema
    :type definition: dict
    :return: parsed xml
    :rtype: dict
    """
    result = {}
    for raw_name, sub_definition in viewitems(definition):
        key, required = _parse_name(raw_name)
        result[key] = xml_to_json(element, sub_definition, required)
    return result
:param element: ElementTree element
:param definition: definition schema
:type definition: dict
:return: parsed xml
:rtype: dict | entailment |
def _parse_tuple(element, definition, required):
    """Parse xml element by a definition given in tuple format.

    A one-element tuple is parsed like its single definition. Longer
    tuples start either with a callable (applied to the values parsed by
    the remaining definitions) or with an XPath string (the matched
    sub-element is parsed with the second definition).

    :param element: ElementTree element
    :param definition: definition schema
    :type definition: tuple
    :param required: parsed value should be not None
    :type required: bool
    :return: parsed xml
    """
    if not definition:
        return None
    if len(definition) == 1:
        return xml_to_json(element, definition[0], required)
    head = definition[0]
    if hasattr(head, '__call__'):
        # apply the function to the values parsed by the remaining definitions
        return head(*[xml_to_json(element, d) for d in definition[1:]])
    if not isinstance(head, str):
        raise XmlToJsonException('Tuple definition must start with function or string')
    if head[0] == '@':
        raise XmlToJsonException('Tuple definition must not start with attribute')
    child = element.find(head)
    if child is None:
        if required:
            raise NotCompleteXmlException('Expecting {0} in element {1}'.format(head, element.tag))
        return None
    return xml_to_json(child, definition[1], required)
:param element: ElementTree element
:param definition: definition schema
:type definition: tuple
:param required: parsed value should be not None
:type required: bool
:return: parsed xml | entailment |
def _parse_list(element, definition):
    """Parse xml element by definition given by list.

    Find all elements matched by the string given as the first value
    in the list (as XPath or @attribute). If there is a second argument
    it will be handled as a definition for the matched elements,
    otherwise their text is taken.

    :param element: ElementTree element
    :param definition: definition schema
    :type definition: list
    :return: parsed xml
    :rtype: list
    """
    if not definition:
        raise XmlToJsonException('List definition needs some definition')
    path = definition[0]
    item_definition = definition[1] if len(definition) > 1 else None
    return [xml_to_json(found, item_definition) for found in element.findall(path)]
Find all elements matched by the string given as the first value
in the list (as XPath or @attribute).
If there is a second argument it will be handled as a definitions
for the elements matched or the text when not.
:param element: ElementTree element
:param definition: definition schema
:type definition: list
:return: parsed xml
:rtype: list | entailment |
def _parse_name(name):
"""Parse name in complex dict definition.
In complex definition required params can be marked with `*`.
:param name:
:return: name and required flag
:rtype: tuple
"""
required = False
if name[-1] == '*':
name = name[0:-1]
required = True
return name, required | Parse name in complex dict definition.
In complex definition required params can be marked with `*`.
:param name:
:return: name and required flag
:rtype: tuple | entailment |
def get_host_port(spec, default_port):
    "parse 'hostname:22' into a host and port, with the port optional"
    parts = spec.split(':', 1)
    if len(parts) < 2:
        # no explicit port given -- fall back to the default
        parts.append(default_port)
    return parts[0], int(parts[1])
def name(self):
    '''
    Returns the lineage name, or None if the name cannot be found.

    The name is the clonify ID of the first heavy chain that carries a
    'clonify' annotation.
    '''
    for pair in self.heavies:
        if 'clonify' in pair.heavy:
            return pair.heavy['clonify']['id']
    return None
def verified_pairs(self):
    '''
    Returns all lineage Pair objects that contain verified pairings.

    Light-chain verification is performed lazily: if the first paired
    sequence does not yet carry a ``verified`` attribute,
    ``verify_light_chains()`` is called first (it sets ``verified`` on
    every pair).

    :return: list of Pair objects whose ``verified`` attribute is True
    '''
    # NOTE(review): assumes self.just_pairs is non-empty; an empty lineage
    # would raise IndexError here -- confirm callers guarantee this.
    if not hasattr(self.just_pairs[0], 'verified'):
        self.verify_light_chains()
    return [p for p in self.just_pairs if p.verified]
def size(self, pairs_only=False):
    '''
    Calculate the size of the lineage.

    Args:
        pairs_only (bool): if True, count only paired sequences.
            Default is False.

    Returns:
        int: number of paired sequences if ``pairs_only`` is True,
        otherwise the number of heavy chains.
    '''
    members = self.just_pairs if pairs_only else self.heavies
    return len(members)
Inputs (optional)
-----------------
pairs_only: count only paired sequences
Returns
-------
Lineage size (int) | entailment |
def verify_light_chains(self, threshold=0.9):
    '''
    Clusters the light chains to identify potentially spurious (non-lineage)
    pairings.

    Following clustering, all pairs whose light chain falls into the largest
    light chain cluster are assumed to be correctly paired and get
    ``verified = True``; all other pairs get ``verified = False``.

    Args:
        threshold (float): CD-HIT clustering threshold. Default is 0.9.
    '''
    light_seqs = [pair.light for pair in self.lights]
    clusters = cluster(light_seqs, threshold=threshold)
    # the largest cluster is taken as the genuine lineage light chain
    largest = max(clusters, key=lambda c: c.size)
    verified_ids = set(largest.ids)
    for pair in self.lights:
        pair.verified = pair.name in verified_ids
pairings. Following clustering, all pairs in the largest light chain
cluster are assumed to be correctly paired. For each of those pairs,
the <verified> attribute is set to True. For pairs not in the largest
light chain cluster, the <verified> attribute is set to False.
Inputs (optional)
-----------------
threshold: CD-HIT clustering threshold. Default is 0.9. | entailment |
def dot_alignment(self, seq_field='vdj_nt', name_field='seq_id', uca=None,
                  chain='heavy', uca_name='UCA', as_fasta=False, just_alignment=False):
    '''
    Returns a multiple sequence alignment of all lineage sequences with the UCA,
    where matches to the UCA are shown as dots and mismatches are shown as the
    mismatched residue.

    Args:
        seq_field (str): sequence field to be used for alignment. Default is 'vdj_nt'.
        name_field (str): field used for the sequence name. Default is 'seq_id'.
        uca: sequence to align against; if None, this lineage's UCA (heavy or
            light, depending on ``chain``) is used. Note that the passed
            object's ``id`` is overwritten with 'UCA'.
        chain (str): either 'heavy' or 'light'. Default is 'heavy'.
        uca_name (str): label used for the UCA row in the output. Default is 'UCA'.
        as_fasta (bool): if True, return the alignment as FASTA-formatted records.
        just_alignment (bool): if True, return only the aligned strings (no names).

    Returns:
        The dot alignment as a single string, or a list of aligned strings
        if ``just_alignment`` is True.
    '''
    if uca is None:
        uca = self.uca.heavy if chain == 'heavy' else self.uca.light
    # the UCA row is located in the MUSCLE output via this fixed id
    uca.id = 'UCA'
    if chain == 'heavy':
        # only sequences that actually carry the requested field are aligned
        sequences = [p.heavy for p in self.heavies if seq_field in p.heavy]
        if name_field != 'seq_id':
            uca[name_field] = uca['seq_id']
        sequences.append(uca)
        seqs = [(s[name_field], s[seq_field]) for s in sequences]
    else:
        sequences = [p.light for p in self.lights if seq_field in p.light]
        if name_field != 'seq_id':
            uca[name_field] = uca['seq_id']
        sequences.append(uca)
        seqs = [(s[name_field], s[seq_field]) for s in sequences]
    aln = muscle(seqs)
    # germline (UCA) row of the alignment; all other rows are compared to it
    g_aln = [a for a in aln if a.id == 'UCA'][0]
    dots = [(uca_name, str(g_aln.seq)), ]
    for seq in [a for a in aln if a.id != 'UCA']:
        s_aln = ''
        for g, q in zip(str(g_aln.seq), str(seq.seq)):
            if g == q == '-':
                # gap in both sequences stays a gap
                s_aln += '-'
            elif g == q:
                # match to the UCA is rendered as a dot
                s_aln += '.'
            else:
                s_aln += q
        dots.append((seq.id, s_aln))
    if just_alignment:
        return [d[1] for d in dots]
    # pad names so the aligned sequences line up in plain-text output
    name_len = max([len(d[0]) for d in dots]) + 2
    dot_aln = []
    for d in dots:
        if as_fasta:
            dot_aln.append('>{}\n{}'.format(d[0], d[1]))
        else:
            spaces = name_len - len(d[0])
            dot_aln.append(d[0] + ' ' * spaces + d[1])
    return '\n'.join(dot_aln)
where matches to the UCA are shown as dots and mismatches are shown as the
mismatched residue.
Inputs (optional)
-----------------
seq_field: the sequence field to be used for alignment. Default is 'vdj_nt'.
name_field: field used for the sequence name. Default is 'seq_id'.
chain: either 'heavy' or 'light'. Default is 'heavy'.
Returns
-------
The dot alignment (string) | entailment |
def to_child(self, append_message="", **kwargs):
    """Basic implementation of returning a child state.

    The child is a shallow copy of this state with the given keyword
    arguments applied, ``parent`` set to this state, and
    ``append_message`` appended to the message list.

    :raises KeyError: if a keyword argument is not in ``_child_params``
    """
    unknown = set(kwargs) - set(self._child_params)
    if unknown:
        raise KeyError("Invalid init params for State: %s" % ", ".join(unknown))
    child = copy(self)
    for key, value in kwargs.items():
        setattr(child, key, value)
    child.parent = self
    # normalize plain-string messages to the dict message format
    if not isinstance(append_message, dict):
        append_message = {"msg": append_message, "kwargs": {}}
    child.messages = [*self.messages, append_message]
    return child
def complex_el_from_dict(parent, data, key):
    """Create element from a dict definition and add it to ``parent``.

    :param parent: parent element
    :type parent: Element
    :param data: dictionary with element definitions; it can be a simple \
    {element_name: 'element_value'} or complex \
    {element_name: {_attr: {name: value, name1: value1}, _text: 'text'}}
    :param key: element name and key in ``data``
    :return: created element
    """
    element = ET.SubElement(parent, key)
    definition = data[key]
    if not isinstance(definition, dict):
        # simple form: the value is the element text
        element.text = definition
        return element
    if '_attr' in definition:
        for attr_name, attr_value in viewitems(definition['_attr']):
            element.set(attr_name, attr_value)
    if '_text' in definition:
        element.text = definition['_text']
    return element
:param parent: parent element
:type parent: Element
:param data: dictionary with elements definitions, it can be a simple \
{element_name: 'element_value'} or complex \
{element_name: {_attr: {name: value, name1: value1}, _text: 'text'}}
:param key: element name and key in ``data``
:return: created element | entailment |
def element_from_dict(parent, data, element):
    """Add a sub-element named ``element`` to ``parent`` with the text
    ``data[element]``; the consumed entry is removed from ``data``.

    :param parent: parent element
    :type parent: Element
    :param data: dictionary where data[element] is the desired value
    :type data: dict(str, str)
    :param element: name of the new element
    :type element: str
    :return: created element
    """
    node = ET.SubElement(parent, element)
    # pop so callers can later iterate the remaining (optional) keys
    node.text = data.pop(element)
    return node
will be removed from the ``data``.
:param parent: parent element
:type parent: Element
:param data: dictionary where data[element] is desired value
:type data: dict(str, str)
:param element: name of the new element
:type element: str
:return: created element | entailment |
def rss_create(channel, articles):
    """Create RSS xml feed.

    :param channel: channel info [title, link, description, language]
    :type channel: dict(str, str)
    :param articles: list of articles; an article is a dictionary with the \
    required fields [title, description, link] and any optional ones, which \
    will result in `<dict_key>dict_value</dict_key>`
    :type articles: list(dict(str,str))
    :return: the feed document
    :rtype: ElementTree.ElementTree
    """
    channel = channel.copy()
    # copy each article dict as well: element_from_dict pops the required
    # fields and would otherwise mutate the caller's dictionaries
    # (the original only copied the list, not the dicts inside it)
    articles = [dict(article) for article in articles]
    rss = ET.Element('rss')
    rss.set('version', '2.0')
    channel_node = ET.SubElement(rss, 'channel')
    element_from_dict(channel_node, channel, 'title')
    element_from_dict(channel_node, channel, 'link')
    element_from_dict(channel_node, channel, 'description')
    element_from_dict(channel_node, channel, 'language')
    for article in articles:
        item = ET.SubElement(channel_node, 'item')
        # required fields are popped; the loop below emits the remaining keys
        element_from_dict(item, article, 'title')
        element_from_dict(item, article, 'description')
        element_from_dict(item, article, 'link')
        for key in article:
            complex_el_from_dict(item, article, key)
    return ET.ElementTree(rss)
:param channel: channel info [title, link, description, language]
:type channel: dict(str, str)
:param articles: list of articles, an article is a dictionary with some \
required fields [title, description, link] and any optional, which will \
result to `<dict_key>dict_value</dict_key>`
:type articles: list(dict(str,str))
:return: root element
:rtype: ElementTree.Element | entailment |
def compute_token(random_token, config):
    """Compute a hash of the given token with a preconfigured secret.

    :param random_token: random token
    :type random_token: str
    :param config: application config providing the secret under
        ``TOKEN_SERVICE_SECURITY_CONFIG``
    :return: hashed token (uppercase hex SHA-1 digest)
    :rtype: str
    """
    secure_token = config[TOKEN_SERVICE_SECURITY_CONFIG]
    sha1hash = hashlib.sha1()
    # encode explicitly: hashlib.update() requires bytes on Python 3
    sha1hash.update((random_token + secure_token).encode('utf-8'))
    return sha1hash.hexdigest().upper()
:param random_token: random token
:type random_token: str
:return: hashed token
:rtype: str | entailment |
def verify_security_data(security):
    """Verify an untrusted security token.

    Recomputes the hash from the token's random part and compares it
    with the hashed part supplied by the client.

    :param security: security token with TOKEN_RANDOM and TOKEN_HASHED keys
    :type security: dict
    :return: True if valid
    :rtype: bool
    """
    random_token = security[TOKEN_RANDOM]
    hashed_token = security[TOKEN_HASHED]
    # NOTE(review): compute_token is called with a single argument here,
    # which assumes its 'config' parameter is injected/defaulted -- confirm.
    return str(hashed_token) == str(compute_token(random_token))
:param security: security token
:type security: dict
:return: True if valid
:rtype: bool | entailment |
def initialize(log_file, project_dir=None, debug=False):
    '''
    Initializes an AbTools pipeline.

    Initialization includes printing the AbTools splash, setting up logging,
    creating the project directory, and logging both the project directory
    and the log location.

    Args:
        log_file (str): Path to the log file. Required.
        project_dir (str): Path to the project directory. If not provided,
            the project directory won't be created and the location won't be logged.
        debug (bool): If ``True``, the logging level will be set to ``logging.DEBUG``.
            Default is ``False``, which logs at ``logging.INFO``.

    Returns:
        logger
    '''
    print_splash()
    log.setup_logging(log_file, print_log_location=False, debug=debug)
    logger = log.get_logger('pipeline')
    if project_dir is not None:
        # normalize to avoid logging paths with trailing separators
        make_dir(os.path.normpath(project_dir))
        logger.info('PROJECT DIRECTORY: {}'.format(project_dir))
        logger.info('')
    logger.info('LOG LOCATION: {}'.format(log_file))
    print('')
    return logger
Initialization includes printing the AbTools splash, setting up logging,
creating the project directory, and logging both the project directory
and the log location.
Args:
log_file (str): Path to the log file. Required.
project_dir (str): Path to the project directory. If not provided,
the project directory won't be created and the location won't be logged.
debug (bool): If ``True``, the logging level will be set to ``logging.DEBUG``.
Default is ``FALSE``, which logs at ``logging.INFO``.
Returns:
logger | entailment |
def list_files(d, extension=None):
    '''
    Lists files in a given directory.

    Args:
        d (str): Path to a directory. If ``d`` is not a directory, it is
            returned as a single-element list.
        extension (str or list): If supplied, only files with the specified
            extension(s) will be returned. Matching is attempted against the
            extension as given, uppercased, and lowercased. Default is None,
            which returns all files in ``d``.

    Returns:
        list: A sorted list of file paths.
    '''
    if os.path.isdir(d):
        expanded = os.path.expanduser(d)
        files = sorted(glob.glob(expanded + '/*'))
    else:
        files = [d]
    if extension is None:
        return files
    if type(extension) in STR_TYPES:
        extension = [extension]

    def _matches(path):
        ext = path.split('.')[-1]
        return ext in extension or ext.upper() in extension or ext.lower() in extension

    return [f for f in files if _matches(f)]
Args:
d (str): Path to a directory.
extension (str): If supplied, only files that contain the
specificied extension will be returned. Default is ``False``,
which returns all files in ``d``.
Returns:
list: A sorted list of file paths. | entailment |
def _get_version(ctx, _, value):
    """Click callback for option to show current ZSL version.

    Prints the ZSL and Python versions and exits the Click context,
    unless the flag was not supplied or Click is doing resilient parsing.
    """
    if not value or ctx.resilient_parsing:
        return
    message = 'Zsl %(version)s\nPython %(python_version)s'
    values = {'version': version, 'python_version': sys.version}
    click.echo(message % values, color=ctx.color)
    ctx.exit()
def create_model_resource(resource_map, name, app=Injected):
    """Create a model resource from a dict ``resource_map``

    {'resource name': ('model package', 'model class')}

    A three-element description additionally names the resource class to
    instantiate instead of the default ``ModelResource``.

    :param resource_map: dict with resource descriptions
    :type resource_map: dict(str, tuple(str))
    :param name: name of the concrete resource
    :param app: current application, injected
    :type app: Zsl
    :return: instantiated model resource
    :raises ImportError: when the description is missing or malformed
    """
    try:
        description = resource_map[name]
    except KeyError:
        raise ImportError("Missing resource description for resource '{0}'".format(name))
    if len(description) == 2:
        module_name, model_name = description
        resource_class = ModelResource
    elif len(description) == 3:
        module_name, model_name, resource_class = description
    else:
        raise ImportError("Invalid resource description for resource '{0}'".format(name))
    module = importlib.import_module(module_name)
    model_cls = getattr(module, model_name)
    return app.injector.create_object(resource_class, {'model_cls': model_cls})
{'resource name': ('model package', 'model class')}
:param resource_map: dict with resource descriptions
:type resource_map: dict(str, tuple(str))
:param name: name of the concrete resource
:param app: current application, injected
:type app: Zsl
:return: instantiated model resource | entailment |
def dict_pick(dictionary, allowed_keys):
    """
    Return a new dictionary containing only the entries whose key is
    found in `allowed_keys`.
    """
    picked = {}
    for key, value in viewitems(dictionary):
        if key in allowed_keys:
            picked[key] = value
    return picked
def page_to_offset(params):
    """
    Transforms `page`/`per_page` from `params` to `limit`/`offset` suitable for SQL.

    :param dict params: The dictionary containing `page` and `per_page`; those
                        keys are removed and replaced with `limit` and
                        `offset`. If `page` is absent, nothing is changed.
    """
    if 'page' not in params:
        return
    page = params.pop('page')
    # 'per_page' is just an alias for 'limit'
    if 'per_page' in params:
        per_page = params.pop('per_page')
    else:
        per_page = params.get('limit', 10)
    params['offset'] = int(per_page) * (int(page) - 1)
    params['limit'] = per_page
:param dict params: The dictionary containing `page` and `per_page` values will be added
the values `limit` and `offset`. | entailment |
def create(self, params, args, data):
    # type: (str, dict, dict) -> AppModel
    """
    POST /resource/model_cls/
    data

    Create a new resource from ``data``, persist it, and return the
    saved app model.
    """
    ctx = self._create_context(params, args, data)
    new_model = self._create_one(ctx)
    self._save_one(new_model, ctx)
    return self._return_saved_one(new_model, ctx)
data
Create new resource | entailment |
def read(self, params=None, args=None, data=None):
    # type: (str, dict, dict) -> Union[List[AppModel], AppModel]
    """
    GET /resource/model_cls/[params:id]?[args:{limit,offset,page,per_page,filter_by,order_by,related,fields}]

    Get a single resource (when an id is given in ``params``), the
    collection count or description (when ``count``/``desc`` is in
    ``args``), or the collection itself.

    :param params
    :type params list
    :param args
    :type args dict
    :param data
    :type data: dict
    """
    if params is None:
        params = []
    if args is None:
        args = {}
    ctx = self._create_context(params, args, data)
    row_id = ctx.get_row_id()
    if row_id:
        return self._get_one(row_id, ctx)
    if 'count' in args:
        return self._get_collection_count(ctx)
    if 'desc' in args:
        return self._get_collection_desc(ctx)
    # translate page/per_page pagination into SQL limit/offset
    if 'page' in ctx.args:
        page_to_offset(ctx.args)
    return self._get_collection(ctx)
Get resource/s
:param params
:type params list
:param args
:type args dict
:param data
:type data: dict | entailment |
def update(self, params, args, data):
    # type: (str, dict, dict) -> Union[List[AppModel], AppModel]
    """
    PUT /resource/model_cls/[params:id]
    data

    Update a single resource (when an id is given) or a collection.
    """
    ctx = self._create_context(params, args, data)
    if ctx.get_row_id() is not None:
        model = self._update_one(ctx)
        return model.get_app_model() if model is not None else None
    return app_models(self._update_collection(ctx))
data
Update resource/s | entailment |
def delete(self, params, args, data):
    # type: (str, dict, dict) -> None
    """
    DELETE /resource/model_cls/[params]?[args]

    Delete a single resource (when an id is given) or a collection.
    """
    ctx = self._create_context(params, args, data)
    row_id = ctx.get_row_id()
    if row_id is None:
        return self._delete_collection(ctx)
    return self._delete_one(row_id, ctx)
delete resource/s | entailment |
def _create_one(self, ctx):
"""
Creates an instance to be saved when a model is created.
"""
assert isinstance(ctx, ResourceQueryContext)
fields = dict_pick(ctx.data, self._model_columns)
model = self.model_cls(**fields)
return model | Creates an instance to be saved when a model is created. | entailment |
def _save_one(self, model, ctx):
"""
Saves the created instance.
"""
assert isinstance(ctx, ResourceQueryContext)
self._orm.add(model)
self._orm.flush() | Saves the created instance. | entailment |
def _update_one(self, ctx):
"""
Update row
"""
assert isinstance(ctx, ResourceQueryContext)
fields = ctx.data
row_id = ctx.get_row_id()
return self._update_one_simple(row_id, fields, ctx) | Update row | entailment |
def _update_collection(self, ctx):
"""
Bulk update
"""
assert isinstance(ctx, ResourceQueryContext)
models = []
for row in ctx.data:
models.append(self._update_one_simple(row.pop('id'), row, ctx))
return models | Bulk update | entailment |
def _create_delete_one_query(self, row_id, ctx):
"""
Delete row by id query creation.
:param int row_id: Identifier of the deleted row.
:param ResourceQueryContext ctx: The context of this delete query.
"""
assert isinstance(ctx, ResourceQueryContext)
return self._orm.query(self.model_cls).filter(self._model_pk == row_id) | Delete row by id query creation.
:param int row_id: Identifier of the deleted row.
:param ResourceQueryContext ctx: The context of this delete query. | entailment |
def _delete_collection(self, ctx):
"""
Delete a collection from DB, optionally filtered by ``filter_by``
"""
assert isinstance(ctx, ResourceQueryContext)
filter_by = ctx.get_filter_by()
q = self._orm.query(self.model_cls)
if filter_by is not None:
q = self.to_filter(q, filter_by)
return q.delete() | Delete a collection from DB, optionally filtered by ``filter_by`` | entailment |
def send_email(sender, receivers, subject, text=None, html=None, charset='utf-8', config=Injected):
"""Sends an email.
:param sender: Sender as string or None for default got from config.
:param receivers: String or array of recipients.
:param subject: Subject.
:param text: Plain text message.
:param html: Html message.
:param charset: Charset.
:param config: Current configuration
"""
smtp_config = config['SMTP']
# Receivers must be an array.
if not isinstance(receivers, list) and not isinstance(receivers, tuple):
receivers = [receivers]
# Create the messages
msgs = []
if text is not None:
msgs.append(MIMEText(text, 'plain', charset))
if html is not None:
msgs.append(MIMEText(html, 'html', charset))
if len(msgs) == 0:
raise Exception("No message is given.")
if len(msgs) == 1:
msg = msgs[0]
else:
msg = MIMEMultipart()
for m in msgs:
msg.attach(m)
# Default sender.
if sender is None:
sender = smtp_config['SENDER']
# Fill the info.
msg['Subject'] = subject
msg['From'] = sender
msg['To'] = ", ".join(receivers)
# Send.
smtp_server = smtplib.SMTP(**(smtp_config['SERVER']))
smtp_server.sendmail(sender, receivers, msg.as_string())
smtp_server.quit() | Sends an email.
:param sender: Sender as string or None for default got from config.
:param receivers: String or array of recipients.
:param subject: Subject.
:param text: Plain text message.
:param html: Html message.
:param charset: Charset.
:param config: Current configuration | entailment |
def add_packages(self, packages):
"""
Adds an automatic resolution of urls into tasks.
:param packages: The url will determine package/module and the class.
:return: self
"""
# type: (List[str])->TaskNamespace
assert isinstance(packages, list), "Packages must be list of strings."
self._task_packages += packages
return self | Adds an automatic resolution of urls into tasks.
:param packages: The url will determine package/module and the class.
:return: self | entailment |
def route(self, path):
# type: (str)->Tuple[Any, Callable]
"""
Returns the task handling the given request path.
"""
logging.getLogger(__name__).debug("Routing path '%s'.", path)
cls = None
for strategy in self._strategies:
if strategy.can_route(path):
cls = strategy.route(path)
break
if cls is None:
raise RoutingError(path)
return self._create_result(cls) | Returns the task handling the given request path. | entailment |
def _create_result(self, cls):
# type:(Callable)->Tuple[Any, Callable]
"""
Create the task using the injector initialization.
:param cls:
:return:
"""
task = instantiate(cls)
logging.getLogger(__name__).debug("Task object {0} created [{1}].".format(cls.__name__, task))
return task, get_callable(task) | Create the task using the injector initialization.
:param cls:
:return: | entailment |
def abi_to_fasta(input, output):
'''
Converts ABI or AB1 files to FASTA format.
Args:
input (str): Path to a file or directory containing abi/ab1 files or
zip archives of abi/ab1 files
output (str): Path to a directory for the output FASTA files
'''
direcs = [input, ]
# unzip any zip archives
zip_files = list_files(input, ['zip'])
if zip_files:
direcs.extend(_process_zip_files(zip_files))
# convert files
for d in direcs:
files = list_files(d, ['ab1', 'abi'])
seqs = [SeqIO.read(open(f, 'rb'), 'abi') for f in files]
# seqs = list(chain.from_iterable(seqs))
fastas = ['>{}\n{}'.format(s.id, str(s.seq)) for s in seqs]
ofile = os.path.basename(os.path.normpath(d)) + '.fasta'
opath = os.path.join(output, ofile)
open(opath, 'w').write('\n'.join(fastas)) | Converts ABI or AB1 files to FASTA format.
Args:
input (str): Path to a file or directory containing abi/ab1 files or
zip archives of abi/ab1 files
output (str): Path to a directory for the output FASTA files | entailment |
def extend(instance, new_class):
"""Adds new_class to the ancestors of instance.
:param instance: Instance that will have a new ancestor.
:param new_class: Ancestor.
"""
instance.__class__ = type(
'%s_extended_with_%s' % (instance.__class__.__name__, new_class.__name__),
(new_class, instance.__class__,),
{}
) | Adds new_class to the ancestors of instance.
:param instance: Instance that will have a new ancestor.
:param new_class: Ancestor. | entailment |
def generate_js_models(module, models, collection_prefix, model_prefix,
model_fn, collection_fn, marker, integrate, js_file):
# type: (str, str, str, str, str, str, str, bool, str) -> Union[str, None]
"""Generate models for Backbone Javascript applications.
:param module: module from which models are imported
:param models: model name, can be a tuple WineCountry/WineCountries as singular/plural
:param model_prefix: namespace prefix for models (app.models.)
:param collection_prefix: namespace prefix for collection (App.collections.)
:param model_fn: name of model constructor (MyApp.bb.Model)
:param collection_fn: name of collection constructor (MyApp.bb.Collection)
:param marker: marker to indicate the auto generated code
:param integrate: integrate to file
:param js_file: file to integrate
:return: generated models or nothing if writing into a file
"""
options = {
'model_prefix': model_prefix,
'collection_prefix': collection_prefix,
'model_fn': model_fn,
'collection_fn': collection_fn
}
generator = ModelGenerator(module,
**{o: options[o] for o in options if options[o] is not None})
models = generator.generate_models(parse_model_arg(models))
if integrate:
sys.stderr.write("Integrate is really experimental")
if not marker:
marker = hashlib.md5("{0}{1}".format(module, models)).hexdigest()
start = "// * -- START AUTOGENERATED %s -- * //\n" % marker
end = "// * -- END AUTOGENERATED %s -- * //\n" % marker
return integrate_to_file("\n".join(models), js_file, start, end)
else:
return "\n".join(models) | Generate models for Backbone Javascript applications.
:param module: module from which models are imported
:param models: model name, can be a tuple WineCountry/WineCountries as singular/plural
:param model_prefix: namespace prefix for models (app.models.)
:param collection_prefix: namespace prefix for collection (App.collections.)
:param model_fn: name of model constructor (MyApp.bb.Model)
:param collection_fn: name of collection constructor (MyApp.bb.Collection)
:param marker: marker to indicate the auto generated code
:param integrate: integrate to file
:param js_file: file to integrate
:return: generated models or nothing if writing into a file | entailment |
def _map_table_name(self, model_names):
"""
Pre foregin_keys potrbejeme pre z nazvu tabulky zistit class,
tak si to namapujme
"""
for model in model_names:
if isinstance(model, tuple):
model = model[0]
try:
model_cls = getattr(self.models, model)
self.table_to_class[class_mapper(model_cls).tables[0].name] = model
except AttributeError:
pass | Pre foregin_keys potrbejeme pre z nazvu tabulky zistit class,
tak si to namapujme | entailment |
def push_msg(self, channel_id, msg):
"""Push ``msg`` for given ``channel_id``. If ``msg`` is not string, it
will be urlencoded
"""
if type(msg) is not str:
msg = urlencode(msg)
return self.push(channel_id, msg) | Push ``msg`` for given ``channel_id``. If ``msg`` is not string, it
will be urlencoded | entailment |
def push_object(self, channel_id, obj):
"""Push ``obj`` for ``channel_id``. ``obj`` will be encoded as JSON in
the request.
"""
return self.push(channel_id, json.dumps(obj).replace('"', '\\"')) | Push ``obj`` for ``channel_id``. ``obj`` will be encoded as JSON in
the request. | entailment |
def push(self, channel_id, data):
"""Push message with POST ``data`` for ``channel_id``
"""
channel_path = self.channel_path(channel_id)
response = requests.post(channel_path, data)
return response.json() | Push message with POST ``data`` for ``channel_id`` | entailment |
def delete_channel(self, channel_id):
"""Deletes channel
"""
req = requests.delete(self.channel_path(channel_id))
return req | Deletes channel | entailment |
def slugify(value, allow_unicode=False):
"""Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens.
:param value: string
:param allow_unicode: allow utf8 characters
:type allow_unicode: bool
:return: slugified string
:rtype: str
:Example:
>>> slugify('pekná líščička')
'pekna-liscicka'
"""
value = str(value)
if allow_unicode:
value = unicodedata.normalize('NFKC', value)
value = re.sub(r'[^\w\s-]', '', value, flags=re.U).strip().lower()
return re.sub(r'[-\s]+', '-', value, flags=re.U)
else:
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
value = re.sub(r'[^\w\s-]', '', value).strip().lower()
return re.sub('[-\s]+', '-', value) | Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens.
:param value: string
:param allow_unicode: allow utf8 characters
:type allow_unicode: bool
:return: slugified string
:rtype: str
:Example:
>>> slugify('pekná líščička')
'pekna-liscicka' | entailment |
def urlencode(query):
"""Encode string to be used in urls (percent encoding).
:param query: string to be encoded
:type query: str
:return: urlencoded string
:rtype: str
:Example:
>>> urlencode('pekná líščička')
'pekn%C3%A1%20l%C3%AD%C5%A1%C4%8Di%C4%8Dka'
"""
if hasattr(urllib, 'parse'):
return urllib.parse.urlencode(query)
else:
return urllib.urlencode(query) | Encode string to be used in urls (percent encoding).
:param query: string to be encoded
:type query: str
:return: urlencoded string
:rtype: str
:Example:
>>> urlencode('pekná líščička')
'pekn%C3%A1%20l%C3%AD%C5%A1%C4%8Di%C4%8Dka' | entailment |
def initialize():
"""
Import in this form is necessary so that we avoid the unwanted behavior and immediate initialization of the
application objects. This makes the initialization procedure run in the time when it is necessary and has every
required resources.
"""
from zsl.interface.web.performers.default import create_not_found_mapping
from zsl.interface.web.performers.resource import create_resource_mapping
create_not_found_mapping()
create_resource_mapping() | Import in this form is necessary so that we avoid the unwanted behavior and immediate initialization of the
application objects. This makes the initialization procedure run in the time when it is necessary and has every
required resources. | entailment |
def run_web(self, flask, host='127.0.0.1', port=5000, **options):
# type: (Zsl, str, int, **Any)->None
"""Alias for Flask.run"""
return flask.run(
host=flask.config.get('FLASK_HOST', host),
port=flask.config.get('FLASK_PORT', port),
debug=flask.config.get('DEBUG', False),
**options
) | Alias for Flask.run | entailment |
def execute_command(self, command, args=None):
"""
Execute a command
:param command: name of the command
:type command: str
:param args: optional named arguments for command
:type args: dict
:return: the result of command
:raises KeyError: if command is not found
"""
if args is None:
args = {}
command_fn = self.commands[command]
return command_fn(**args) | Execute a command
:param command: name of the command
:type command: str
:param args: optional named arguments for command
:type args: dict
:return: the result of command
:raises KeyError: if command is not found | entailment |
def bound(self, instance):
"""
Return a new dispatcher, which will switch all command functions
with bounded methods of given instance matched by name. It will
match only regular methods.
:param instance: object instance
:type instance: object
:return: new Dispatcher
:rtype: CommandDispatcher
"""
bounded_dispatcher = CommandDispatcher()
bounded_dispatcher.commands = self.commands.copy()
for name in self.commands:
method = getattr(instance, name, None)
if method and inspect.ismethod(method) and method.__self__ == instance:
bounded_dispatcher.commands[name] = method
return bounded_dispatcher | Return a new dispatcher, which will switch all command functions
with bounded methods of given instance matched by name. It will
match only regular methods.
:param instance: object instance
:type instance: object
:return: new Dispatcher
:rtype: CommandDispatcher | entailment |
def compress_and_upload(data, compressed_file, s3_path, multipart_chunk_size_mb=500,
method='gz', delete=False, access_key=None, secret_key=None):
'''
Compresses data and uploads to S3.
S3 upload uses ``s3cmd``, so you must either:
1) Manually configure ``s3cmd`` prior to use (typically using ``s3cmd --configure``).
2) Configure ``s3cmd`` using ``s3.configure()``.
3) Pass your access key and secret key to ``compress_and_upload``, which will automatically configure s3cmd.
.. note:
``s3cmd`` configuration only needs to be done once per computer,
which means that relaunching a cloud instance or Docker image will
require re-configuration of ``s3cmd``.
Args:
data: Can be one of three things:
1) Path to a single file
2) Path to a directory
3) A list of one or more paths to files or directories
compressed_file (str): Path to the compressed file. Required.
s3_path (str): The S3 path, with the filename omitted. The S3 filename
will be the basename of the ``compressed_file``. For example::
compress_and_upload(data='/path/to/data',
compressed_file='/path/to/compressed.tar.gz',
s3_path='s3://my_bucket/path/to/')
will result in an uploaded S3 path of ``s3://my_bucket/path/to/compressed.tar.gz``
method (str): Compression method. Options are ``'gz'`` (gzip) or ``'bz2'`` (bzip2).
Default is ``'gz'``.
delete (bool): If ``True``, the ``compressed_file`` will be deleted after upload
to S3. Default is ``False``.
access_key (str): AWS access key.
secret_key (str): AWS secret key.
'''
logger = log.get_logger('s3')
if all([access_key, secret_key]):
configure(access_key=access_key, secret_key=secret_key, logger=logger)
compress(data, compressed_file, fmt=method, logger=logger)
put(compressed_file, s3_path, multipart_chunk_size_mb=multipart_chunk_size_mb, logger=logger)
if delete:
os.unlink(compressed_file) | Compresses data and uploads to S3.
S3 upload uses ``s3cmd``, so you must either:
1) Manually configure ``s3cmd`` prior to use (typically using ``s3cmd --configure``).
2) Configure ``s3cmd`` using ``s3.configure()``.
3) Pass your access key and secret key to ``compress_and_upload``, which will automatically configure s3cmd.
.. note:
``s3cmd`` configuration only needs to be done once per computer,
which means that relaunching a cloud instance or Docker image will
require re-configuration of ``s3cmd``.
Args:
data: Can be one of three things:
1) Path to a single file
2) Path to a directory
3) A list of one or more paths to files or directories
compressed_file (str): Path to the compressed file. Required.
s3_path (str): The S3 path, with the filename omitted. The S3 filename
will be the basename of the ``compressed_file``. For example::
compress_and_upload(data='/path/to/data',
compressed_file='/path/to/compressed.tar.gz',
s3_path='s3://my_bucket/path/to/')
will result in an uploaded S3 path of ``s3://my_bucket/path/to/compressed.tar.gz``
method (str): Compression method. Options are ``'gz'`` (gzip) or ``'bz2'`` (bzip2).
Default is ``'gz'``.
delete (bool): If ``True``, the ``compressed_file`` will be deleted after upload
to S3. Default is ``False``.
access_key (str): AWS access key.
secret_key (str): AWS secret key. | entailment |
def put(f, s3_path, multipart_chunk_size_mb=500, logger=None):
'''
Uploads a single file to S3, using s3cmd.
Args:
f (str): Path to a single file.
s3_path (str): The S3 path, with the filename omitted. The S3 filename
will be the basename of the ``f``. For example::
put(f='/path/to/myfile.tar.gz', s3_path='s3://my_bucket/path/to/')
will result in an uploaded S3 path of ``s3://my_bucket/path/to/myfile.tar.gz``
'''
if not logger:
logger = log.get_logger('s3')
fname = os.path.basename(f)
target = os.path.join(s3_path, fname)
s3cmd_cline = 's3cmd put {} {} --multipart-chunk-size-mb {}'.format(f,
target,
multipart_chunk_size_mb)
print_put_info(fname, target, logger)
s3cmd = sp.Popen(s3cmd_cline,
stdout=sp.PIPE,
stderr=sp.PIPE,
shell=True)
stdout, stderr = s3cmd.communicate() | Uploads a single file to S3, using s3cmd.
Args:
f (str): Path to a single file.
s3_path (str): The S3 path, with the filename omitted. The S3 filename
will be the basename of the ``f``. For example::
put(f='/path/to/myfile.tar.gz', s3_path='s3://my_bucket/path/to/')
will result in an uploaded S3 path of ``s3://my_bucket/path/to/myfile.tar.gz`` | entailment |
def compress(d, output, fmt='gz', logger=None):
'''
Creates a compressed/uncompressed tar file.
Args:
d: Can be one of three things:
1. the path to a single file, as a string
2. the path to a single directory, as a string
3. an iterable of file or directory paths
output (str): Output file path.
fmt: Compression method. Options are ``'gz'`` (gzip),
``'bz2'`` (bzip2) and ``'none'`` (uncompressed). Default is ``'gz'``.
'''
if not logger:
logger = log.get_logger('s3')
if type(d) not in [list, tuple]:
d = [d, ]
d = [os.path.expanduser(_d) for _d in d]
print_compress_info(d, output, compress, logger)
if fmt.lower() == 'none':
fmt = ''
elif fmt.lower() not in ['gz', 'bz2']:
logger.info('Compression option ("{}") is invalid.\nFalling back to uncompressed.'.format(fmt))
fmt = ''
output = os.path.expanduser(output)
tar = tarfile.open(output, 'w:{}'.format(fmt))
for obj in d:
tar.add(obj)
tar.close()
return output | Creates a compressed/uncompressed tar file.
Args:
d: Can be one of three things:
1. the path to a single file, as a string
2. the path to a single directory, as a string
3. an iterable of file or directory paths
output (str): Output file path.
fmt: Compression method. Options are ``'gz'`` (gzip),
``'bz2'`` (bzip2) and ``'none'`` (uncompressed). Default is ``'gz'``. | entailment |
def configure(access_key=None, secret_key=None, logger=None):
'''
Configures s3cmd prior to first use.
If no arguments are provided, you will be prompted to enter
the access key and secret key interactively.
Args:
access_key (str): AWS access key
secret_key (str): AWS secret key
'''
if not logger:
logger = log.get_logger('s3')
if not all([access_key, secret_key]):
logger.info('')
access_key = input('AWS Access Key: ')
secret_key = input('AWS Secret Key: ')
_write_config(access_key, secret_key)
logger.info('')
logger.info('Completed writing S3 config file.')
logger.info('') | Configures s3cmd prior to first use.
If no arguments are provided, you will be prompted to enter
the access key and secret key interactively.
Args:
access_key (str): AWS access key
secret_key (str): AWS secret key | entailment |
def required_params(data, *r_params):
"""Check if given parameters are in the given dict, if not raise an
exception.
:param data: data to check
:type data: dict
:param r_params: required parameters
:raises RequestException: if params not in data
"""
if not reduce(lambda still_valid, param: still_valid and param in data,
r_params, True):
raise RequestException(msg_err_missing_params(*r_params)) | Check if given parameters are in the given dict, if not raise an
exception.
:param data: data to check
:type data: dict
:param r_params: required parameters
:raises RequestException: if params not in data | entailment |
def safe_args(fn, args):
"""Check if ``args`` as a dictionary has the required parameters of ``fn``
function and filter any waste parameters so ``fn`` can be safely called
with them.
:param fn: function object
:type fn: Callable
:param args: dictionary of parameters
:type args: dict
:return: dictionary to be used as named params for the ``fn``
:rtype: dict
"""
fn_args = inspect.getargspec(fn)
if fn_args.defaults:
required_params(args, fn_args.args[:-len(fn_args.defaults)])
else:
required_params(args, fn_args)
if not fn_args.keywords:
return {key: value for key, value in viewitems(args) if key in fn_args.args}
else:
return args | Check if ``args`` as a dictionary has the required parameters of ``fn``
function and filter any waste parameters so ``fn`` can be safely called
with them.
:param fn: function object
:type fn: Callable
:param args: dictionary of parameters
:type args: dict
:return: dictionary to be used as named params for the ``fn``
:rtype: dict | entailment |
def get_db(db, ip='localhost', port=27017, user=None, password=None):
'''
Returns a pymongo Database object.
.. note:
Both ``user`` and ``password`` are required when connecting to a MongoDB
database that has authentication enabled.
Arguments:
db (str): Name of the MongoDB database. Required.
ip (str): IP address of the MongoDB server. Default is ``localhost``.
port (int): Port of the MongoDB server. Default is ``27017``.
user (str): Username, if authentication is enabled on the MongoDB database.
Default is ``None``, which results in requesting the connection
without authentication.
password (str): Password, if authentication is enabled on the MongoDB database.
Default is ``None``, which results in requesting the connection
without authentication.
'''
if platform.system().lower() == 'darwin':
connect = False
else:
connect = True
if user and password:
import urllib
pwd = urllib.quote_plus(password)
uri = 'mongodb://{}:{}@{}:{}'.format(user, pwd, ip, port)
conn = MongoClient(uri, connect=connect)
else:
conn = MongoClient(ip, port, connect=connect)
return conn[db] | Returns a pymongo Database object.
.. note:
Both ``user`` and ``password`` are required when connecting to a MongoDB
database that has authentication enabled.
Arguments:
db (str): Name of the MongoDB database. Required.
ip (str): IP address of the MongoDB server. Default is ``localhost``.
port (int): Port of the MongoDB server. Default is ``27017``.
user (str): Username, if authentication is enabled on the MongoDB database.
Default is ``None``, which results in requesting the connection
without authentication.
password (str): Password, if authentication is enabled on the MongoDB database.
Default is ``None``, which results in requesting the connection
without authentication. | entailment |
def get_collections(db, collection=None, prefix=None, suffix=None):
'''
Returns a sorted list of collection names found in ``db``.
Arguments:
db (Database): A pymongo Database object. Can be obtained
with ``get_db``.
collection (str): Name of a collection. If the collection is
present in the MongoDB database, a single-element list will
be returned with the collecion name. If not, an empty list
will be returned. This option is primarly included to allow
for quick checking to see if a collection name is present.
Default is None, which results in this option being ignored.
prefix (str): If supplied, only collections that begin with
``prefix`` will be returned.
suffix (str): If supplied, only collections that end with
``suffix`` will be returned.
Returns:
list: A sorted list of collection names.
'''
if collection is not None:
return [collection, ]
collections = db.collection_names(include_system_collections=False)
if prefix is not None:
collections = [c for c in collections if c.startswith(prefix)]
if suffix is not None:
collections = [c for c in collections if c.endswith(suffix)]
return sorted(collections) | Returns a sorted list of collection names found in ``db``.
Arguments:
db (Database): A pymongo Database object. Can be obtained
with ``get_db``.
collection (str): Name of a collection. If the collection is
present in the MongoDB database, a single-element list will
be returned with the collecion name. If not, an empty list
will be returned. This option is primarly included to allow
for quick checking to see if a collection name is present.
Default is None, which results in this option being ignored.
prefix (str): If supplied, only collections that begin with
``prefix`` will be returned.
suffix (str): If supplied, only collections that end with
``suffix`` will be returned.
Returns:
list: A sorted list of collection names. | entailment |
def rename_collection(db, collection, new_name):
'''
Renames a MongoDB collection.
Arguments:
db (Database): A pymongo Database object. Can be obtained
with ``get_db``.
collection (str): Name of the collection to be renamed.
new_name (str, func): ``new_name`` can be one of two things::
1. The new collection name, as a string.
2. A function which, when passed the current collection name,
returns the new collection name. If the function
returns an empty string, the collection will not be
renamed.
'''
if hasattr(new_name, '__call__'):
_new = new_name(collection)
if _new == '':
return
else:
_new = new_name
c = db[collection]
c.rename(_new) | Renames a MongoDB collection.
Arguments:
db (Database): A pymongo Database object. Can be obtained
with ``get_db``.
collection (str): Name of the collection to be renamed.
new_name (str, func): ``new_name`` can be one of two things::
1. The new collection name, as a string.
2. A function which, when passed the current collection name,
returns the new collection name. If the function
returns an empty string, the collection will not be
renamed. | entailment |
def update(field, value, db, collection, match=None):
'''
Updates MongoDB documents.
Sets ``field`` equal to ``value`` for all documents that
meet ``match`` criteria.
Arguments:
field (str): Field to update.
value (str): Update value.
db (Database): A pymongo Database object.
collection (str): Collection name.
match (dict): A dictionary containing the match criteria, for example::
{'seq_id': {'$in': ['a', 'b', 'c']}, 'cdr3_len': {'$gte': 18}}
'''
c = db[collection]
match = match if match is not None else {}
# check MongoDB version to use appropriate update command
if db.client.server_info()['version'].startswith('2'):
c.update(match, {'$set': {field: value}}, multi=True)
else:
c.update_many(match, {'$set': {field: value}}) | Updates MongoDB documents.
Sets ``field`` equal to ``value`` for all documents that
meet ``match`` criteria.
Arguments:
field (str): Field to update.
value (str): Update value.
db (Database): A pymongo Database object.
collection (str): Collection name.
match (dict): A dictionary containing the match criteria, for example::
{'seq_id': {'$in': ['a', 'b', 'c']}, 'cdr3_len': {'$gte': 18}} | entailment |
def mongoimport(json, database,
ip='localhost', port=27017,
user=None, password=None,
delim='_', delim1=None, delim2=None,
delim_occurance=1, delim1_occurance=1, delim2_occurance=1):
'''
Performs mongoimport on one or more json files.
Args:
json: Can be one of several things:
- path to a single JSON file
- an iterable (list or tuple) of one or more JSON file paths
- path to a directory containing one or more JSON files
database (str): Name of the database into which the JSON files
will be imported
ip (str): IP address of the MongoDB server. Default is ``localhost``.
port (int): Port of the MongoDB database. Default is ``27017``.
user (str): Username for the MongoDB database, if authentication is enabled.
Default is ``None``, which results in attempting connection without
authentication.
password (str): Password for the MongoDB database, if authentication is enabled.
Default is ``None``, which results in attempting connection without
authentication.
delim (str): Delimiter, when generating collection names using a single delimiter.
Default is ``_``
delim_occurance (int): Occurance at which to split filename when using a
single delimiter. Default is ``1``
delim1 (str): Left delimiter when splitting with two delimiters. Default is None.
delim1_occurance (int): Occurance of ``delim1`` at which to split filename.
Default is ``1``
delim2 (str): Right delimiter when splitting with two delimiters. Default is None.
delim2_occurance (int): Occurance of ``delim2`` at which to split filename.
Default is ``1``
'''
logger = log.get_logger('mongodb')
_print_mongoimport_info(logger)
if type(json) in (list, tuple):
pass
elif os.path.isdir(json):
from abtools.utils.pipeline import list_files
json = list_files(json)
else:
json = [json, ]
jsons = sorted([os.path.expanduser(j) for j in json if j.endswith('.json')])
collections = _get_import_collections(jsons, delim, delim_occurance,
delim1, delim1_occurance,
delim2, delim2_occurance)
logger.info('Found {} files to import'.format(len(jsons)))
logger.info('')
for i, (json_file, collection) in enumerate(zip(jsons, collections)):
logger.info('[ {} ] {} --> {}'.format(i + 1, os.path.basename(json_file), collection))
# logger.info("Performing mongoimport on {}.".format(os.path.basename(json_file)))
# logger.info("Importing the file into collection {}.".format(collection))
if all([user, password]):
host = '--host {} --port {} -username {} -password {}'.format(ip, port, user, password)
else:
host = '--host {} --port {}'.format(ip, port)
mongo_cmd = "mongoimport {} --db {} --collection {} --file {}".format(
host, database, collection, json_file)
mongo = sp.Popen(mongo_cmd, shell=True, stdout=sp.PIPE, stderr=sp.PIPE)
stdout, stderr = mongo.communicate() | Performs mongoimport on one or more json files.
Args:
json: Can be one of several things:
- path to a single JSON file
- an iterable (list or tuple) of one or more JSON file paths
- path to a directory containing one or more JSON files
database (str): Name of the database into which the JSON files
will be imported
ip (str): IP address of the MongoDB server. Default is ``localhost``.
port (int): Port of the MongoDB database. Default is ``27017``.
user (str): Username for the MongoDB database, if authentication is enabled.
Default is ``None``, which results in attempting connection without
authentication.
password (str): Password for the MongoDB database, if authentication is enabled.
Default is ``None``, which results in attempting connection without
authentication.
delim (str): Delimiter, when generating collection names using a single delimiter.
Default is ``_``
delim_occurance (int): Occurance at which to split filename when using a
single delimiter. Default is ``1``
delim1 (str): Left delimiter when splitting with two delimiters. Default is None.
delim1_occurance (int): Occurance of ``delim1`` at which to split filename.
Default is ``1``
delim2 (str): Right delimiter when splitting with two delimiters. Default is None.
delim2_occurance (int): Occurance of ``delim2`` at which to split filename.
Default is ``1`` | entailment |
def index(db, collection, fields, directions=None, desc=False, background=False):
    '''
    Builds a simple (single field) or complex (multiple fields) index
    on a single collection in a MongoDB database.
    Args:
        db (Database): A pymongo Database object.
        collection (str): Collection name.
        fields: Can be one of two things:
            - the name of a single field, as a string
            - an iterable (list/tuple) of one or more field names
        desc (bool): If ``True``, all indexes will be created in descending order.
            Default is ``False``.
        directions (list): For complex indexes for which you'd like to have
            different indexing directions (ascending for some fields, descending
            for others), you can pass a list of pymongo direction objects (
            pymongo.ASCENDING and pymongo.DESCENDING), in the same order as the
            list of fields to be indexed. Must be the same length as the list
            of index fields. Default is ``None``.
        background (bool): If ``True``, the indexing operation will be processed
            in the background. When performing background indexes, the MongoDB
            database will not be locked.
    '''
    # Imported lazily so pymongo is only required when indexing is actually used.
    import pymongo
    # Normalize a single field name into a one-element list.
    if type(fields) in STR_TYPES:
        fields = [fields, ]
    # When no explicit directions are given, apply one uniform direction
    # (descending if ``desc`` is set, ascending otherwise) to every field.
    if directions is None:
        _dir = pymongo.DESCENDING if desc else pymongo.ASCENDING
        directions = [_dir] * len(fields)
    # pymongo's create_index expects a list of (field, direction) tuples.
    field_tuples = list(zip(fields, directions))
    coll = db[collection]
coll.create_index(field_tuples, background=background) | Builds a simple (single field) or complex (multiple fields) index
on a single collection in a MongoDB database.
Args:
db (Database): A pymongo Database object.
collection (str): Collection name.
fields: Can be one of two things:
- the name of a single field, as a string
- an iterable (list/tuple) of one or more field names
desc (bool): If ``True``, all indexes will be created in descending order.
Default is ``False``.
directions (list): For complex indexes for which you'd like to have
different indexing directions (ascending for some fields, descending
for others), you can pass a list of pymongo direction objects (
pymongo.ASCENDING and pymongo.DESCENDING), in the same order as the
list of fields to be indexed. Must be the same length as the list
of index fields. Default is ``None``.
background (bool): If ``True``, the indexing operation will be processed
in the background. When performing background indexes, the MongoDB
database will not be locked. | entailment |
def get_settings_from_profile(profile, profile_dir=None):
    # type: (str, Any)->str
    """Returns the configuration file path for the given profile.
    :param profile: Profile name to be used.
    :param profile_dir: The directory where the profile configuration file should reside. It
        may be also a module, and then the directory of the module is used.
    :return: Configuration file path.
    """
    # Default to the application's ``settings`` package when no directory is given.
    if profile_dir is None:
        import settings
        profile_dir = settings
    # A module was passed (or defaulted to) -- use the directory containing it.
    if hasattr(profile_dir, '__file__'):
        profile_dir = os.path.dirname(profile_dir.__file__)
return os.path.join(profile_dir, '{0}.cfg'.format(profile)) | Returns the configuration file path for the given profile.
:param profile: Profile name to be used.
:param profile_dir: The directory where the profile configuration file should reside. It
may be also a module, and then the directory of the module is used.
:return: Configuration file path. | entailment |
def _configure(self, config_object=None):
    # type: (Any) -> None
    """Read the configuration from config files.
    Loads the default settings and the profile settings if available.
    Check :func:`.set_profile`.
    :param config_object:
        This parameter is the configuration description; it may be a dict or
        string describing the module from which the configuration is used.
        Default is settings.default_settings.
    """
    # An explicit configuration mapping wins over the module-based defaults.
    if config_object:
        self.config.from_mapping(config_object)
    else:
        self.config.from_object(self._default_settings_module)
    # An environment variable may point at an extra settings file that
    # overrides whatever was loaded above.
    zsl_settings = os.environ.get(SETTINGS_ENV_VAR_NAME)
if zsl_settings is not None:
self.config.from_envvar(SETTINGS_ENV_VAR_NAME) | Read the configuration from config files.
Loads the default settings and the profile settings if available.
Check :func:`.set_profile`.
:param config_object:
This parameter is the configuration description; it may be a dict or
string describing the module from which the configuration is used.
Default is settings.default_settings. | entailment |
def _get_app_module(self):
    # type: () -> Callable
    """Returns a module which binds the current app and configuration.
    :return: configuration callback
    :rtype: Callable
    """
    def configure(binder):
        # type: (Binder) -> Callable
        # Expose this application instance and its config as singletons so
        # every injection consumer receives the same objects.
        binder.bind(ServiceApplication, to=self, scope=singleton)
        binder.bind(Config, to=self.config, scope=singleton)
return configure | Returns a module which binds the current app and configuration.
:return: configuration callback
:rtype: Callable | entailment |
def _configure_injector(self, modules):
    """Create the injector and install the modules.
    There is a necessary order of calls. First we have to bind `Config` and
    `Zsl`, then we need to register the app into the global stack and then
    we can install all other modules, which can use `Zsl` and `Config`
    injection.
    :param modules: list of injection modules
    :type modules: list
    """
    # Order matters here -- see the docstring above.
    self._register()
    self._create_injector()
    self._bind_core()
    self._bind_modules(modules)
    self.logger.debug("Injector configuration with modules {0}.".format(modules))
self._dependencies_initialized = True | Create the injector and install the modules.
There is a necessary order of calls. First we have to bind `Config` and
`Zsl`, then we need to register the app into the global stack and then
we can install all other modules, which can use `Zsl` and `Config`
injection.
:param modules: list of injection modules
:type modules: list | entailment |
def configure(self, binder):
    # type: (Binder) -> None
    """Initializer of the cache - creates the Redis cache module as the
    default cache infrastructure. The module is bound to `RedisCacheModule`
    and `CacheModule` keys. The initializer also creates `RedisIdHelper`
    and binds it to `RedisIdHelper` and `IdHelper` keys.
    :param Binder binder: The binder object holding the binding context, we\
        add cache to the binder.
    """
    # One shared cache-module instance serves both the concrete and the
    # abstract binding keys, so consumers of either key get the same object.
    redis_cache_module = RedisCacheModule()
    binder.bind(
        RedisCacheModule,
        to=redis_cache_module,
        scope=singleton
    )
    binder.bind(
        CacheModule,
        to=redis_cache_module,
        scope=singleton
    )
    # Likewise, a single id-helper instance backs both binding keys.
    redis_id_helper = RedisIdHelper()
    binder.bind(
        RedisIdHelper,
        to=redis_id_helper,
        scope=singleton
    )
    binder.bind(
        IdHelper,
        to=redis_id_helper,
        scope=singleton
    )
logging.debug("Created RedisCache binding.") | Initializer of the cache - creates the Redis cache module as the
default cache infrastructure. The module is bound to `RedisCacheModule`
and `CacheModule` keys. The initializer also creates `RedisIdHelper`
and binds it to `RedisIdHelper` and `IdHelper` keys.
:param Binder binder: The binder object holding the binding context, we\
add cache to the binder. | entailment |
def error_handler(f):
    """
    Default error handler.
    - On server side error shows a message
      'An error occurred!' and returns 500 status code.
    - Also serves well in the case when the resource/task/method
      is not found - returns 404 status code.
    """
    @wraps(f)
    def error_handling_function(*args, **kwargs):
        # Resolve the error configuration lazily, at call time, so the
        # injector is already set up when the wrapped function runs.
        @inject(error_config=ErrorConfiguration)
        def get_error_configuration(error_config):
            # type:(ErrorConfiguration)->ErrorConfiguration
            return error_config
        def should_skip_handling():
            # Web requests may be configured to delegate error handling to
            # Flask's own handlers instead of this one.
            use_flask_handler = get_error_configuration().use_flask_handler
            is_web_request = isinstance(JobContext.get_current_context(), WebJobContext)
            return use_flask_handler and is_web_request
        try:
            return f(*args, **kwargs)
        except Exception as ex:
            if should_skip_handling():
                raise
            # Every processor observes the error; the first handler that can
            # handle it produces the response, otherwise fall back to the
            # default handler.
            for ep in _error_processors:
                ep.handle(ex)
            for eh in _error_handlers:
                if eh.can_handle(ex):
                    return eh.handle(ex)
            return _DEFAULT_ERROR_HANDLER.handle(ex)
return error_handling_function | Default error handler.
- On server side error shows a message
'An error occurred!' and returns 500 status code.
- Also serves well in the case when the resource/task/method
is not found - returns 404 status code. | entailment |
def lazy_property(func):
    '''
    Wraps a property to provide lazy evaluation. Eliminates boilerplate.
    Also provides for setting and deleting the property.
    Use as you would use the @property decorator::
        # OLD:
        class MyClass():
            def __init__():
                self._compute = None
            @property
            def compute(self):
                if self._compute is None:
                    # computationally intense stuff
                    # ...
                    # ...
                    self._compute = result
                return self._compute
            @compute.setter
            def compute(self, value):
                self._compute = value
        # NEW:
        class MyClass():
            def __init__():
                pass
            @lazy_property
            def compute(self):
                # computationally intense stuff
                # ...
                # ...
                return result
    .. note:
        Properties wrapped with ``lazy_property`` are only evaluated once.
        If the instance state changes, lazy properties will not be automatically
        re-evaluated and the update must be explicitly called for::
            c = MyClass(data)
            prop = c.lazy_property
            # If you update some data that affects c.lazy_property
            c.data = new_data
            # c.lazy_property won't change
            prop == c.lazy_property # TRUE
            # If you want to update c.lazy_property, you can delete it, which will
            # force it to be recomputed (with the new data) the next time you use it
            del c.lazy_property
            new_prop = c.lazy_property
            new_prop == prop # FALSE
    '''
    # The cached value lives on the instance under a private attribute name
    # derived from the wrapped function's name.
    attr_name = '_lazy_' + func.__name__
    @property
    def _lazy_property(self):
        # Compute once on first access, then return the cached attribute.
        if not hasattr(self, attr_name):
            setattr(self, attr_name, func(self))
        return getattr(self, attr_name)
    @_lazy_property.deleter
    def _lazy_property(self):
        # Deleting the cache forces recomputation on the next access.
        if hasattr(self, attr_name):
            delattr(self, attr_name)
    @_lazy_property.setter
    def _lazy_property(self, value):
        # Setting overwrites (or pre-seeds) the cached value directly.
        setattr(self, attr_name, value)
return _lazy_property | Wraps a property to provide lazy evaluation. Eliminates boilerplate.
Also provides for setting and deleting the property.
Use as you would use the @property decorator::
# OLD:
class MyClass():
def __init__():
self._compute = None
@property
def compute(self):
if self._compute is None:
# computationally intense stuff
# ...
# ...
self._compute = result
return self._compute
@compute.setter
def compute(self, value):
self._compute = value
# NEW:
class MyClass():
def __init__():
pass
@lazy_property
def compute(self):
# computationally intense stuff
# ...
# ...
return result
.. note:
Properties wrapped with ``lazy_property`` are only evaluated once.
If the instance state changes, lazy properties will not be automatically
re-evaluated and the update must be explicitly called for::
c = MyClass(data)
prop = c.lazy_property
# If you update some data that affects c.lazy_property
c.data = new_data
# c.lazy_property won't change
prop == c.lazy_property # TRUE
# If you want to update c.lazy_property, you can delete it, which will
# force it to be recomputed (with the new data) the next time you use it
del c.lazy_property
new_prop = c.lazy_property
new_prop == prop # FALSE | entailment |
def coroutine(func):
    '''
    Initializes a coroutine -- essentially it just takes a
    generator function and calls generator.next() to get
    things going.
    '''
    def start(*args, **kwargs):
        cr = func(*args, **kwargs)
        # Advance the generator to its first yield so it is ready to
        # receive values via send().
        # NOTE(review): ``cr.next()`` is Python 2 only; Python 3 would
        # need ``next(cr)`` -- confirm the target interpreter.
        cr.next()
        return cr
return start | Initializes a coroutine -- essentially it just takes a
generator function and calls generator.next() to get
things going. | entailment |
def fasta(self):
    '''
    str: Returns the sequence, as a FASTA-formatted string
    Note: The FASTA string is built using ``Sequence.id`` and ``Sequence.sequence``.
    '''
    # Build and cache the FASTA representation on first access.
    if not self._fasta:
        self._fasta = '>{}\n{}'.format(self.id, self.sequence)
return self._fasta | str: Returns the sequence, as a FASTA-formatted string
Note: The FASTA string is built using ``Sequence.id`` and ``Sequence.sequence``. | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.