sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def run_evaluation(self, stream_name: str) -> None:
"""
Run the main loop with the given stream in the prediction mode.
:param stream_name: name of the stream to be evaluated
"""
def prediction():
logging.info('Running prediction')
self._run_zeroth_epoch([stream_name])
logging.info('Prediction done\n\n')
self._try_run(prediction) | Run the main loop with the given stream in the prediction mode.
:param stream_name: name of the stream to be evaluated | entailment |
def major_vote(all_votes: Iterable[Iterable[Hashable]]) -> Iterable[Hashable]:
"""
For the given iterable of object iterations, return an iterable of the most common object at each position of the
inner iterations.
E.g.: for [[1, 2], [1, 3], [2, 3]] the return value would be [1, 3] as 1 and 3 are the most common objects
at the first and second positions respectively.
:param all_votes: an iterable of object iterations
:return: the most common objects in the iterations (the major vote)
"""
return [Counter(votes).most_common()[0][0] for votes in zip(*all_votes)] | For the given iterable of object iterations, return an iterable of the most common object at each position of the
inner iterations.
E.g.: for [[1, 2], [1, 3], [2, 3]] the return value would be [1, 3] as 1 and 3 are the most common objects
at the first and second positions respectively.
:param all_votes: an iterable of object iterations
:return: the most common objects in the iterations (the major vote) | entailment |
def _load_models(self) -> None:
"""Maybe load all the models to be assembled together and save them to the ``self._models`` attribute."""
if self._models is None:
logging.info('Loading %d models', len(self._model_paths))
def load_model(model_path: str):
logging.debug('\tloading %s', model_path)
if path.isdir(model_path):
model_path = path.join(model_path, CXF_CONFIG_FILE)
config = load_config(model_path)
config['model']['inputs'] = self._inputs
config['model']['outputs'] = self._outputs
return create_model(config, output_dir=None, dataset=self._dataset,
restore_from=path.dirname(model_path))
self._models = list(map(load_model, self._model_paths)) | Maybe load all the models to be assembled together and save them to the ``self._models`` attribute. | entailment |
def run(self, batch: Batch, train: bool=False, stream: StreamWrapper=None) -> Batch:
"""
Run feed-forward pass with the given batch using all the models, aggregate and return the results.
.. warning::
:py:class:`Ensemble` can not be trained.
:param batch: batch to be processed
:param train: ``True`` if this batch should be used for model update, ``False`` otherwise
:param stream: stream wrapper (useful for precise buffer management)
:return: aggregated results dict
:raise ValueError: if the ``train`` flag is set to ``True``
"""
if train:
raise ValueError('Ensemble model cannot be trained.')
self._load_models()
# run all the models
batch_outputs = [model.run(batch, False, stream) for model in self._models]
# aggregate the outputs
aggregated = {}
for output_name in self._outputs:
output_values = [batch_output[output_name] for batch_output in batch_outputs]
if self._aggregation == 'mean':
aggregated[output_name] = np.mean(output_values, axis=0)
elif self._aggregation == 'major_vote':
output_values_arr = np.array(output_values)
output = major_vote(output_values_arr.reshape((output_values_arr.shape[0], -1)))
aggregated[output_name] = np.array(output).reshape(output_values_arr[0].shape)
return aggregated | Run feed-forward pass with the given batch using all the models, aggregate and return the results.
.. warning::
:py:class:`Ensemble` can not be trained.
:param batch: batch to be processed
:param train: ``True`` if this batch should be used for model update, ``False`` otherwise
:param stream: stream wrapper (useful for precise buffer management)
:return: aggregated results dict
:raise ValueError: if the ``train`` flag is set to ``True`` | entailment |
def parse_fully_qualified_name(fq_name: str) -> Tuple[Optional[str], str]:
"""
Parse the given fully-quallified name (separated with dots) to a tuple of module and class names.
:param fq_name: fully qualified name separated with dots
:return: ``None`` instead of module if the given name contains no separators (dots).
"""
last_dot = fq_name.rfind('.')
if last_dot != -1:
return fq_name[:last_dot], fq_name[last_dot + 1:]
else:
return None, fq_name | Parse the given fully-quallified name (separated with dots) to a tuple of module and class names.
:param fq_name: fully qualified name separated with dots
:return: ``None`` instead of module if the given name contains no separators (dots). | entailment |
def get_attribute(module_name: str, attribute_name: str):
"""
Get the specified module attribute. It most cases, it will be a class or function.
:param module_name: module name
:param attribute_name: attribute name
:return: module attribute
"""
assert isinstance(module_name, str)
assert isinstance(attribute_name, str)
_module = importlib.import_module(module_name)
return getattr(_module, attribute_name) | Get the specified module attribute. It most cases, it will be a class or function.
:param module_name: module name
:param attribute_name: attribute name
:return: module attribute | entailment |
def create_object(module_name: str, class_name: str, args: Iterable=(), kwargs: Dict[str, Any]=_EMPTY_DICT):
"""
Create an object instance of the given class from the given module.
Args and kwargs are passed to the constructor.
This mimics the following code:
.. code-block:: python
from module import class
return class(*args, **kwargs)
:param module_name: module name
:param class_name: class name
:param args: args to be passed to the object constructor
:param kwargs: kwargs to be passed to the object constructor
:return: created object instance
"""
return get_attribute(module_name, class_name)(*args, **kwargs) | Create an object instance of the given class from the given module.
Args and kwargs are passed to the constructor.
This mimics the following code:
.. code-block:: python
from module import class
return class(*args, **kwargs)
:param module_name: module name
:param class_name: class name
:param args: args to be passed to the object constructor
:param kwargs: kwargs to be passed to the object constructor
:return: created object instance | entailment |
def list_submodules(module_name: str) -> List[str]: # pylint: disable=invalid-sequence-index
"""
List full names of all the submodules in the given module.
:param module_name: name of the module of which the submodules will be listed
"""
_module = importlib.import_module(module_name)
return [module_name+'.'+submodule_name for _, submodule_name, _ in pkgutil.iter_modules(_module.__path__)] | List full names of all the submodules in the given module.
:param module_name: name of the module of which the submodules will be listed | entailment |
def find_class_module(module_name: str, class_name: str) \
-> Tuple[List[str], List[Tuple[str, Exception]]]: # pylint: disable=invalid-sequence-index
"""
Find sub-modules of the given module that contain the given class.
Moreover, return a list of sub-modules that could not be imported as a list of (sub-module name, Exception) tuples.
:param module_name: name of the module to be searched
:param class_name: searched class name
:return: a tuple of sub-modules having the searched class and sub-modules that could not be searched
"""
matched_submodules = []
erroneous_submodules = []
for submodule_name in list_submodules(module_name):
try: # the sub-module to be included may be erroneous and we need to continue
submodule = importlib.import_module(submodule_name)
if hasattr(submodule, class_name):
matched_submodules.append(submodule_name)
except Exception as ex: # pylint: disable=broad-except
erroneous_submodules.append((submodule_name, ex))
return matched_submodules, erroneous_submodules | Find sub-modules of the given module that contain the given class.
Moreover, return a list of sub-modules that could not be imported as a list of (sub-module name, Exception) tuples.
:param module_name: name of the module to be searched
:param class_name: searched class name
:return: a tuple of sub-modules having the searched class and sub-modules that could not be searched | entailment |
def get_class_module(module_name: str, class_name: str) -> Optional[str]:
"""
Get a sub-module of the given module which has the given class.
This method wraps `utils.reflection.find_class_module method` with the following behavior:
- raise error when multiple sub-modules with different classes with the same name are found
- return None when no sub-module is found
- warn about non-searchable sub-modules
.. note::
This function logs!
:param module_name: module to be searched
:param class_name: searched class name
:return: sub-module with the searched class or None
"""
matched_modules, erroneous_modules = find_class_module(module_name, class_name)
for submodule, error in erroneous_modules:
logging.warning('Could not inspect sub-module `%s` due to `%s` '
'when searching for `%s` in sub-modules of `%s`.',
submodule, type(error).__name__, class_name, module_name)
if len(matched_modules) == 1:
return matched_modules[0]
if len(matched_modules) > 1:
# check if all the module attributes point to the same class
first_class = getattr(importlib.import_module(matched_modules[0]), class_name)
for matched_module in matched_modules:
another_class = getattr(importlib.import_module(matched_module), class_name)
if another_class is not first_class:
raise ValueError('Found more than one sub-module when searching for `{}` in sub-modules of `{}`. '
'Please specify the module explicitly. Found sub-modules: `{}`'
.format(class_name, module_name, matched_modules))
return matched_modules[0]
return None | Get a sub-module of the given module which has the given class.
This method wraps `utils.reflection.find_class_module method` with the following behavior:
- raise error when multiple sub-modules with different classes with the same name are found
- return None when no sub-module is found
- warn about non-searchable sub-modules
.. note::
This function logs!
:param module_name: module to be searched
:param class_name: searched class name
:return: sub-module with the searched class or None | entailment |
def entry_point() -> None:
"""**cxflow** entry point."""
# make sure the path contains the current working directory
sys.path.insert(0, os.getcwd())
parser = get_cxflow_arg_parser(True)
# parse CLI arguments
known_args, unknown_args = parser.parse_known_args()
# show help if no subcommand was specified.
if not hasattr(known_args, 'subcommand'):
parser.print_help()
quit(1)
# set up global logger
logger = logging.getLogger('')
logger.setLevel(logging.DEBUG if known_args.verbose else logging.INFO)
logger.handlers = [] # remove default handlers
# set up STDERR handler
stderr_handler = logging.StreamHandler(sys.stderr)
stderr_handler.setFormatter(logging.Formatter(CXF_LOG_FORMAT, datefmt=CXF_LOG_DATE_FORMAT))
logger.addHandler(stderr_handler)
if known_args.subcommand == 'train':
train(config_path=known_args.config_file, cl_arguments=unknown_args, output_root=known_args.output_root)
elif known_args.subcommand == 'resume':
resume(config_path=known_args.config_path, restore_from=known_args.restore_from, cl_arguments=unknown_args,
output_root=known_args.output_root)
elif known_args.subcommand == 'predict':
logging.warning('Predict command is deprecated and will be removed, use ``cxflow eval predict ...`` instead')
predict(config_path=known_args.config_path, restore_from=known_args.restore_from, cl_arguments=unknown_args,
output_root=known_args.output_root)
elif known_args.subcommand == 'eval':
evaluate(model_path=known_args.model_path, stream_name=known_args.stream_name,
config_path=known_args.config, cl_arguments=unknown_args, output_root=known_args.output_root)
elif known_args.subcommand == 'dataset':
invoke_dataset_method(config_path=known_args.config_file, method_name=known_args.method,
cl_arguments=unknown_args, output_root=known_args.output_root)
elif known_args.subcommand == 'gridsearch':
grid_search(script=known_args.script, params=known_args.params, dry_run=known_args.dry_run)
elif known_args.subcommand == 'ls':
list_train_dirs(known_args.dir, known_args.recursive, known_args.all, known_args.long, known_args.verbose)
elif known_args.subcommand == 'prune':
prune_train_dirs(known_args.dir, known_args.epochs, known_args.subdirs) | **cxflow** entry point. | entailment |
def get_cxflow_arg_parser(add_common_arguments: bool=False) -> ArgumentParser:
"""
Create the **cxflow** argument parser.
:return: an instance of the parser
"""
# create parser
main_parser = ArgumentParser('cxflow',
description='cxflow: lightweight framework for machine learning with '
'focus on modularization, re-usability and rapid experimenting.',
epilog='For more info see <https://cognexa.github.io/cxflow>')
main_parser.add_argument('--version', action='version', help='Print cxflow version and quit.',
version='cxflow {}'.format(pkg_resources.get_distribution('cxflow').version))
subparsers = main_parser.add_subparsers(help='cxflow commands')
# create train sub-parser
train_parser = subparsers.add_parser('train', description='Start cxflow training from the ``config_file``.')
train_parser.set_defaults(subcommand='train')
train_parser.add_argument('config_file', help='path to the config file')
# create resume sub-parser
resume_parser = subparsers.add_parser('resume', description='Resume cxflow training from the ``config_path``.')
resume_parser.set_defaults(subcommand='resume')
resume_parser.add_argument('config_path', help='path to the config file or the directory in which it is stored')
resume_parser.add_argument('restore_from', nargs='?', default=None,
help='information passed to the model constructor (backend-specific); '
'usually a directory in which the trained model is stored')
# create predict sub-parser (deprecated)
predict_parser = subparsers.add_parser('predict', description='Run prediction with the given ``config_path``.')
predict_parser.set_defaults(subcommand='predict')
predict_parser.add_argument('config_path', help='path to the config file or the directory in which it is stored')
predict_parser.add_argument('restore_from', nargs='?', default=None,
help='information passed to the model constructor (backend-specific); usually a '
'directory in which the trained model is stored')
# create eval sub-parser
eval_parser = subparsers.add_parser('eval', description='Evaluate the given model on the specified data stream.')
eval_parser.set_defaults(subcommand='eval')
eval_parser.add_argument('stream_name', help='stream name to be evaluated')
eval_parser.add_argument('model_path', help='model path to be evaluated')
eval_parser.add_argument('--config', '-c', nargs='?', default=None, help='optional config path to be used')
# create dataset sub-parser
dataset_parser = subparsers.add_parser('dataset', description='Invoke arbitrary dataset method.')
dataset_parser.set_defaults(subcommand='dataset')
dataset_parser.add_argument('method', help='name of the method to be invoked')
dataset_parser.add_argument('config_file', help='path to the config file')
# create grid-search sub-parser
gridsearch_parser = subparsers.add_parser('gridsearch', description='Do parameter grid search (experimental).')
gridsearch_parser.set_defaults(subcommand='gridsearch')
gridsearch_parser.add_argument('script', help='Script to be grid-searched')
gridsearch_parser.add_argument('params', nargs='*', help='Params to be tested. Format: name:type=[value1,value2]. '
'Type is optional')
gridsearch_parser.add_argument('--dry-run', action='store_true', help='Only print command output instead '
'of executing it right away')
# create ls sub-parser
ls_parser = subparsers.add_parser('ls', description='List training log dirs in the given path.')
ls_parser.set_defaults(subcommand='ls')
ls_parser.add_argument('dir', nargs='?', default=CXF_DEFAULT_LOG_DIR,
help='path to the log directory to be listed')
ls_parser.add_argument('-l', '--long', action='store_true', help='use long listing format')
ls_parser.add_argument('-a', '--all', action='store_true', help='include trainings with no epochs done')
ls_parser.add_argument('-r', '--recursive', action='store_true',
help='list all the dirs recursively, stop at training dirs')
ls_parser.add_argument('-v', '--verbose', action='store_true',
help='print more verbose output, applicable only when a single train dir is listed')
# create prune sub-parser
prune_parser = subparsers.add_parser('prune', description='Prune training log dirs in the given path without finished epochs.')
prune_parser.set_defaults(subcommand='prune')
prune_parser.add_argument('dir', nargs='?', default=CXF_DEFAULT_LOG_DIR,
help='path to the log directory to be pruned')
prune_parser.add_argument('-e', '--epochs', default=1, type=int,
help='keep only training log dirs having at least this many completed epochs, default 1')
prune_parser.add_argument('-s', '--subdirs', action='store_true',
help='delete all subdirectories in training directories')
# add common arguments
if add_common_arguments:
for parser in [main_parser, train_parser, resume_parser, predict_parser, dataset_parser, eval_parser]:
parser.add_argument('--output_root', '-o', default='./log', help='output directory')
parser.add_argument('--verbose', '-v', action='store_true', help='increase verbosity to level DEBUG')
return main_parser | Create the **cxflow** argument parser.
:return: an instance of the parser | entailment |
def _get_stream(self) -> Iterator:
"""Possibly create and return raw dataset stream iterator."""
if self._stream is None:
self._stream = iter(self._get_stream_fn())
return self._stream | Possibly create and return raw dataset stream iterator. | entailment |
def _enqueue_batches(self, stop_event: Event) -> None:
"""
Enqueue all the stream batches. If specified, stop after ``epoch_size`` batches.
.. note::
Signal the epoch end with ``None``.
Stop when:
- ``stop_event`` is risen
- stream ends and epoch size is not set
- specified number of batches is enqueued
.. note::
This is used only with ``buffer`` > 0.
:param stop_event: event signaling stop instruction
"""
while True:
self._stream = self._get_stream()
while True:
# Acquire the semaphore before processing the next batch
# but immediately release it so that other threads
# are not blocked when they decide to acquire it again.
with self._semaphore:
pass
# It always takes a short moment before the native call actually
# releases the GIL and we are free to compute. The following sleep
# is here to compensate for this short moment - we don't want to
# slow down the native call before the GIL is released.
time.sleep(CXF_BUFFER_SLEEP)
try:
batch = next(self._stream)
except StopIteration:
break
self._queue.put(batch)
self._batch_count += 1
if stop_event.is_set():
return
if self._epoch_limit_reached():
self._queue.put(None)
self._batch_count = 0
return
self._stream = None # yield a new iterator next time
if self._epoch_size <= 0: # for non-fixed size epochs
self._queue.put(None)
self._batch_count = 0
return | Enqueue all the stream batches. If specified, stop after ``epoch_size`` batches.
.. note::
Signal the epoch end with ``None``.
Stop when:
- ``stop_event`` is risen
- stream ends and epoch size is not set
- specified number of batches is enqueued
.. note::
This is used only with ``buffer`` > 0.
:param stop_event: event signaling stop instruction | entailment |
def _dequeue_batch(self) -> Optional[Batch]:
"""
Return a single batch from queue or ``None`` signaling epoch end.
:raise ChildProcessError: if the enqueueing thread ended unexpectedly
"""
if self._enqueueing_thread is None:
raise ValueError('StreamWrapper `{}` with buffer of size `{}` was used outside with-resource environment.'
.format(self._name, self._buffer_size))
if not self._enqueueing_thread.is_alive() and self._queue.empty():
self._start_thread()
while True:
try:
batch = self._queue.get(timeout=2)
self._queue.task_done()
break
except Empty:
if not self._enqueueing_thread.is_alive():
try:
# the enqueueing thread may just finished properly so lets check the queue eagerly
batch = self._queue.get_nowait()
self._queue.task_done()
break
except Empty:
# so we failed to retrieve a batch and the enqueueing thread is dead
# there is no hope, something must went wrong
raise ChildProcessError('Enqueueing thread ended unexpectedly.')
return batch | Return a single batch from queue or ``None`` signaling epoch end.
:raise ChildProcessError: if the enqueueing thread ended unexpectedly | entailment |
def _next_batch(self) -> Optional[Batch]:
"""
Return a single batch or ``None`` signaling epoch end.
.. note::
Signal the epoch end with ``None``.
Stop when:
- stream ends and epoch size is not set
- specified number of batches is returned
:return: a single batch or ``None`` signaling epoch end
"""
if self._epoch_limit_reached():
self._batch_count = 0
return None
try:
batch = next(self._get_stream())
self._batch_count += 1
return batch
except StopIteration:
self._stream = None # yield a new iterator next time
if self._epoch_size > 0: # underlying stream ended but our fixed size epoch did not
batch = next(self._get_stream()) # get another stream and return its 1st batch
self._batch_count += 1
return batch
else:
self._batch_count = 0
return None | Return a single batch or ``None`` signaling epoch end.
.. note::
Signal the epoch end with ``None``.
Stop when:
- stream ends and epoch size is not set
- specified number of batches is returned
:return: a single batch or ``None`` signaling epoch end | entailment |
def _start_thread(self):
"""Start an enqueueing thread."""
self._stopping_event = Event()
self._enqueueing_thread = Thread(target=self._enqueue_batches, args=(self._stopping_event,))
self._enqueueing_thread.start() | Start an enqueueing thread. | entailment |
def _stop_thread(self):
"""Stop the enqueueing thread. Keep the queue content and stream state."""
self._stopping_event.set()
queue_content = []
try: # give the enqueueing thread chance to put a batch to the queue and check the stopping event
while True:
queue_content.append(self._queue.get_nowait())
except Empty:
pass
self._enqueueing_thread.join()
try:
queue_content.append(self._queue.get_nowait()) # collect the very last item
except Empty:
pass
self._queue = Queue(max(len(queue_content), self._buffer_size)) # queue content may be bigger than queue size
for batch in queue_content:
self._queue.put(batch) | Stop the enqueueing thread. Keep the queue content and stream state. | entailment |
def _after_n_epoch(self, epoch_id: int, **_) -> None:
"""
Save the model every ``n_epochs`` epoch.
:param epoch_id: number of the processed epoch
"""
SaveEvery.save_model(model=self._model, name_suffix=str(epoch_id), on_failure=self._on_save_failure) | Save the model every ``n_epochs`` epoch.
:param epoch_id: number of the processed epoch | entailment |
def save_model(model: AbstractModel, name_suffix: str, on_failure: str) -> None:
"""
Save the given model with the given name_suffix. On failure, take the specified action.
:param model: the model to be saved
:param name_suffix: name to be used for saving
:param on_failure: action to be taken on failure; one of :py:attr:`SAVE_FAILURE_ACTIONS`
:raise IOError: on save failure with ``on_failure`` set to ``error``
"""
try:
logging.debug('Saving the model')
save_path = model.save(name_suffix)
logging.info('Model saved to: %s', save_path)
except Exception as ex: # pylint: disable=broad-except
if on_failure == 'error':
raise IOError('Failed to save the model.') from ex
elif on_failure == 'warn':
logging.warning('Failed to save the model.') | Save the given model with the given name_suffix. On failure, take the specified action.
:param model: the model to be saved
:param name_suffix: name to be used for saving
:param on_failure: action to be taken on failure; one of :py:attr:`SAVE_FAILURE_ACTIONS`
:raise IOError: on save failure with ``on_failure`` set to ``error`` | entailment |
def _get_value(self, epoch_data: EpochData) -> float:
"""
Retrieve the value of the monitored variable from the given epoch data.
:param epoch_data: epoch data which determine whether the model will be saved or not
:raise KeyError: if any of the specified stream, variable or aggregation is not present in the ``epoch_data``
:raise TypeError: if the variable value is not a dict when aggregation is specified
:raise ValueError: if the variable value is not a scalar
"""
if self._stream_name not in epoch_data:
raise KeyError('Stream `{}` was not found in the epoch data.\nAvailable streams are `{}`.'
.format(self._stream_name, epoch_data.keys()))
stream_data = epoch_data[self._stream_name]
if self._variable not in stream_data:
raise KeyError('Variable `{}` for stream `{}` was not found in the epoch data. '
'Available variables for stream `{}` are `{}`.'
.format(self._variable, self._stream_name, self._stream_name, stream_data.keys()))
value = stream_data[self._variable]
if self._aggregation:
if not isinstance(value, dict):
raise TypeError('Variable `{}` is expected to be a dict when aggregation is specified. '
'Got `{}` instead.'.format(self._variable, type(value).__name__))
if self._aggregation not in value:
raise KeyError('Specified aggregation `{}` was not found in the variable `{}`. '
'Available aggregations: `{}`.'.format(self._aggregation, self._variable, value.keys()))
value = value[self._aggregation]
if not np.isscalar(value):
raise ValueError('Variable `{}` value is not a scalar.'.format(value))
return value | Retrieve the value of the monitored variable from the given epoch data.
:param epoch_data: epoch data which determine whether the model will be saved or not
:raise KeyError: if any of the specified stream, variable or aggregation is not present in the ``epoch_data``
:raise TypeError: if the variable value is not a dict when aggregation is specified
:raise ValueError: if the variable value is not a scalar | entailment |
def _is_value_better(self, new_value: float) -> bool:
"""
Test if the new value is better than the best so far.
:param new_value: current value of the objective function
"""
if self._best_value is None:
return True
if self._condition == 'min':
return new_value < self._best_value
if self._condition == 'max':
return new_value > self._best_value | Test if the new value is better than the best so far.
:param new_value: current value of the objective function | entailment |
def after_epoch(self, epoch_data: EpochData, **_) -> None:
"""
Save the model if the new value of the monitored variable is better than the best value so far.
:param epoch_data: epoch data to be processed
"""
new_value = self._get_value(epoch_data)
if self._is_value_better(new_value):
self._best_value = new_value
SaveEvery.save_model(model=self._model, name_suffix=self._OUTPUT_NAME, on_failure=self._on_save_failure) | Save the model if the new value of the monitored variable is better than the best value so far.
:param epoch_data: epoch data to be processed | entailment |
def print_progress_bar(done: int, total: int, prefix: str = '', suffix: str = '') -> None:
"""
Print a progressbar with the given prefix and suffix, without newline at the end.
param done: current step in computation
param total: total count of steps in computation
param prefix: info text displayed before the progress bar
param suffix: info text displayed after the progress bar
"""
percent = '{0:.1f}'.format(100 * (done / float(total)))
base_len = shutil.get_terminal_size().columns - 7 - len(prefix) - len(suffix)
base_len = min([base_len, 50])
min_length = base_len - 1 - len('{}/{}={}'.format(total, total, '100.0'))
length = base_len - len('{}/{}={}'.format(done, total, percent))
if min_length > 0:
filled_len = int(min_length * done // total)
bar = '='*filled_len + '-'*(min_length - filled_len)
spacing = ' '*(length - min_length)
print('\r{}: |{}|{}{}/{}={}% {}'.format(prefix, bar, spacing, done, total, percent, suffix), end='\r')
else:
short_progress = '\r{}: {}/{}'.format(prefix, done, total)
if len(short_progress) <= shutil.get_terminal_size().columns:
print(short_progress, end='\r')
else:
print(['-', '\\', '|', '/'][done % 4], end='\r') | Print a progressbar with the given prefix and suffix, without newline at the end.
param done: current step in computation
param total: total count of steps in computation
param prefix: info text displayed before the progress bar
param suffix: info text displayed after the progress bar | entailment |
def get_formatted_time(seconds: float) -> str:
"""
Convert seconds to the time format ``H:M:S.UU``.
:param seconds: time in seconds
:return: formatted human-readable time
"""
seconds = round(seconds)
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
return '{:d}:{:02d}:{:02d}'.format(h, m, s) | Convert seconds to the time format ``H:M:S.UU``.
:param seconds: time in seconds
:return: formatted human-readable time | entailment |
def after_batch(self, stream_name: str, batch_data: Batch) -> None:
"""
Display the progress and ETA for the current stream in the epoch.
If the stream size (total batch count) is unknown (1st epoch), print only the number of processed batches.
"""
if self._current_stream_name is None or self._current_stream_name != stream_name:
self._current_stream_name = stream_name
self._current_stream_start = None
erase_line()
self._current_batch_count[stream_name] += 1
current_batch = self._current_batch_count[stream_name]
# total batch count is available
if stream_name in self._total_batch_count:
# compute ETA
total_batches = self._total_batch_count[stream_name]
if self._current_stream_start:
measured_batches = current_batch - 1
avg_batch_time = (time.time() - self._current_stream_start) / measured_batches
eta_sec = avg_batch_time * (total_batches - current_batch)
eta = get_formatted_time(eta_sec)
else:
self._current_stream_start = time.time()
eta = ''
print_progress_bar(current_batch, total_batches, prefix=stream_name, suffix=eta)
# total batch count is not available (1st epoch)
else:
short_progress = '{}: {}'.format(stream_name, current_batch)
if len(short_progress) <= shutil.get_terminal_size().columns:
print(short_progress, end='\r')
else:
print(['-', '\\', '|', '/'][current_batch % 4], end='\r') | Display the progress and ETA for the current stream in the epoch.
If the stream size (total batch count) is unknown (1st epoch), print only the number of processed batches. | entailment |
def after_epoch(self, **_) -> None:
"""
Reset progress counters. Save ``total_batch_count`` after the 1st epoch.
"""
if not self._total_batch_count_saved:
self._total_batch_count = self._current_batch_count.copy()
self._total_batch_count_saved = True
self._current_batch_count.clear()
self._current_stream_start = None
self._current_stream_name = None
erase_line() | Reset progress counters. Save ``total_batch_count`` after the 1st epoch. | entailment |
def after_epoch_profile(self, epoch_id, profile: TimeProfile, train_stream_name: str, extra_streams: Iterable[str]) -> None:
"""
Summarize and log the given epoch profile.
The profile is expected to contain at least:
- ``read_data_train``, ``eval_batch_train`` and ``after_batch_hooks_train`` entries produced by the train
stream (if train stream name is `train`)
- ``after_epoch_hooks`` entry
:param profile: epoch timings profile
:param extra_streams: enumeration of additional stream names
"""
read_data_total = 0
eval_total = 0
train_total = sum(profile.get('eval_batch_{}'.format(train_stream_name), []))
hooks_total = sum(profile.get('after_epoch_hooks', []))
for stream_name in chain(extra_streams, [train_stream_name]):
read_data_total += sum(profile.get('read_batch_' + stream_name, []))
hooks_total += sum(profile.get('after_batch_hooks_' + stream_name, []))
if stream_name != train_stream_name:
eval_total += sum(profile.get('eval_batch_' + stream_name, []))
logging.info('\tT read data:\t%f', read_data_total)
logging.info('\tT train:\t%f', train_total)
logging.info('\tT eval:\t%f', eval_total)
logging.info('\tT hooks:\t%f', hooks_total) | Summarize and log the given epoch profile.
The profile is expected to contain at least:
- ``read_data_train``, ``eval_batch_train`` and ``after_batch_hooks_train`` entries produced by the train
stream (if train stream name is `train`)
- ``after_epoch_hooks`` entry
:param profile: epoch timings profile
:param extra_streams: enumeration of additional stream names | entailment |
def confusion_matrix(expected: np.ndarray, predicted: np.ndarray, num_classes: int) -> np.ndarray:
"""
Calculate and return confusion matrix for the predicted and expected labels
:param expected: array of expected classes (integers) with shape `[num_of_data]`
:param predicted: array of predicted classes (integers) with shape `[num_of_data]`
:param num_classes: number of classification classes
:return: confusion matrix (cm) with absolute values
"""
assert np.issubclass_(expected.dtype.type, np.integer), " Classes' indices must be integers"
assert np.issubclass_(predicted.dtype.type, np.integer), " Classes' indices must be integers"
assert expected.shape == predicted.shape, "Predicted and expected data must be the same length"
assert num_classes > np.max([predicted, expected]), \
"Number of classes must be at least the number of indices in predicted/expected data"
assert np.min([predicted, expected]) >= 0, " Classes' indices must be positive integers"
cm_abs = np.zeros((num_classes, num_classes), dtype=np.int32)
for pred, exp in zip(predicted, expected):
cm_abs[exp, pred] += 1
return cm_abs | Calculate and return confusion matrix for the predicted and expected labels
:param expected: array of expected classes (integers) with shape `[num_of_data]`
:param predicted: array of predicted classes (integers) with shape `[num_of_data]`
:param num_classes: number of classification classes
:return: confusion matrix (cm) with absolute values | entailment |
def _build_grid_search_commands(script: str, params: typing.Iterable[str]) -> typing.Iterable[typing.List[str]]:
"""
Build all grid search parameter configurations.
:param script: String of command prefix, e.g. ``cxflow train -v -o log``.
:param params: Iterable collection of strings in standard **cxflow** param form, e.g. ``'numerical_param=[1, 2]'``
or ``'text_param=["hello", "cio"]'``.
"""
param_space = OrderedDict()
for arg in params:
assert '=' in arg
name = arg[:arg.index('=')]
options = arg[arg.index('=') + 1:]
options = ast.literal_eval(options)
assert isinstance(options, list), options
param_space[name] = options
param_names = param_space.keys()
commands = []
for values in itertools.product(*[param_space[name] for name in param_names]):
command = str(script).split()
for name, value in zip(param_names, values):
command.append(str(name) + '="' + str(value) + '"')
commands.append(command)
return commands | Build all grid search parameter configurations.
:param script: String of command prefix, e.g. ``cxflow train -v -o log``.
:param params: Iterable collection of strings in standard **cxflow** param form, e.g. ``'numerical_param=[1, 2]'``
or ``'text_param=["hello", "cio"]'``. | entailment |
def grid_search(script: str, params: typing.Iterable[str], dry_run: bool=False) -> None:
"""
Build all grid search parameter configurations and optionally run them.
:param script: String of command prefix, e.g. ``cxflow train -v -o log``.
:param params: Iterable collection of strings in standard **cxflow** param form, e.g. ``'numerical_param=[1, 2]'``
or ``'text_param=["hello", "cio"]'``.
:param dry_run: If set to ``True``, the built commands will only be printed instead of executed.
"""
commands = _build_grid_search_commands(script=script, params=params)
if dry_run:
logging.warning('Dry run')
for command in commands:
logging.info(command)
else:
for command in commands:
try:
completed_process = subprocess.run(command)
logging.info('Command `%s` completed with exit code %d', command, completed_process.returncode)
except Exception as _: # pylint: disable=broad-except
logging.error('Command `%s` failed.', command) | Build all grid search parameter configurations and optionally run them.
:param script: String of command prefix, e.g. ``cxflow train -v -o log``.
:param params: Iterable collection of strings in standard **cxflow** param form, e.g. ``'numerical_param=[1, 2]'``
or ``'text_param=["hello", "cio"]'``.
:param dry_run: If set to ``True``, the built commands will only be printed instead of executed. | entailment |
def stream_info(self) -> None:
"""Check and report source names, dtypes and shapes of all the streams available."""
stream_names = [stream_name for stream_name in dir(self)
if 'stream' in stream_name and stream_name != 'stream_info']
logging.info('Found %s stream candidates: %s', len(stream_names), stream_names)
for stream_name in stream_names:
try:
stream_fn = getattr(self, stream_name)
logging.info(stream_name)
batch = next(iter(stream_fn()))
rows = []
for key, value in batch.items():
try:
value_arr = np.array(value)
row = [key, value_arr.dtype, value_arr.shape]
if value_arr.dtype.kind in 'bui': # boolean, unsigned, integer
row.append('{} - {}'.format(value_arr.min(), value_arr.max()))
elif value_arr.dtype.kind is 'f':
row.append('{0:.2f} - {1:.2f}'.format(value_arr.min(), value_arr.max()))
except ValueError: # np broadcasting failed (ragged array)
value_arr = None
row = [key, '{}'.format(type(value[0]).__name__), '({},)'.format(len(list(value)))]
if value_arr is None or \
(value_arr.ndim > 0 and value_arr.shape[1:] != np.array(value_arr[0]).shape):
logging.warning('*** stream source `%s` appears to be ragged (non-rectangular) ***', key)
rows.append(row)
for line in tabulate.tabulate(rows, headers=['name', 'dtype', 'shape', 'range'],
tablefmt='grid').split('\n'):
logging.info(line)
except Exception:
logging.warning('Exception was raised during checking stream `%s`, '
'(stack trace is displayed only with --verbose flag)', stream_name)
logging.debug(traceback.format_exc()) | Check and report source names, dtypes and shapes of all the streams available. | entailment |
def parse_arg(arg: str) -> typing.Tuple[str, typing.Any]:
"""
Parse CLI argument in format ``key=value`` to ``(key, value)``
:param arg: CLI argument string
:return: tuple (key, value)
:raise: yaml.ParserError: on yaml parse error
"""
assert '=' in arg, 'Unrecognized argument `{}`. [name]=[value] expected.'.format(arg)
key = arg[:arg.index('=')]
value = yaml.load(arg[arg.index('=') + 1:])
return key, value | Parse CLI argument in format ``key=value`` to ``(key, value)``
:param arg: CLI argument string
:return: tuple (key, value)
:raise: yaml.ParserError: on yaml parse error | entailment |
def load_config(config_file: str, additional_args: typing.Iterable[str]=()) -> dict:
"""
Load config from YAML ``config_file`` and extend/override it with the given ``additional_args``.
:param config_file: path the YAML config file to be loaded
:param additional_args: additional args which may extend or override the config loaded from the file.
:return: configuration as dict
"""
config = load_yaml(config_file)
for key_full, value in [parse_arg(arg) for arg in additional_args]:
key_split = key_full.split('.')
key_prefix = key_split[:-1]
key = key_split[-1]
conf = config
for key_part in key_prefix:
conf = conf[key_part]
conf[key] = value
return reload(config) | Load config from YAML ``config_file`` and extend/override it with the given ``additional_args``.
:param config_file: path the YAML config file to be loaded
:param additional_args: additional args which may extend or override the config loaded from the file.
:return: configuration as dict | entailment |
def find_config(config_path: str) -> str:
"""
Derive configuration file path from the given path and check its existence.
The given path is expected to be either
1. path to the file
2. path to a dir, in such case the path is joined with ``CXF_CONFIG_FILE``
:param config_path: path to the configuration file or its parent directory
:return: validated configuration file path
"""
if path.isdir(config_path): # dir specified instead of config file
config_path = path.join(config_path, CXF_CONFIG_FILE)
assert path.exists(config_path), '`{}` does not exist'.format(config_path)
return config_path | Derive configuration file path from the given path and check its existence.
The given path is expected to be either
1. path to the file
2. path to a dir, in such case the path is joined with ``CXF_CONFIG_FILE``
:param config_path: path to the configuration file or its parent directory
:return: validated configuration file path | entailment |
def fallback(message: str, ex: Exception) -> None:
"""
Fallback procedure when a cli command fails.
:param message: message to be logged
:param ex: Exception which caused the failure
"""
logging.error('%s', message)
logging.exception('%s', ex)
sys.exit(1) | Fallback procedure when a cli command fails.
:param message: message to be logged
:param ex: Exception which caused the failure | entailment |
def _configure_dataset(self, data_root: str=None, download_urls: Iterable[str]=None, **kwargs) -> None:
"""
Save the passed values and use them as a default property implementation.
:param data_root: directory to which the files will be downloaded
:param download_urls: list of URLs to be downloaded
"""
self._data_root = data_root
self._download_urls = download_urls | Save the passed values and use them as a default property implementation.
:param data_root: directory to which the files will be downloaded
:param download_urls: list of URLs to be downloaded | entailment |
def resume(config_path: str, restore_from: Optional[str], cl_arguments: Iterable[str], output_root: str) -> None:
"""
Load config from the directory specified and start the training.
:param config_path: path to the config file or the directory in which it is stored
:param restore_from: backend-specific path to the already trained model to be restored from.
If ``None`` is passed, it is inferred from the configuration file location as the directory
it is located in.
:param cl_arguments: additional command line arguments which will update the configuration
:param output_root: output root in which the training directory will be created
"""
config = None
try:
config_path = find_config(config_path)
restore_from = restore_from or path.dirname(config_path)
config = load_config(config_file=config_path, additional_args=cl_arguments)
validate_config(config)
logging.debug('\tLoaded config: %s', config)
except Exception as ex: # pylint: disable=broad-except
fallback('Loading config failed', ex)
run(config=config, output_root=output_root, restore_from=restore_from) | Load config from the directory specified and start the training.
:param config_path: path to the config file or the directory in which it is stored
:param restore_from: backend-specific path to the already trained model to be restored from.
If ``None`` is passed, it is inferred from the configuration file location as the directory
it is located in.
:param cl_arguments: additional command line arguments which will update the configuration
:param output_root: output root in which the training directory will be created | entailment |
def after_epoch_profile(self, epoch_id: int, profile: TimeProfile, train_stream_name: str, extra_streams: Iterable[str]) -> None:
"""
After epoch profile event.
This event provides opportunity to process time profile of the finished epoch.
:param epoch_id: finished epoch id
:param profile: dictionary of lists of event timings that were measured during the epoch
:param extra_streams: enumeration of additional stream names
"""
pass | After epoch profile event.
This event provides opportunity to process time profile of the finished epoch.
:param epoch_id: finished epoch id
:param profile: dictionary of lists of event timings that were measured during the epoch
:param extra_streams: enumeration of additional stream names | entailment |
def load_yaml(yaml_file: str) -> Any:
"""
Load YAML from file.
:param yaml_file: path to YAML file
:return: content of the YAML as dict/list
"""
with open(yaml_file, 'r') as file:
return ruamel.yaml.load(file, ruamel.yaml.RoundTripLoader) | Load YAML from file.
:param yaml_file: path to YAML file
:return: content of the YAML as dict/list | entailment |
def yaml_to_file(data: Mapping, output_dir: str, name: str) -> str:
"""
Save the given object to the given path in YAML.
:param data: dict/list to be dumped
:param output_dir: target output directory
:param name: target filename
:return: target path
"""
dumped_config_f = path.join(output_dir, name)
with open(dumped_config_f, 'w') as file:
yaml.dump(data, file, Dumper=ruamel.yaml.RoundTripDumper)
return dumped_config_f | Save the given object to the given path in YAML.
:param data: dict/list to be dumped
:param output_dir: target output directory
:param name: target filename
:return: target path | entailment |
def yaml_to_str(data: Mapping) -> str:
"""
Return the given given config as YAML str.
:param data: configuration dict
:return: given configuration as yaml str
"""
return yaml.dump(data, Dumper=ruamel.yaml.RoundTripDumper) | Return the given given config as YAML str.
:param data: configuration dict
:return: given configuration as yaml str | entailment |
def make_simple(data: Any) -> Any:
"""
Substitute all the references in the given data (typically a mapping or sequence) with the actual values.
This is useful, if you loaded a yaml with RoundTripLoader and you need to dump part of it safely.
:param data: data to be made simple (dict instead of CommentedMap etc.)
:return: simplified data
"""
return yaml.load(yaml.dump(data, Dumper=ruamel.yaml.RoundTripDumper), Loader=ruamel.yaml.Loader) | Substitute all the references in the given data (typically a mapping or sequence) with the actual values.
This is useful, if you loaded a yaml with RoundTripLoader and you need to dump part of it safely.
:param data: data to be made simple (dict instead of CommentedMap etc.)
:return: simplified data | entailment |
def reload(data: Any) -> Any:
"""
Dump and load yaml data.
This is useful to avoid many anchor parsing bugs. When you edit a yaml config, reload it to make sure
the changes are propagated to anchor expansions.
:param data: data to be reloaded
:return: reloaded data
"""
return yaml.load(yaml.dump(data, Dumper=ruamel.yaml.RoundTripDumper), Loader=ruamel.yaml.RoundTripLoader) | Dump and load yaml data.
This is useful to avoid many anchor parsing bugs. When you edit a yaml config, reload it to make sure
the changes are propagated to anchor expansions.
:param data: data to be reloaded
:return: reloaded data | entailment |
def after_epoch(self, epoch_id: int, epoch_data: EpochData) -> None:
"""
Call :py:meth:`_on_plateau_action` if the ``long_term``
variable mean is lower/greater than the ``short_term`` mean.
"""
super().after_epoch(epoch_id=epoch_id, epoch_data=epoch_data)
self._saved_loss.append(epoch_data[self._stream][self._variable][OnPlateau._AGGREGATION])
long_mean = np.mean(self._saved_loss[-self._long_term:])
short_mean = np.mean(self._saved_loss[-self._short_term:])
if self._objective == 'min' and long_mean < short_mean:
self._on_plateau_action(epoch_id=epoch_id, epoch_data=epoch_data)
elif self._objective == 'max' and long_mean > short_mean:
self._on_plateau_action(epoch_id=epoch_id, epoch_data=epoch_data) | Call :py:meth:`_on_plateau_action` if the ``long_term``
variable mean is lower/greater than the ``short_term`` mean. | entailment |
def _is_nan(self, variable: str, data) -> bool:
"""
Recursively search passed data and find NaNs.
:param variable: name of variable to be checked
:param data: data object (dict, list, scalar)
:return: `True` if there is a NaN value in the data; `False` otherwise.
:raise ValueError: if the variable value is of unsupported type and ``on_unknown_type`` is set to ``error``
"""
if isinstance(data, np.ndarray) or isinstance(data, list):
return any(np.isnan(data)) or (self._stop_on_inf and any(np.isinf(data)))
elif np.isscalar(data):
return np.isnan(data) or (self._stop_on_inf and np.isinf(data))
elif isinstance(data, dict):
return any([self._is_nan(key, value) for key, value in data.items()])
else:
message = 'Variable `{}` of type `{}` can not be checked for NaNs.'.format(variable, type(data))
if self._on_unkown_type == 'warn':
logging.warning(message)
elif self._on_unkown_type == 'error':
raise ValueError(message)
return False | Recursively search passed data and find NaNs.
:param variable: name of variable to be checked
:param data: data object (dict, list, scalar)
:return: `True` if there is a NaN value in the data; `False` otherwise.
:raise ValueError: if the variable value is of unsupported type and ``on_unknown_type`` is set to ``error`` | entailment |
def _check_nan(self, epoch_data: EpochData) -> None:
"""
Raise an exception when some of the monitored data is NaN.
:param epoch_data: epoch data checked
:raise KeyError: if the specified variable is not found in the stream
:raise ValueError: if the variable value is of unsupported type and ``self._on_unknown_type`` is set to ``error``
"""
for stream_name in epoch_data.keys():
stream_data = epoch_data[stream_name]
variables = self._variables if self._variables is not None else stream_data.keys()
for variable in variables:
if variable not in stream_data:
raise KeyError('Variable `{}` to be nan-checked was not found in the batch data for stream `{}`. '
'Available variables are `{}`.'.format(variable, stream_name, stream_data.keys()))
value = stream_data[variable]
if self._is_nan(variable, value):
raise TrainingTerminated('Variable `{}` is NaN.'.format(variable)) | Raise an exception when some of the monitored data is NaN.
:param epoch_data: epoch data checked
:raise KeyError: if the specified variable is not found in the stream
:raise ValueError: if the variable value is of unsupported type and ``self._on_unknown_type`` is set to ``error`` | entailment |
def after_epoch(self, epoch_data: EpochData, **kwargs) -> None:
"""
If initialized to check after each epoch, stop the training once the epoch data contains a monitored
variable equal to NaN.
:param epoch_data: epoch data to be checked
"""
if self._after_epoch:
self._check_nan(epoch_data) | If initialized to check after each epoch, stop the training once the epoch data contains a monitored
variable equal to NaN.
:param epoch_data: epoch data to be checked | entailment |
def after_batch(self, stream_name: str, batch_data) -> None:
"""
If initialized to check after each batch, stop the training once the batch data contains a monitored
variable equal to NaN.
:param stream_name: name of the stream to be checked
:param batch_data: batch data to be checked
"""
if self._after_batch:
self._check_nan({stream_name: batch_data}) | If initialized to check after each batch, stop the training once the batch data contains a monitored
variable equal to NaN.
:param stream_name: name of the stream to be checked
:param batch_data: batch data to be checked | entailment |
def after_epoch(self, epoch_id: int, **kwargs) -> None:
"""
Call ``_after_n_epoch`` method every ``n_epochs`` epoch.
:param epoch_id: number of the processed epoch
"""
if epoch_id % self._n_epochs == 0:
self._after_n_epoch(epoch_id=epoch_id, **kwargs) | Call ``_after_n_epoch`` method every ``n_epochs`` epoch.
:param epoch_id: number of the processed epoch | entailment |
def path_total_size(path_: str) -> int:
"""Compute total size of the given file/dir."""
if path.isfile(path_):
return path.getsize(path_)
total_size = 0
for root_dir, _, files in os.walk(path_):
for file_ in files:
total_size += path.getsize(path.join(root_dir, file_))
return total_size | Compute total size of the given file/dir. | entailment |
def humanize_filesize(filesize: int) -> Tuple[str, str]:
"""Return human readable pair of size and unit from the given filesize in bytes."""
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if filesize < 1024.0:
return '{:3.1f}'.format(filesize), unit+'B'
filesize /= 1024.0 | Return human readable pair of size and unit from the given filesize in bytes. | entailment |
def is_train_dir(dir_: str) -> bool:
"""Test if the given dir contains training artifacts."""
return path.exists(path.join(dir_, CXF_CONFIG_FILE)) and \
path.exists(path.join(dir_, CXF_TRACE_FILE)) and \
path.exists(path.join(dir_, CXF_LOG_FILE)) | Test if the given dir contains training artifacts. | entailment |
def walk_train_dirs(root_dir: str) -> Iterable[Tuple[str, Iterable[str]]]:
"""
Modify os.walk with the following:
- return only root_dir and sub-dirs
- return only training sub-dirs
- stop recursion at training dirs
:param root_dir: root dir to be walked
:return: generator of (root_dir, training sub-dirs) pairs
"""
if is_train_dir(root_dir):
yield '', [root_dir]
return
for dir_, subdirs, _ in os.walk(root_dir, topdown=True):
# filter train sub-dirs
train_subdirs = [subdir for subdir in subdirs if is_train_dir(path.join(dir_, subdir))]
# stop the recursion at the train sub-dirs
for subdir in train_subdirs:
subdirs.remove(subdir)
yield dir_, train_subdirs | Modify os.walk with the following:
- return only root_dir and sub-dirs
- return only training sub-dirs
- stop recursion at training dirs
:param root_dir: root dir to be walked
:return: generator of (root_dir, training sub-dirs) pairs | entailment |
def _print_trainings_long(trainings: Iterable[Tuple[str, dict, TrainingTrace]]) -> None:
"""
Print a plain table with the details of the given trainings.
:param trainings: iterable of tuples (train_dir, configuration dict, trace)
"""
long_table = []
for train_dir, config, trace in trainings:
start_datetime, end_datetime = trace[TrainingTraceKeys.TRAIN_BEGIN], trace[TrainingTraceKeys.TRAIN_END]
if start_datetime:
age = format_timedelta(datetime.now() - start_datetime) + ' ago'
if end_datetime:
duration = format_timedelta(end_datetime - start_datetime)
else:
duration = CXF_NA_STR
else:
age = CXF_NA_STR
duration = CXF_NA_STR
epochs_done = trace[TrainingTraceKeys.EPOCHS_DONE] if trace[TrainingTraceKeys.EPOCHS_DONE] else 0
long_table.append([path.basename(train_dir)] +
list(map(lambda fq_name: fq_name.split('.')[-1], get_classes(config))) +
[age, duration, epochs_done])
print(tabulate(long_table, tablefmt='plain')) | Print a plain table with the details of the given trainings.
:param trainings: iterable of tuples (train_dir, configuration dict, trace) | entailment |
def _ls_print_listing(dir_: str, recursive: bool, all_: bool, long: bool) -> List[Tuple[str, dict, TrainingTrace]]:
"""
Print names of the train dirs contained in the given dir.
:param dir_: dir to be listed
:param recursive: walk recursively in sub-directories, stop at train dirs (--recursive option)
:param all_: include train dirs with no epochs done (--all option)
:param long: list more details including model name, model and dataset classes,
age, duration and epochs done (--long option)
:return: list of found training tuples (train_dir, configuration dict, trace)
"""
all_trainings = []
for root_dir, train_dirs in walk_train_dirs(dir_):
if train_dirs:
if recursive:
print(root_dir + ':')
trainings = [(train_dir,
load_config(path.join(train_dir, CXF_CONFIG_FILE), []),
TrainingTrace.from_file(path.join(train_dir, CXF_TRACE_FILE)))
for train_dir
in [os.path.join(root_dir, train_dir) for train_dir in train_dirs]]
if not all_:
trainings = [train_dir for train_dir in trainings if train_dir[2][TrainingTraceKeys.EPOCHS_DONE]]
if long:
print('total {}'.format(len(trainings)))
_print_trainings_long(trainings)
else:
for train_dir, _, _ in trainings:
print(path.basename(train_dir))
all_trainings.extend(trainings)
if recursive:
print()
if not recursive:
break
return all_trainings | Print names of the train dirs contained in the given dir.
:param dir_: dir to be listed
:param recursive: walk recursively in sub-directories, stop at train dirs (--recursive option)
:param all_: include train dirs with no epochs done (--all option)
:param long: list more details including model name, model and dataset classes,
age, duration and epochs done (--long option)
:return: list of found training tuples (train_dir, configuration dict, trace) | entailment |
def _ls_print_summary(all_trainings: List[Tuple[str, dict, TrainingTrace]]) -> None:
"""
Print trainings summary.
In particular print tables summarizing the number of trainings with
- particular model names
- particular combinations of models and datasets
:param all_trainings: a list of training tuples (train_dir, configuration dict, trace)
"""
counts_by_name = defaultdict(int)
counts_by_classes = defaultdict(int)
for _, config, _ in all_trainings:
counts_by_name[get_model_name(config)] += 1
counts_by_classes[get_classes(config)] += 1
print_boxed('summary')
print()
counts_table = [[name, count] for name, count in counts_by_name.items()]
print(tabulate(counts_table, headers=['model.name', 'count'], tablefmt='grid'))
print()
counts_table = [[classes[0], classes[1], count] for classes, count in counts_by_classes.items()]
print(tabulate(counts_table, headers=['model.class', 'dataset.class', 'count'], tablefmt='grid'))
print() | Print trainings summary.
In particular print tables summarizing the number of trainings with
- particular model names
- particular combinations of models and datasets
:param all_trainings: a list of training tuples (train_dir, configuration dict, trace) | entailment |
def _ls_print_verbose(training: Tuple[str, dict, str]) -> None:
"""
Print config and artifacts info from the given training tuple (train_dir, configuration dict, trace).
:param training: training tuple (train_dir, configuration dict, trace)
"""
train_dir, config, _ = training
print_boxed('config')
print(yaml_to_str(config))
print()
print_boxed('artifacts')
_, dirs, files = next(os.walk(train_dir))
artifacts = [('d', dir) for dir in dirs] + \
[('-', file_) for file_ in files if file_ not in [CXF_CONFIG_FILE, CXF_LOG_FILE, CXF_TRACE_FILE]]
artifacts = [(type_, name) + humanize_filesize(path_total_size(path.join(train_dir, name)))
for type_, name in artifacts]
print(tabulate(artifacts, tablefmt='plain', floatfmt='3.1f'))
print() | Print config and artifacts info from the given training tuple (train_dir, configuration dict, trace).
:param training: training tuple (train_dir, configuration dict, trace) | entailment |
def list_train_dirs(dir_: str, recursive: bool, all_: bool, long: bool, verbose: bool) -> None:
"""
List training dirs contained in the given dir with options and outputs similar to the regular `ls` command.
The function is accessible through cxflow CLI `cxflow ls`.
:param dir_: dir to be listed
:param recursive: walk recursively in sub-directories, stop at train dirs (--recursive option)
:param all_: include train dirs with no epochs done (--all option)
:param long: list more details including model name, odel and dataset class,
age, duration and epochs done (--long option)
:param verbose: print more verbose output with list of additional artifacts and training config,
applicable only when a single train dir is listed (--verbose option)
"""
if verbose:
long = True
if dir_ == CXF_DEFAULT_LOG_DIR and not path.exists(CXF_DEFAULT_LOG_DIR):
print('The default log directory `{}` does not exist.\n'
'Consider specifying the directory to be listed as an argument.'.format(CXF_DEFAULT_LOG_DIR))
quit(1)
if not path.exists(dir_):
print('Specified dir `{}` does not exist'.format(dir_))
quit(1)
all_trainings = _ls_print_listing(dir_, recursive, all_, long)
if long and len(all_trainings) > 1:
if not recursive:
print()
_ls_print_summary(all_trainings)
if verbose and len(all_trainings) == 1:
if not recursive:
print()
_ls_print_verbose(all_trainings[0]) | List training dirs contained in the given dir with options and outputs similar to the regular `ls` command.
The function is accessible through cxflow CLI `cxflow ls`.
:param dir_: dir to be listed
:param recursive: walk recursively in sub-directories, stop at train dirs (--recursive option)
:param all_: include train dirs with no epochs done (--all option)
:param long: list more details including model name, odel and dataset class,
age, duration and epochs done (--long option)
:param verbose: print more verbose output with list of additional artifacts and training config,
applicable only when a single train dir is listed (--verbose option) | entailment |
def after_batch(self, stream_name: str, batch_data: Batch):
"""
Extend the accumulated variables with the given batch data.
:param stream_name: stream name; e.g. ``train`` or any other...
:param batch_data: batch data = stream sources + model outputs
:raise KeyError: if the variables to be aggregated are missing
:raise TypeError: if the variable value is not iterable (e.g. it is only a scalar)
"""
for variable in self._variables:
if variable in batch_data:
value = batch_data[variable]
if not hasattr(value, '__iter__'):
raise TypeError('Variable `{}` to be accumulated is not iterable.'.format(variable))
self._accumulator[stream_name][variable] += list(value)
else:
raise KeyError('Variable `{}` to be accumulated was not found in the batch data. '
'Available variables are `{}`.'.format(variable, batch_data.keys())) | Extend the accumulated variables with the given batch data.
:param stream_name: stream name; e.g. ``train`` or any other...
:param batch_data: batch data = stream sources + model outputs
:raise KeyError: if the variables to be aggregated are missing
:raise TypeError: if the variable value is not iterable (e.g. it is only a scalar) | entailment |
def invoke_dataset_method(config_path: str, method_name: str, output_root: str, cl_arguments: Iterable[str]) -> None:
"""
Create the specified dataset and invoke its specified method.
:param config_path: path to the config file or the directory in which it is stored
:param method_name: name of the method to be invoked on the specified dataset
:param cl_arguments: additional command line arguments which will update the configuration
:param output_root: output root in which the training directory will be created
"""
config = dataset = method = output_dir = None
try:
config_path = find_config(config_path)
config = load_config(config_file=config_path, additional_args=cl_arguments)
assert 'dataset' in config, '`dataset` section not present in the config'
logging.debug('\tLoaded config: %s', config)
except Exception as ex: # pylint: disable=broad-except
fallback('Loading config failed', ex)
try:
dataset = create_dataset(config)
except Exception as ex: # pylint: disable=broad-except
fallback('Creating dataset failed', ex)
try:
method = getattr(dataset, method_name)
except AttributeError as ex:
fallback('Method `{}` not found in the dataset'.format(method_name), ex)
try:
method()
except Exception as ex: # pylint: disable=broad-except
fallback('Exception occurred during method `{}` invocation'.format(method_name), ex) | Create the specified dataset and invoke its specified method.
:param config_path: path to the config file or the directory in which it is stored
:param method_name: name of the method to be invoked on the specified dataset
:param cl_arguments: additional command line arguments which will update the configuration
:param output_root: output root in which the training directory will be created | entailment |
def _signal_handler(self, *_) -> None:
"""
On the first signal, increase the ``self._num_signals`` counter.
Call ``sys.exit`` on any subsequent signal.
"""
if self._num_signals == 0:
logging.warning('Interrupt signal caught - training will be terminated')
logging.warning('Another interrupt signal will terminate the program immediately')
self._num_signals += 1
else:
logging.error('Another interrupt signal caught - terminating program immediately')
sys.exit(2) | On the first signal, increase the ``self._num_signals`` counter.
Call ``sys.exit`` on any subsequent signal. | entailment |
def create_output_dir(config: dict, output_root: str, default_model_name: str='Unnamed') -> str:
"""
Create output_dir under the given ``output_root`` and
- dump the given config to YAML file under this dir
- register a file logger logging to a file under this dir
:param config: config to be dumped
:param output_root: dir wherein output_dir shall be created
:param default_model_name: name to be used when `model.name` is not found in the config
:return: path to the created output_dir
"""
logging.info('Creating output dir')
# create output dir
model_name = default_model_name
if 'name' not in config['model']:
logging.warning('\tmodel.name not found in config, defaulting to: %s', model_name)
else:
model_name = config['model']['name']
if not os.path.exists(output_root):
logging.info('\tOutput root folder "%s" does not exist and will be created', output_root)
os.makedirs(output_root)
# keep trying to create new output dir until it succeeds
# this is neccessary due to improbable yet possible output dir name conflicts
while True:
try:
output_dir = path.join(output_root, '{}_{}_{}'.format(datetime.now().strftime('%Y-%m-%d-%H-%M-%S'),
model_name, get_random_name()))
os.mkdir(output_dir)
break
except OSError as ex:
if ex.errno != errno.EEXIST:
raise ex
logging.info('\tOutput dir: %s', output_dir)
# create file logger
file_handler = logging.FileHandler(path.join(output_dir, CXF_LOG_FILE))
file_handler.setFormatter(logging.Formatter(CXF_LOG_FORMAT, datefmt=CXF_LOG_DATE_FORMAT))
logging.getLogger().addHandler(file_handler)
return output_dir | Create output_dir under the given ``output_root`` and
- dump the given config to YAML file under this dir
- register a file logger logging to a file under this dir
:param config: config to be dumped
:param output_root: dir wherein output_dir shall be created
:param default_model_name: name to be used when `model.name` is not found in the config
:return: path to the created output_dir | entailment |
def create_dataset(config: dict, output_dir: Optional[str]=None) -> AbstractDataset:
"""
Create a dataset object according to the given config.
Dataset config section and the `output_dir` are passed to the constructor in a single YAML-encoded string.
:param config: config dict with dataset config
:param output_dir: path to the training output dir or None
:return: dataset object
"""
logging.info('Creating dataset')
dataset_config = make_simple(config)['dataset']
assert 'class' in dataset_config, '`dataset.class` not present in the config'
dataset_module, dataset_class = parse_fully_qualified_name(dataset_config['class'])
if 'output_dir' in dataset_config:
raise ValueError('The `output_dir` key is reserved and can not be used in dataset configuration.')
dataset_config = {'output_dir': output_dir, **config['dataset']}
del dataset_config['class']
dataset = create_object(dataset_module, dataset_class, args=(yaml_to_str(dataset_config),))
logging.info('\t%s created', type(dataset).__name__)
return dataset | Create a dataset object according to the given config.
Dataset config section and the `output_dir` are passed to the constructor in a single YAML-encoded string.
:param config: config dict with dataset config
:param output_dir: path to the training output dir or None
:return: dataset object | entailment |
def create_model(config: dict, output_dir: Optional[str], dataset: AbstractDataset,
restore_from: Optional[str]=None) -> AbstractModel:
"""
Create a model object either from scratch of from the checkpoint in ``resume_dir``.
Cxflow allows the following scenarios
1. Create model: leave ``restore_from=None`` and specify ``class``;
2. Restore model: specify ``restore_from`` which is a backend-specific path to (a directory with) the saved model.
:param config: config dict with model config
:param output_dir: path to the training output dir
:param dataset: dataset object implementing the :py:class:`cxflow.datasets.AbstractDataset` concept
:param restore_from: from whence the model should be restored (backend-specific information)
:return: model object
"""
logging.info('Creating a model')
model_config = config['model']
# workaround for ruamel.yaml expansion bug; see #222
model_config = dict(model_config.items())
assert 'class' in model_config, '`model.class` not present in the config'
model_module, model_class = parse_fully_qualified_name(model_config['class'])
# create model kwargs (without `class` and `name`)
model_kwargs = {'dataset': dataset, 'log_dir': output_dir, 'restore_from': restore_from, **model_config}
del model_kwargs['class']
if 'name' in model_kwargs:
del model_kwargs['name']
try:
model = create_object(model_module, model_class, kwargs=model_kwargs)
except (ImportError, AttributeError) as ex:
if restore_from is None: # training case
raise ImportError('Cannot create model from the specified model module `{}` and class `{}`.'.format(
model_module, model_class)) from ex
else: # restore cases (resume, predict)
logging.warning('Cannot create model from the specified model class `%s`.', model_config['class'])
assert 'restore_fallback' in model_config, '`model.restore_fallback` not present in the config'
logging.info('Trying to restore with fallback `%s` instead.', model_config['restore_fallback'])
try: # try fallback class
fallback_module, fallback_class = parse_fully_qualified_name(model_config['restore_fallback'])
model = create_object(fallback_module, fallback_class, kwargs=model_kwargs)
except (ImportError, AttributeError) as ex: # if fallback module/class specified but it fails
raise ImportError('Cannot create model from the specified restore_fallback `{}`.'.format(
model_config['restore_fallback'],)) from ex
logging.info('\t%s created', type(model).__name__)
return model | Create a model object either from scratch of from the checkpoint in ``resume_dir``.
Cxflow allows the following scenarios
1. Create model: leave ``restore_from=None`` and specify ``class``;
2. Restore model: specify ``restore_from`` which is a backend-specific path to (a directory with) the saved model.
:param config: config dict with model config
:param output_dir: path to the training output dir
:param dataset: dataset object implementing the :py:class:`cxflow.datasets.AbstractDataset` concept
:param restore_from: from whence the model should be restored (backend-specific information)
:return: model object | entailment |
def create_hooks(config: dict, model: AbstractModel,
dataset: AbstractDataset, output_dir: str) -> Iterable[AbstractHook]:
"""
Create hooks specified in ``config['hooks']`` list.
Hook config entries may be one of the following types:
.. code-block:: yaml
:caption: A hook with default args specified only by its name as a string; e.g.
hooks:
- LogVariables
- cxflow_tensorflow.WriteTensorBoard
.. code-block:: yaml
:caption: A hook with custom args as a dict name -> args; e.g.
hooks:
- StopAfter:
n_epochs: 10
:param config: config dict
:param model: model object to be passed to the hooks
:param dataset: dataset object to be passed to hooks
:param output_dir: training output dir available to the hooks
:return: list of hook objects
"""
logging.info('Creating hooks')
hooks = []
if 'hooks' in config:
for hook_config in config['hooks']:
if isinstance(hook_config, str):
hook_config = {hook_config: {}}
assert len(hook_config) == 1, 'Hook configuration must have exactly one key (fully qualified name).'
hook_path, hook_params = next(iter(hook_config.items()))
if hook_params is None:
logging.warning('\t\t Empty config of `%s` hook', hook_path)
hook_params = {}
# workaround for ruamel.yaml expansion bug; see #222
hook_params = dict(hook_params.items())
hook_module, hook_class = parse_fully_qualified_name(hook_path)
# find the hook module if not specified
if hook_module is None:
hook_module = get_class_module(CXF_HOOKS_MODULE, hook_class)
logging.debug('\tFound hook module `%s` for class `%s`', hook_module, hook_class)
if hook_module is None:
raise ValueError('Can`t find hook module for hook class `{}`. '
'Make sure it is defined under `{}` sub-modules.'
.format(hook_class, CXF_HOOKS_MODULE))
# create hook kwargs
hook_kwargs = {'dataset': dataset, 'model': model, 'output_dir': output_dir, **hook_params}
# create new hook
try:
hook = create_object(hook_module, hook_class, kwargs=hook_kwargs)
hooks.append(hook)
logging.info('\t%s created', type(hooks[-1]).__name__)
except (ValueError, KeyError, TypeError, NameError, AttributeError, AssertionError, ImportError) as ex:
logging.error('\tFailed to create a hook from config `%s`', hook_config)
raise ex
return hooks | Create hooks specified in ``config['hooks']`` list.
Hook config entries may be one of the following types:
.. code-block:: yaml
:caption: A hook with default args specified only by its name as a string; e.g.
hooks:
- LogVariables
- cxflow_tensorflow.WriteTensorBoard
.. code-block:: yaml
:caption: A hook with custom args as a dict name -> args; e.g.
hooks:
- StopAfter:
n_epochs: 10
:param config: config dict
:param model: model object to be passed to the hooks
:param dataset: dataset object to be passed to hooks
:param output_dir: training output dir available to the hooks
:return: list of hook objects | entailment |
def run(config: dict, output_root: str, restore_from: str=None, eval: Optional[str]=None) -> None:
"""
Run **cxflow** training configured by the passed `config`.
Unique ``output_dir`` for this training is created under the given ``output_root`` dir
wherein all the training outputs are saved. The output dir name will be roughly ``[model.name]_[time]``.
The training procedure consists of the following steps:
1. Set up (create output dir and file logger, dump the loaded config into the output dir)
2. Create dataset (YAML string with ``dataset`` and ``log_dir`` configs are passed to the dataset constructor)
3. Create (or restore) model (dataset, ``log_dir`` and model config is passed to the constructor)
4. Create all the training hooks
5. Create the ``MainLoop`` object
6. Run the main loop
If any of the steps fails, the training is terminated.
After the training procedure finishes, the output dir will contain the following:
- ``train_log.txt`` with entry point and main loop logs (same as the stderr)
- dumped YAML config
Additional outputs created by hooks, dataset or tensorflow may include:
- ``dataset_log.txt`` with info about dataset/stream creation
- model checkpoint(s)
- TensorBoard log file
- TensorFlow event log
:param config: configuration
:param output_root: dir under which output_dir shall be created
:param restore_from: from whence the model should be restored (backend-specific information)
:param eval: optional name of the stream to be evaluated
"""
output_dir = dataset = model = hooks = main_loop = None
try:
output_dir = create_output_dir(config=config, output_root=output_root)
except Exception as ex: # pylint: disable=broad-except
fallback('Failed to create output dir', ex)
try:
dataset = create_dataset(config=config, output_dir=output_dir)
except Exception as ex: # pylint: disable=broad-except
fallback('Creating dataset failed', ex)
try:
model = create_model(config=config, output_dir=output_dir, dataset=dataset, restore_from=restore_from)
except Exception as ex: # pylint: disable=broad-except
fallback('Creating model failed', ex)
try: # save the config to file
# modify the config so that it contains fallback information
config['model']['restore_fallback'] = model.restore_fallback
yaml_to_file(data=config, output_dir=output_dir, name=CXF_CONFIG_FILE)
except Exception as ex: # pylint: disable=broad-except
fallback('Saving config failed', ex)
try:
hooks = create_hooks(config=config, model=model, dataset=dataset, output_dir=output_dir)
except Exception as ex: # pylint: disable=broad-except
fallback('Creating hooks failed', ex)
try:
logging.info('Creating main loop')
kwargs = config['main_loop'] if 'main_loop' in config else {}
if eval is not None:
kwargs['extra_streams'] = []
main_loop = MainLoop(model=model, dataset=dataset, hooks=hooks, **kwargs)
except Exception as ex: # pylint: disable=broad-except
fallback('Creating main loop failed', ex)
if eval is not None:
try:
with main_loop:
logging.info('Running the evaluation of stream `%s`', eval)
main_loop.run_evaluation(eval)
except Exception as ex: # pylint: disable=broad-except
fallback('Running the evaluation failed', ex)
else:
trace = TrainingTrace(output_dir)
try:
with main_loop:
logging.info('Running the training')
trace[TrainingTraceKeys.TRAIN_BEGIN] = datetime.now()
main_loop.run_training(trace)
trace[TrainingTraceKeys.EXIT_STATUS] = 0
except Exception as ex: # pylint: disable=broad-except
trace[TrainingTraceKeys.EXIT_STATUS] = 1
fallback('Running the training failed', ex)
except SystemExit as ex:
trace[TrainingTraceKeys.EXIT_STATUS] = ex.code
finally:
trace[TrainingTraceKeys.EPOCHS_DONE] = main_loop.epochs_done
trace[TrainingTraceKeys.TRAIN_END] = datetime.now() | Run **cxflow** training configured by the passed `config`.
Unique ``output_dir`` for this training is created under the given ``output_root`` dir
wherein all the training outputs are saved. The output dir name will be roughly ``[model.name]_[time]``.
The training procedure consists of the following steps:
1. Set up (create output dir and file logger, dump the loaded config into the output dir)
2. Create dataset (YAML string with ``dataset`` and ``log_dir`` configs are passed to the dataset constructor)
3. Create (or restore) model (dataset, ``log_dir`` and model config is passed to the constructor)
4. Create all the training hooks
5. Create the ``MainLoop`` object
6. Run the main loop
If any of the steps fails, the training is terminated.
After the training procedure finishes, the output dir will contain the following:
- ``train_log.txt`` with entry point and main loop logs (same as the stderr)
- dumped YAML config
Additional outputs created by hooks, dataset or tensorflow may include:
- ``dataset_log.txt`` with info about dataset/stream creation
- model checkpoint(s)
- TensorBoard log file
- TensorFlow event log
:param config: configuration
:param output_root: dir under which output_dir shall be created
:param restore_from: from whence the model should be restored (backend-specific information)
:param eval: optional name of the stream to be evaluated | entailment |
def _safe_rmtree(dir_: str):
"""Wrap ``shutil.rmtree`` to inform user about (un)success."""
try:
rmtree(dir_)
except OSError:
logging.warning('\t\t Skipping %s due to OSError', dir_)
else:
logging.debug('\t\t Deleted %s', dir_) | Wrap ``shutil.rmtree`` to inform user about (un)success. | entailment |
def _prune_subdirs(dir_: str) -> None:
"""
Delete all subdirs in training log dirs.
:param dir_: dir with training log dirs
"""
for logdir in [path.join(dir_, f) for f in listdir(dir_) if is_train_dir(path.join(dir_, f))]:
for subdir in [path.join(logdir, f) for f in listdir(logdir) if path.isdir(path.join(logdir, f))]:
_safe_rmtree(subdir) | Delete all subdirs in training log dirs.
:param dir_: dir with training log dirs | entailment |
def _prune(dir_: str, epochs: int) -> None:
"""
Delete all training dirs with incomplete training artifacts or with less than specified epochs done.
:param dir_: dir with training log dirs
:param epochs: minimum number of finished epochs to keep the training logs
:return: number of log dirs pruned
"""
for logdir in [path.join(dir_, f) for f in listdir(dir_) if path.isdir(path.join(dir_, f))]:
if not is_train_dir(logdir):
_safe_rmtree(logdir)
else:
trace_path = path.join(logdir, CXF_TRACE_FILE)
try:
epochs_done = TrainingTrace.from_file(trace_path)[TrainingTraceKeys.EPOCHS_DONE]
except (KeyError, TypeError):
epochs_done = 0
if not epochs_done or epochs_done < epochs:
_safe_rmtree(logdir) | Delete all training dirs with incomplete training artifacts or with less than specified epochs done.
:param dir_: dir with training log dirs
:param epochs: minimum number of finished epochs to keep the training logs
:return: number of log dirs pruned | entailment |
def prune_train_dirs(dir_: str, epochs: int, subdirs: bool) -> None:
"""
Prune training log dirs contained in the given dir. The function is accessible through cxflow CLI `cxflow prune`.
:param dir_: dir to be pruned
:param epochs: minimum number of finished epochs to keep the training logs
:param subdirs: delete subdirs in training log dirs
"""
if dir_ == CXF_DEFAULT_LOG_DIR and not path.exists(CXF_DEFAULT_LOG_DIR):
print('The default log directory `{}` does not exist.\n'
'Consider specifying the directory to be listed as an argument.'.format(CXF_DEFAULT_LOG_DIR))
quit(1)
if not path.exists(dir_):
print('Specified dir `{}` does not exist'.format(dir_))
quit(1)
_prune(dir_, epochs)
if subdirs:
_prune_subdirs(dir_) | Prune training log dirs contained in the given dir. The function is accessible through cxflow CLI `cxflow prune`.
:param dir_: dir to be pruned
:param epochs: minimum number of finished epochs to keep the training logs
:param subdirs: delete subdirs in training log dirs | entailment |
def output_names(self) -> Iterable[str]:
"""List of model output names."""
self._load_models()
return chain.from_iterable(map(lambda m: m.output_names, self._models)) | List of model output names. | entailment |
def run(self, batch: Batch, train: bool=False, stream: StreamWrapper=None) -> Batch:
"""
Run all the models in-order and return accumulated outputs.
N-th model is fed with the original inputs and outputs of all the models that were run before it.
.. warning::
:py:class:`Sequence` model can not be trained.
:param batch: batch to be processed
:param train: ``True`` if this batch should be used for model update, ``False`` otherwise
:param stream: stream wrapper (useful for precise buffer management)
:return: accumulated model outputs
:raise ValueError: if the ``train`` flag is set to ``True``
"""
if train:
raise ValueError('Ensemble model cannot be trained.')
self._load_models()
# run all the models in-order
current_batch = dict(copy.deepcopy(batch))
for model in self._models:
current_batch.update(model.run(current_batch, False, None))
return {key: current_batch[key] for key in self.output_names} | Run all the models in-order and return accumulated outputs.
N-th model is fed with the original inputs and outputs of all the models that were run before it.
.. warning::
:py:class:`Sequence` model can not be trained.
:param batch: batch to be processed
:param train: ``True`` if this batch should be used for model update, ``False`` otherwise
:param stream: stream wrapper (useful for precise buffer management)
:return: accumulated model outputs
:raise ValueError: if the ``train`` flag is set to ``True`` | entailment |
def _log_variables(self, epoch_data: EpochData):
"""
Log variables from the epoch data.
.. warning::
At the moment, only scalars and dicts of scalars are properly formatted and logged.
Other value types are ignored by default.
One may set ``on_unknown_type`` to ``str`` in order to log all the variables anyways.
:param epoch_data: epoch data to be logged
:raise KeyError: if the specified variable is not found in the stream
:raise TypeError: if the variable value is of unsupported type and ``self._on_unknown_type`` is set to ``error``
"""
for stream_name in epoch_data.keys():
stream_data = epoch_data[stream_name]
variables = self._variables if self._variables is not None else stream_data.keys()
for variable in variables:
if variable not in stream_data:
raise KeyError('Variable `{}` to be logged was not found in the batch data for stream `{}`. '
'Available variables are `{}`.'.format(variable, stream_name, stream_data.keys()))
value = stream_data[variable]
if np.isscalar(value):
logging.info('\t%s %s: %f', stream_name, variable, value)
elif isinstance(value, dict):
keys = list(value.keys())
if len(keys) == 1:
logging.info('\t%s %s %s: %f', stream_name, variable, keys[0], value[keys[0]])
else:
logging.info('\t%s %s:', stream_name, variable)
for key, val in value.items():
logging.info('\t\t%s: %f', key, val)
else:
if self._on_unknown_type == 'error':
raise TypeError('Variable type `{}` can not be logged. Variable name: `{}`.'
.format(type(value).__name__, variable))
elif self._on_unknown_type == 'warn':
logging.warning('Variable type `%s` can not be logged. Variable name: `%s`.',
type(value).__name__, variable)
elif self._on_unknown_type == 'str':
logging.info('\t%s %s: %s', stream_name, variable, value) | Log variables from the epoch data.
.. warning::
At the moment, only scalars and dicts of scalars are properly formatted and logged.
Other value types are ignored by default.
One may set ``on_unknown_type`` to ``str`` in order to log all the variables anyways.
:param epoch_data: epoch data to be logged
:raise KeyError: if the specified variable is not found in the stream
:raise TypeError: if the variable value is of unsupported type and ``self._on_unknown_type`` is set to ``error`` | entailment |
def get_reference_end_from_cigar(reference_start, cigar):
'''
This returns the coordinate just past the last aligned base.
This matches the behavior of pysam's reference_end method
'''
reference_end = reference_start
# iterate through cigartuple
for i in xrange(len(cigar)):
k, n = cigar[i]
if k in (0,2,3,7,8): # M, D, N, =, X
reference_end += n
return reference_end | This returns the coordinate just past the last aligned base.
This matches the behavior of pysam's reference_end method | entailment |
def set_order_by_clip(self, a, b):
'''
Determine which SplitPiece is the leftmost based
on the side of the longest clipping operation
'''
if self.is_left_clip(a.cigar):
self.query_left = b
self.query_right = a
else:
self.query_left = a
self.query_right = b | Determine which SplitPiece is the leftmost based
on the side of the longest clipping operation | entailment |
def is_left_clip(self, cigar):
'''
whether the left side of the read (w/ respect to reference) is clipped.
Clipping side is determined as the side with the longest clip.
Adjacent clipping operations are not considered
'''
left_tuple = cigar[0]
right_tuple = cigar[-1]
left_clipped = self.is_clip_op(left_tuple[0])
right_clipped = self.is_clip_op(right_tuple[0])
return (left_clipped and not right_clipped) or (left_clipped and right_clipped and left_tuple[1] > right_tuple[1]) | whether the left side of the read (w/ respect to reference) is clipped.
Clipping side is determined as the side with the longest clip.
Adjacent clipping operations are not considered | entailment |
def mad(arr):
""" Median Absolute Deviation: a "Robust" version of standard deviation.
Indices variabililty of the sample.
https://en.wikipedia.org/wiki/Median_absolute_deviation
"""
arr = np.ma.array(arr).compressed() # should be faster to not use masked arrays.
med = np.median(arr)
return np.median(np.abs(arr - med)) | Median Absolute Deviation: a "Robust" version of standard deviation.
Indices variabililty of the sample.
https://en.wikipedia.org/wiki/Median_absolute_deviation | entailment |
def _is_streaming_request(self):
"""check request is stream request or not"""
arg2 = self.argstreams[1]
arg3 = self.argstreams[2]
return not (isinstance(arg2, InMemStream) and
isinstance(arg3, InMemStream) and
((arg2.auto_close and arg3.auto_close) or (
arg2.state == StreamState.completed and
arg3.state == StreamState.completed))) | check request is stream request or not | entailment |
def should_retry_on_error(self, error):
"""rules for retry
:param error:
ProtocolException that returns from Server
"""
if self.is_streaming_request:
# not retry for streaming request
return False
retry_flag = self.headers.get('re', retry.DEFAULT)
if retry_flag == retry.NEVER:
return False
if isinstance(error, StreamClosedError):
return True
if error.code in [ErrorCode.bad_request, ErrorCode.cancelled,
ErrorCode.unhealthy]:
return False
elif error.code in [ErrorCode.busy, ErrorCode.declined]:
return True
elif error.code is ErrorCode.timeout:
return retry_flag is not retry.CONNECTION_ERROR
elif error.code in [ErrorCode.network_error,
ErrorCode.fatal,
ErrorCode.unexpected]:
return retry_flag is not retry.TIMEOUT
else:
return False | rules for retry
:param error:
ProtocolException that returns from Server | entailment |
def client_for(service, service_module, thrift_service_name=None):
"""Build a synchronous client class for the given Thrift service.
The generated class accepts a TChannelSyncClient and an optional
hostport as initialization arguments.
Given ``CommentService`` defined in ``comment.thrift`` and registered
with Hyperbahn under the name "comment", here's how this might be used:
.. code-block:: python
from tchannel.sync import TChannelSyncClient
from tchannel.sync.thrift import client_for
from comment import CommentService
CommentServiceClient = client_for('comment', CommentService)
tchannel_sync = TChannelSyncClient('my-service')
comment_client = CommentServiceClient(tchannel_sync)
future = comment_client.postComment(
articleId,
CommentService.Comment("hi")
)
result = future.result()
:param service:
Name of the Hyperbahn service being called.
:param service_module:
The Thrift-generated module for that service. This usually has
the same name as definied for the service in the IDL.
:param thrift_service_name:
If the Thrift service has a different name than its module, use
this parameter to specify it.
:returns:
An Thrift-like class, ready to be instantiated and used
with TChannelSyncClient.
"""
assert service_module, 'service_module is required'
service = service or '' # may be blank for non-hyperbahn use cases
if not thrift_service_name:
thrift_service_name = service_module.__name__.rsplit('.', 1)[-1]
method_names = get_service_methods(service_module.Iface)
def init(
self,
tchannel,
hostport=None,
trace=False,
protocol_headers=None,
):
self.async_thrift = self.__async_client_class__(
tchannel=tchannel,
hostport=hostport,
trace=trace,
protocol_headers=protocol_headers,
)
self.threadloop = tchannel._threadloop
init.__name__ = '__init__'
methods = {
'__init__': init,
'__async_client_class__': async_client_for(
service=service,
service_module=service_module,
thrift_service_name=thrift_service_name,
)
}
methods.update({
method_name: generate_method(method_name)
for method_name in method_names
})
return type(thrift_service_name + 'Client', (object,), methods) | Build a synchronous client class for the given Thrift service.
The generated class accepts a TChannelSyncClient and an optional
hostport as initialization arguments.
Given ``CommentService`` defined in ``comment.thrift`` and registered
with Hyperbahn under the name "comment", here's how this might be used:
.. code-block:: python
from tchannel.sync import TChannelSyncClient
from tchannel.sync.thrift import client_for
from comment import CommentService
CommentServiceClient = client_for('comment', CommentService)
tchannel_sync = TChannelSyncClient('my-service')
comment_client = CommentServiceClient(tchannel_sync)
future = comment_client.postComment(
articleId,
CommentService.Comment("hi")
)
result = future.result()
:param service:
Name of the Hyperbahn service being called.
:param service_module:
The Thrift-generated module for that service. This usually has
the same name as definied for the service in the IDL.
:param thrift_service_name:
If the Thrift service has a different name than its module, use
this parameter to specify it.
:returns:
An Thrift-like class, ready to be instantiated and used
with TChannelSyncClient. | entailment |
def generate_method(method_name):
"""Generate a method for a given Thrift service.
Uses the provided TChannelSyncClient's threadloop in order
to convert RPC calls to concurrent.futures
:param method_name: Method being called.
:return: A method that invokes the RPC using TChannelSyncClient
"""
def call(self, *args, **kwargs):
"""Forward RPC call to TChannelSyncClient
:return concurrent.futures.Future:
"""
if not self.threadloop.is_ready():
self.threadloop.start()
return self.threadloop.submit(
getattr(self.async_thrift, method_name), *args, **kwargs
)
return call | Generate a method for a given Thrift service.
Uses the provided TChannelSyncClient's threadloop in order
to convert RPC calls to concurrent.futures
:param method_name: Method being called.
:return: A method that invokes the RPC using TChannelSyncClient | entailment |
def read_full(stream):
"""Read the full contents of the given stream into memory.
:return:
A future containing the complete stream contents.
"""
assert stream, "stream is required"
chunks = []
chunk = yield stream.read()
while chunk:
chunks.append(chunk)
chunk = yield stream.read()
raise tornado.gen.Return(b''.join(chunks)) | Read the full contents of the given stream into memory.
:return:
A future containing the complete stream contents. | entailment |
def maybe_stream(s):
"""Ensure that the given argument is a stream."""
if isinstance(s, Stream):
return s
if s is None:
stream = InMemStream()
stream.close() # we don't intend to write anything
return stream
if isinstance(s, unicode):
s = s.encode('utf-8')
if isinstance(s, bytearray):
s = bytes(s)
if isinstance(s, bytes):
stream = InMemStream(s)
stream.close() # we don't intend to write anything
return stream
# s may still conform to the Stream interface. Yay duck typing.
return s | Ensure that the given argument is a stream. | entailment |
def build_raw_error_message(protocol_exception):
"""build protocol level error message based on Error object"""
message = ErrorMessage(
id=protocol_exception.id,
code=protocol_exception.code,
tracing=protocol_exception.tracing,
description=protocol_exception.description,
)
return message | build protocol level error message based on Error object | entailment |
def build_raw_request_message(self, request, args, is_completed=False):
"""build protocol level message based on request and args.
request object contains meta information about outgoing request.
args are the currently chunk data from argstreams
is_completed tells the flags of the message
:param request: Request
:param args: array of arg streams
:param is_completed: message flags
:return: CallRequestMessage/CallRequestContinueMessage
"""
request.flags = FlagsType.none if is_completed else FlagsType.fragment
# TODO decide what need to pass from request
if request.state == StreamState.init:
message = CallRequestMessage(
flags=request.flags,
ttl=request.ttl * 1000,
tracing=request.tracing,
service=request.service,
headers=request.headers,
checksum=request.checksum,
args=args
)
request.state = (StreamState.completed if is_completed
else StreamState.streaming)
elif request.state == StreamState.streaming:
message = CallRequestContinueMessage(
flags=request.flags,
checksum=request.checksum,
args=args
)
request.state = (StreamState.completed if is_completed
else StreamState.streaming)
message.id = request.id
return message | build protocol level message based on request and args.
request object contains meta information about outgoing request.
args are the currently chunk data from argstreams
is_completed tells the flags of the message
:param request: Request
:param args: array of arg streams
:param is_completed: message flags
:return: CallRequestMessage/CallRequestContinueMessage | entailment |
def build_raw_response_message(self, response, args, is_completed=False):
"""build protocol level message based on response and args.
response object contains meta information about outgoing response.
args are the currently chunk data from argstreams
is_completed tells the flags of the message
:param response: Response
:param args: array of arg streams
:param is_completed: message flags
:return: CallResponseMessage/CallResponseContinueMessage
"""
response.flags = FlagsType.none if is_completed else FlagsType.fragment
# TODO decide what need to pass from request
if response.state == StreamState.init:
message = CallResponseMessage(
flags=response.flags,
code=response.code,
tracing=response.tracing,
headers=response.headers,
checksum=response.checksum,
args=args
)
response.state = (StreamState.completed if is_completed
else StreamState.streaming)
elif response.state == StreamState.streaming:
message = CallResponseContinueMessage(
flags=response.flags,
checksum=response.checksum,
args=args
)
response.state = (StreamState.completed if is_completed
else StreamState.streaming)
message.id = response.id
return message | build protocol level message based on response and args.
response object contains meta information about outgoing response.
args are the currently chunk data from argstreams
is_completed tells the flags of the message
:param response: Response
:param args: array of arg streams
:param is_completed: message flags
:return: CallResponseMessage/CallResponseContinueMessage | entailment |
def build_request(self, message):
"""Build inbound request object from protocol level message info.
It is allowed to take incompleted CallRequestMessage. Therefore the
created request may not contain whole three arguments.
:param message: CallRequestMessage
:return: request object
"""
args = self.prepare_args(message)
# TODO decide what to pass to Request from message
req = Request(
flags=message.flags,
ttl=message.ttl / 1000.0,
tracing=message.tracing,
service=message.service,
headers=message.headers,
checksum=message.checksum,
argstreams=args,
id=message.id,
)
return req | Build inbound request object from protocol level message info.
It is allowed to take incompleted CallRequestMessage. Therefore the
created request may not contain whole three arguments.
:param message: CallRequestMessage
:return: request object | entailment |
def build_response(self, message):
"""Build response object from protocol level message info
It is allowed to take incompleted CallResponseMessage. Therefore the
created request may not contain whole three arguments.
:param message: CallResponseMessage
:return: response object
"""
args = self.prepare_args(message)
# TODO decide what to pass to Response from message
res = Response(
flags=message.flags,
code=message.code,
headers=message.headers,
checksum=message.checksum,
argstreams=args,
id=message.id,
)
return res | Build response object from protocol level message info
It is allowed to take incompleted CallResponseMessage. Therefore the
created request may not contain whole three arguments.
:param message: CallResponseMessage
:return: response object | entailment |
def build(self, message):
"""buffer all the streaming messages based on the
message id. Reconstruct all fragments together.
:param message:
incoming message
:return: next complete message or None if streaming
is not done
"""
context = None
if message.message_type in [Types.CALL_REQ,
Types.CALL_RES]:
self.verify_message(message)
context = self.build_context(message)
# streaming message
if message.flags == common.FlagsType.fragment:
self.message_buffer[message.id] = context
# find the incompleted stream
num = 0
for i, arg in enumerate(context.argstreams):
if arg.state != StreamState.completed:
num = i
break
self.close_argstream(context, num)
return context
elif message.message_type in [Types.CALL_REQ_CONTINUE,
Types.CALL_RES_CONTINUE]:
context = self.message_buffer.get(message.id)
if context is None:
# missing call msg before continue msg
raise FatalProtocolError(
"missing call message after receiving continue message",
message.id,
)
# find the incompleted stream
dst = 0
for i, arg in enumerate(context.argstreams):
if arg.state != StreamState.completed:
dst = i
break
try:
self.verify_message(message)
except InvalidChecksumError as e:
context.argstreams[dst].set_exception(e)
raise
src = 0
while src < len(message.args):
context.argstreams[dst].write(message.args[src])
dst += 1
src += 1
if message.flags != FlagsType.fragment:
# get last fragment. mark it as completed
assert (len(context.argstreams) ==
CallContinueMessage.max_args_num)
self.message_buffer.pop(message.id, None)
context.flags = FlagsType.none
self.close_argstream(context, dst - 1)
return None
elif message.message_type == Types.ERROR:
context = self.message_buffer.pop(message.id, None)
if context is None:
log.info('Unconsumed error %s', message)
return None
else:
error = TChannelError.from_code(
message.code,
description=message.description,
tracing=context.tracing,
)
context.set_exception(error)
return error
else:
return message | buffer all the streaming messages based on the
message id. Reconstruct all fragments together.
:param message:
incoming message
:return: next complete message or None if streaming
is not done | entailment |
def fragment(self, message):
"""Fragment message based on max payload size
note: if the message doesn't need to fragment,
it will return a list which only contains original
message itself.
:param message: raw message
:return: list of messages whose sizes <= max
payload size
"""
if message.message_type in [Types.CALL_RES,
Types.CALL_REQ,
Types.CALL_REQ_CONTINUE,
Types.CALL_RES_CONTINUE]:
rw = RW[message.message_type]
payload_space = (common.MAX_PAYLOAD_SIZE -
rw.length_no_args(message))
# split a call/request message into an array
# with a call/request message and {0~n} continue
# message
fragment_msg = message.fragment(payload_space)
self.generate_checksum(message)
yield message
while fragment_msg is not None:
message = fragment_msg
rw = RW[message.message_type]
payload_space = (common.MAX_PAYLOAD_SIZE -
rw.length_no_args(message))
fragment_msg = message.fragment(payload_space)
self.generate_checksum(message)
yield message
else:
yield message | Fragment message based on max payload size
note: if the message doesn't need to fragment,
it will return a list which only contains original
message itself.
:param message: raw message
:return: list of messages whose sizes <= max
payload size | entailment |
def verify_message(self, message):
"""Verify the checksum of the message."""
if verify_checksum(
message,
self.in_checksum.get(message.id, 0),
):
self.in_checksum[message.id] = message.checksum[1]
if message.flags == FlagsType.none:
self.in_checksum.pop(message.id)
else:
self.in_checksum.pop(message.id, None)
raise InvalidChecksumError(
description="Checksum does not match!",
id=message.id,
) | Verify the checksum of the message. | entailment |
def chain(*rws):
"""Build a ReadWriter from the given list of ReadWriters.
.. code-block:: python
chain(
number(1),
number(8),
len_prefixed_string(number(2)),
) # == n1:1 n2:8 s~2
Reads/writes from the given ReadWriters in-order. Returns lists of values
in the same order as the ReadWriters.
:param rws:
One or more ReadWriters
"""
assert rws is not None
if len(rws) == 1 and isinstance(rws[0], list):
# In case someone does chain([l0, l1, ...])
rws = rws[0]
return ChainReadWriter(rws) | Build a ReadWriter from the given list of ReadWriters.
.. code-block:: python
chain(
number(1),
number(8),
len_prefixed_string(number(2)),
) # == n1:1 n2:8 s~2
Reads/writes from the given ReadWriters in-order. Returns lists of values
in the same order as the ReadWriters.
:param rws:
One or more ReadWriters | entailment |
def take(self, stream, num):
"""Read the given number of bytes from the stream.
:param stream:
stream to read from
:param num:
number of bytes to read
:raises ReadError:
if the stream did not yield the exact number of bytes expected
"""
s = stream.read(num)
slen = len(s)
if slen != num:
raise ReadError(
"Expected %d bytes but got %d bytes." % (num, slen)
)
return s | Read the given number of bytes from the stream.
:param stream:
stream to read from
:param num:
number of bytes to read
:raises ReadError:
if the stream did not yield the exact number of bytes expected | entailment |
def get_service_methods(iface):
"""Get a list of methods defined in the interface for a Thrift service.
:param iface:
The Thrift-generated Iface class defining the interface for the
service.
:returns:
A set containing names of the methods defined for the service.
"""
methods = inspect.getmembers(iface, predicate=inspect.ismethod)
return set(
name for (name, method) in methods if not name.startswith('__')
) | Get a list of methods defined in the interface for a Thrift service.
:param iface:
The Thrift-generated Iface class defining the interface for the
service.
:returns:
A set containing names of the methods defined for the service. | entailment |
def deprecate(message):
"""Loudly prints warning."""
warnings.simplefilter('default')
warnings.warn(message, category=DeprecationWarning)
warnings.resetwarnings() | Loudly prints warning. | entailment |
def deprecated(message):
"""Warn every time a fn is called."""
def decorator(fn):
@functools.wraps(fn)
def new_fn(*args, **kwargs):
deprecate(message)
return fn(*args, **kwargs)
return new_fn
return decorator | Warn every time a fn is called. | entailment |
def load(path, service=None, hostport=None, module_name=None):
"""Loads the Thrift file at the specified path.
The file is compiled in-memory and a Python module containing the result
is returned. It may be used with ``TChannel.thrift``. For example,
.. code-block:: python
from tchannel import TChannel, thrift
# Load our server's interface definition.
donuts = thrift.load(path='donuts.thrift')
# We need to specify a service name or hostport because this is a
# downstream service we'll be calling.
coffee = thrift.load(path='coffee.thrift', service='coffee')
tchannel = TChannel('donuts')
@tchannel.thrift.register(donuts.DonutsService)
@tornado.gen.coroutine
def submitOrder(request):
args = request.body
if args.coffee:
yield tchannel.thrift(
coffee.CoffeeService.order(args.coffee)
)
# ...
The returned module contains, one top-level type for each struct, enum,
union, exeption, and service defined in the Thrift file. For each service,
the corresponding class contains a classmethod for each function defined
in that service that accepts the arguments for that function and returns a
``ThriftRequest`` capable of being sent via ``TChannel.thrift``.
For more information on what gets generated by ``load``, see `thriftrw
<http://thriftrw.readthedocs.org/en/latest/>`_.
Note that the ``path`` accepted by ``load`` must be either an absolute
path or a path relative to the *the current directory*. If you need to
refer to Thrift files relative to the Python module in which ``load`` was
called, use the ``__file__`` magic variable.
.. code-block:: python
# Given,
#
# foo/
# myservice.thrift
# bar/
# x.py
#
# Inside foo/bar/x.py,
path = os.path.join(
os.path.dirname(__file__), '../myservice.thrift'
)
The returned value is a valid Python module. You can install the module by
adding it to the ``sys.modules`` dictionary. This will allow importing
items from this module directly. You can use the ``__name__`` magic
variable to make the generated module a submodule of the current module.
For example,
.. code-block:: python
# foo/bar.py
import sys
from tchannel import thrift
donuts = = thrift.load('donuts.thrift')
sys.modules[__name__ + '.donuts'] = donuts
This installs the module generated for ``donuts.thrift`` as the module
``foo.bar.donuts``. Callers can then import items from that module
directly. For example,
.. code-block:: python
# foo/baz.py
from foo.bar.donuts import DonutsService, Order
def baz(tchannel):
return tchannel.thrift(
DonutsService.submitOrder(Order(..))
)
:param str service:
Name of the service that the Thrift file represents. This name will be
used to route requests through Hyperbahn.
:param str path:
Path to the Thrift file. If this is a relative path, it must be
relative to the current directory.
:param str hostport:
Clients can use this to specify the hostport at which the service can
be found. If omitted, TChannel will route the requests through known
peers. This value is ignored by servers.
:param str module_name:
Name used for the generated Python module. Defaults to the name of the
Thrift file.
"""
# TODO replace with more specific exceptions
# assert service, 'service is required'
# assert path, 'path is required'
# Backwards compatibility for callers passing in service name as first arg.
if not path.endswith('.thrift'):
service, path = path, service
module = thriftrw.load(path=path, name=module_name)
return TChannelThriftModule(service, module, hostport) | Loads the Thrift file at the specified path.
The file is compiled in-memory and a Python module containing the result
is returned. It may be used with ``TChannel.thrift``. For example,
.. code-block:: python
from tchannel import TChannel, thrift
# Load our server's interface definition.
donuts = thrift.load(path='donuts.thrift')
# We need to specify a service name or hostport because this is a
# downstream service we'll be calling.
coffee = thrift.load(path='coffee.thrift', service='coffee')
tchannel = TChannel('donuts')
@tchannel.thrift.register(donuts.DonutsService)
@tornado.gen.coroutine
def submitOrder(request):
args = request.body
if args.coffee:
yield tchannel.thrift(
coffee.CoffeeService.order(args.coffee)
)
# ...
The returned module contains, one top-level type for each struct, enum,
union, exeption, and service defined in the Thrift file. For each service,
the corresponding class contains a classmethod for each function defined
in that service that accepts the arguments for that function and returns a
``ThriftRequest`` capable of being sent via ``TChannel.thrift``.
For more information on what gets generated by ``load``, see `thriftrw
<http://thriftrw.readthedocs.org/en/latest/>`_.
Note that the ``path`` accepted by ``load`` must be either an absolute
path or a path relative to the *the current directory*. If you need to
refer to Thrift files relative to the Python module in which ``load`` was
called, use the ``__file__`` magic variable.
.. code-block:: python
# Given,
#
# foo/
# myservice.thrift
# bar/
# x.py
#
# Inside foo/bar/x.py,
path = os.path.join(
os.path.dirname(__file__), '../myservice.thrift'
)
The returned value is a valid Python module. You can install the module by
adding it to the ``sys.modules`` dictionary. This will allow importing
items from this module directly. You can use the ``__name__`` magic
variable to make the generated module a submodule of the current module.
For example,
.. code-block:: python
# foo/bar.py
import sys
from tchannel import thrift
donuts = = thrift.load('donuts.thrift')
sys.modules[__name__ + '.donuts'] = donuts
This installs the module generated for ``donuts.thrift`` as the module
``foo.bar.donuts``. Callers can then import items from that module
directly. For example,
.. code-block:: python
# foo/baz.py
from foo.bar.donuts import DonutsService, Order
def baz(tchannel):
return tchannel.thrift(
DonutsService.submitOrder(Order(..))
)
:param str service:
Name of the service that the Thrift file represents. This name will be
used to route requests through Hyperbahn.
:param str path:
Path to the Thrift file. If this is a relative path, it must be
relative to the current directory.
:param str hostport:
Clients can use this to specify the hostport at which the service can
be found. If omitted, TChannel will route the requests through known
peers. This value is ignored by servers.
:param str module_name:
Name used for the generated Python module. Defaults to the name of the
Thrift file. | entailment |
def register(dispatcher, service, handler=None, method=None):
"""
:param dispatcher:
RequestDispatcher against which the new endpoint will be registered.
:param Service service:
Service object representing the service whose endpoint is being
registered.
:param handler:
A function implementing the given Thrift function.
:param method:
If specified, name of the method being registered. Defaults to the
name of the ``handler`` function.
"""
def decorator(method, handler):
if not method:
method = handler.__name__
function = getattr(service, method, None)
assert function, (
'Service "%s" does not define method "%s"' % (service.name, method)
)
assert not function.oneway
dispatcher.register(
function.endpoint,
build_handler(function, handler),
ThriftRWSerializer(service._module, function._request_cls),
ThriftRWSerializer(service._module, function._response_cls),
)
return handler
if handler is None:
return partial(decorator, method)
else:
return decorator(method, handler) | :param dispatcher:
RequestDispatcher against which the new endpoint will be registered.
:param Service service:
Service object representing the service whose endpoint is being
registered.
:param handler:
A function implementing the given Thrift function.
:param method:
If specified, name of the method being registered. Defaults to the
name of the ``handler`` function. | entailment |
def interface_ip(interface):
"""Determine the IP assigned to us by the given network interface."""
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(
fcntl.ioctl(
sock.fileno(), 0x8915, struct.pack('256s', interface[:15])
)[20:24]
) | Determine the IP assigned to us by the given network interface. | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.