code string | signature string | docstring string | loss_without_docstring float64 | loss_with_docstring float64 | factor float64 |
|---|---|---|---|---|---|
with open(yaml_file, 'r') as file:
return ruamel.yaml.load(file, ruamel.yaml.RoundTripLoader) | def load_yaml(yaml_file: str) -> Any | Load YAML from file.
:param yaml_file: path to YAML file
:return: content of the YAML as dict/list | 2.611892 | 3.041967 | 0.85862 |
dumped_config_f = path.join(output_dir, name)
with open(dumped_config_f, 'w') as file:
yaml.dump(data, file, Dumper=ruamel.yaml.RoundTripDumper)
return dumped_config_f | def yaml_to_file(data: Mapping, output_dir: str, name: str) -> str | Save the given object to the given path in YAML.
:param data: dict/list to be dumped
:param output_dir: target output directory
:param name: target filename
:return: target path | 3.061754 | 3.019099 | 1.014129 |
return yaml.dump(data, Dumper=ruamel.yaml.RoundTripDumper) | def yaml_to_str(data: Mapping) -> str | Return the given given config as YAML str.
:param data: configuration dict
:return: given configuration as yaml str | 4.682037 | 5.583017 | 0.838621 |
return yaml.load(yaml.dump(data, Dumper=ruamel.yaml.RoundTripDumper), Loader=ruamel.yaml.Loader) | def make_simple(data: Any) -> Any | Substitute all the references in the given data (typically a mapping or sequence) with the actual values.
This is useful, if you loaded a yaml with RoundTripLoader and you need to dump part of it safely.
:param data: data to be made simple (dict instead of CommentedMap etc.)
:return: simplified data | 4.411144 | 4.895161 | 0.901123 |
return yaml.load(yaml.dump(data, Dumper=ruamel.yaml.RoundTripDumper), Loader=ruamel.yaml.RoundTripLoader) | def reload(data: Any) -> Any | Dump and load yaml data.
This is useful to avoid many anchor parsing bugs. When you edit a yaml config, reload it to make sure
the changes are propagated to anchor expansions.
:param data: data to be reloaded
:return: reloaded data | 3.539309 | 3.721967 | 0.950924 |
super().after_epoch(epoch_id=epoch_id, epoch_data=epoch_data)
self._saved_loss.append(epoch_data[self._stream][self._variable][OnPlateau._AGGREGATION])
long_mean = np.mean(self._saved_loss[-self._long_term:])
short_mean = np.mean(self._saved_loss[-self._short_term:])
if self._objective == 'min' and long_mean < short_mean:
self._on_plateau_action(epoch_id=epoch_id, epoch_data=epoch_data)
elif self._objective == 'max' and long_mean > short_mean:
self._on_plateau_action(epoch_id=epoch_id, epoch_data=epoch_data) | def after_epoch(self, epoch_id: int, epoch_data: EpochData) -> None | Call :py:meth:`_on_plateau_action` if the ``long_term``
variable mean is lower/greater than the ``short_term`` mean. | 2.854112 | 2.222464 | 1.284211 |
if isinstance(data, np.ndarray) or isinstance(data, list):
return any(np.isnan(data)) or (self._stop_on_inf and any(np.isinf(data)))
elif np.isscalar(data):
return np.isnan(data) or (self._stop_on_inf and np.isinf(data))
elif isinstance(data, dict):
return any([self._is_nan(key, value) for key, value in data.items()])
else:
message = 'Variable `{}` of type `{}` can not be checked for NaNs.'.format(variable, type(data))
if self._on_unkown_type == 'warn':
logging.warning(message)
elif self._on_unkown_type == 'error':
raise ValueError(message)
return False | def _is_nan(self, variable: str, data) -> bool | Recursively search passed data and find NaNs.
:param variable: name of variable to be checked
:param data: data object (dict, list, scalar)
:return: `True` if there is a NaN value in the data; `False` otherwise.
:raise ValueError: if the variable value is of unsupported type and ``on_unknown_type`` is set to ``error`` | 2.334921 | 2.099444 | 1.112162 |
for stream_name in epoch_data.keys():
stream_data = epoch_data[stream_name]
variables = self._variables if self._variables is not None else stream_data.keys()
for variable in variables:
if variable not in stream_data:
raise KeyError('Variable `{}` to be nan-checked was not found in the batch data for stream `{}`. '
'Available variables are `{}`.'.format(variable, stream_name, stream_data.keys()))
value = stream_data[variable]
if self._is_nan(variable, value):
raise TrainingTerminated('Variable `{}` is NaN.'.format(variable)) | def _check_nan(self, epoch_data: EpochData) -> None | Raise an exception when some of the monitored data is NaN.
:param epoch_data: epoch data checked
:raise KeyError: if the specified variable is not found in the stream
:raise ValueError: if the variable value is of unsupported type and ``self._on_unknown_type`` is set to ``error`` | 3.21966 | 2.843143 | 1.13243 |
if self._after_epoch:
self._check_nan(epoch_data) | def after_epoch(self, epoch_data: EpochData, **kwargs) -> None | If initialized to check after each epoch, stop the training once the epoch data contains a monitored
variable equal to NaN.
:param epoch_data: epoch data to be checked | 12.838044 | 7.177675 | 1.788608 |
if self._after_batch:
self._check_nan({stream_name: batch_data}) | def after_batch(self, stream_name: str, batch_data) -> None | If initialized to check after each batch, stop the training once the batch data contains a monitored
variable equal to NaN.
:param stream_name: name of the stream to be checked
:param batch_data: batch data to be checked | 14.460238 | 7.72884 | 1.870945 |
if epoch_id % self._n_epochs == 0:
self._after_n_epoch(epoch_id=epoch_id, **kwargs) | def after_epoch(self, epoch_id: int, **kwargs) -> None | Call ``_after_n_epoch`` method every ``n_epochs`` epoch.
:param epoch_id: number of the processed epoch | 4.799046 | 3.152905 | 1.522103 |
if path.isfile(path_):
return path.getsize(path_)
total_size = 0
for root_dir, _, files in os.walk(path_):
for file_ in files:
total_size += path.getsize(path.join(root_dir, file_))
return total_size | def path_total_size(path_: str) -> int | Compute total size of the given file/dir. | 1.993232 | 1.73125 | 1.151326 |
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if filesize < 1024.0:
return '{:3.1f}'.format(filesize), unit+'B'
filesize /= 1024.0 | def humanize_filesize(filesize: int) -> Tuple[str, str] | Return human readable pair of size and unit from the given filesize in bytes. | 2.034435 | 1.883043 | 1.080398 |
return path.exists(path.join(dir_, CXF_CONFIG_FILE)) and \
path.exists(path.join(dir_, CXF_TRACE_FILE)) and \
path.exists(path.join(dir_, CXF_LOG_FILE)) | def is_train_dir(dir_: str) -> bool | Test if the given dir contains training artifacts. | 3.248046 | 2.676156 | 1.213698 |
if is_train_dir(root_dir):
yield '', [root_dir]
return
for dir_, subdirs, _ in os.walk(root_dir, topdown=True):
# filter train sub-dirs
train_subdirs = [subdir for subdir in subdirs if is_train_dir(path.join(dir_, subdir))]
# stop the recursion at the train sub-dirs
for subdir in train_subdirs:
subdirs.remove(subdir)
yield dir_, train_subdirs | def walk_train_dirs(root_dir: str) -> Iterable[Tuple[str, Iterable[str]]] | Modify os.walk with the following:
- return only root_dir and sub-dirs
- return only training sub-dirs
- stop recursion at training dirs
:param root_dir: root dir to be walked
:return: generator of (root_dir, training sub-dirs) pairs | 2.91886 | 2.975712 | 0.980895 |
long_table = []
for train_dir, config, trace in trainings:
start_datetime, end_datetime = trace[TrainingTraceKeys.TRAIN_BEGIN], trace[TrainingTraceKeys.TRAIN_END]
if start_datetime:
age = format_timedelta(datetime.now() - start_datetime) + ' ago'
if end_datetime:
duration = format_timedelta(end_datetime - start_datetime)
else:
duration = CXF_NA_STR
else:
age = CXF_NA_STR
duration = CXF_NA_STR
epochs_done = trace[TrainingTraceKeys.EPOCHS_DONE] if trace[TrainingTraceKeys.EPOCHS_DONE] else 0
long_table.append([path.basename(train_dir)] +
list(map(lambda fq_name: fq_name.split('.')[-1], get_classes(config))) +
[age, duration, epochs_done])
print(tabulate(long_table, tablefmt='plain')) | def _print_trainings_long(trainings: Iterable[Tuple[str, dict, TrainingTrace]]) -> None | Print a plain table with the details of the given trainings.
:param trainings: iterable of tuples (train_dir, configuration dict, trace) | 3.232498 | 3.031741 | 1.066218 |
all_trainings = []
for root_dir, train_dirs in walk_train_dirs(dir_):
if train_dirs:
if recursive:
print(root_dir + ':')
trainings = [(train_dir,
load_config(path.join(train_dir, CXF_CONFIG_FILE), []),
TrainingTrace.from_file(path.join(train_dir, CXF_TRACE_FILE)))
for train_dir
in [os.path.join(root_dir, train_dir) for train_dir in train_dirs]]
if not all_:
trainings = [train_dir for train_dir in trainings if train_dir[2][TrainingTraceKeys.EPOCHS_DONE]]
if long:
print('total {}'.format(len(trainings)))
_print_trainings_long(trainings)
else:
for train_dir, _, _ in trainings:
print(path.basename(train_dir))
all_trainings.extend(trainings)
if recursive:
print()
if not recursive:
break
return all_trainings | def _ls_print_listing(dir_: str, recursive: bool, all_: bool, long: bool) -> List[Tuple[str, dict, TrainingTrace]] | Print names of the train dirs contained in the given dir.
:param dir_: dir to be listed
:param recursive: walk recursively in sub-directories, stop at train dirs (--recursive option)
:param all_: include train dirs with no epochs done (--all option)
:param long: list more details including model name, model and dataset classes,
age, duration and epochs done (--long option)
:return: list of found training tuples (train_dir, configuration dict, trace) | 3.102056 | 2.92047 | 1.062177 |
counts_by_name = defaultdict(int)
counts_by_classes = defaultdict(int)
for _, config, _ in all_trainings:
counts_by_name[get_model_name(config)] += 1
counts_by_classes[get_classes(config)] += 1
print_boxed('summary')
print()
counts_table = [[name, count] for name, count in counts_by_name.items()]
print(tabulate(counts_table, headers=['model.name', 'count'], tablefmt='grid'))
print()
counts_table = [[classes[0], classes[1], count] for classes, count in counts_by_classes.items()]
print(tabulate(counts_table, headers=['model.class', 'dataset.class', 'count'], tablefmt='grid'))
print() | def _ls_print_summary(all_trainings: List[Tuple[str, dict, TrainingTrace]]) -> None | Print trainings summary.
In particular print tables summarizing the number of trainings with
- particular model names
- particular combinations of models and datasets
:param all_trainings: a list of training tuples (train_dir, configuration dict, trace) | 2.316856 | 2.223503 | 1.041985 |
train_dir, config, _ = training
print_boxed('config')
print(yaml_to_str(config))
print()
print_boxed('artifacts')
_, dirs, files = next(os.walk(train_dir))
artifacts = [('d', dir) for dir in dirs] + \
[('-', file_) for file_ in files if file_ not in [CXF_CONFIG_FILE, CXF_LOG_FILE, CXF_TRACE_FILE]]
artifacts = [(type_, name) + humanize_filesize(path_total_size(path.join(train_dir, name)))
for type_, name in artifacts]
print(tabulate(artifacts, tablefmt='plain', floatfmt='3.1f'))
print() | def _ls_print_verbose(training: Tuple[str, dict, str]) -> None | Print config and artifacts info from the given training tuple (train_dir, configuration dict, trace).
:param training: training tuple (train_dir, configuration dict, trace) | 4.582941 | 4.217027 | 1.08677 |
if verbose:
long = True
if dir_ == CXF_DEFAULT_LOG_DIR and not path.exists(CXF_DEFAULT_LOG_DIR):
print('The default log directory `{}` does not exist.\n'
'Consider specifying the directory to be listed as an argument.'.format(CXF_DEFAULT_LOG_DIR))
quit(1)
if not path.exists(dir_):
print('Specified dir `{}` does not exist'.format(dir_))
quit(1)
all_trainings = _ls_print_listing(dir_, recursive, all_, long)
if long and len(all_trainings) > 1:
if not recursive:
print()
_ls_print_summary(all_trainings)
if verbose and len(all_trainings) == 1:
if not recursive:
print()
_ls_print_verbose(all_trainings[0]) | def list_train_dirs(dir_: str, recursive: bool, all_: bool, long: bool, verbose: bool) -> None | List training dirs contained in the given dir with options and outputs similar to the regular `ls` command.
The function is accessible through cxflow CLI `cxflow ls`.
:param dir_: dir to be listed
:param recursive: walk recursively in sub-directories, stop at train dirs (--recursive option)
:param all_: include train dirs with no epochs done (--all option)
:param long: list more details including model name, odel and dataset class,
age, duration and epochs done (--long option)
:param verbose: print more verbose output with list of additional artifacts and training config,
applicable only when a single train dir is listed (--verbose option) | 3.472214 | 2.981399 | 1.164626 |
for variable in self._variables:
if variable in batch_data:
value = batch_data[variable]
if not hasattr(value, '__iter__'):
raise TypeError('Variable `{}` to be accumulated is not iterable.'.format(variable))
self._accumulator[stream_name][variable] += list(value)
else:
raise KeyError('Variable `{}` to be accumulated was not found in the batch data. '
'Available variables are `{}`.'.format(variable, batch_data.keys())) | def after_batch(self, stream_name: str, batch_data: Batch) | Extend the accumulated variables with the given batch data.
:param stream_name: stream name; e.g. ``train`` or any other...
:param batch_data: batch data = stream sources + model outputs
:raise KeyError: if the variables to be aggregated are missing
:raise TypeError: if the variable value is not iterable (e.g. it is only a scalar) | 3.265063 | 2.477293 | 1.317996 |
config = dataset = method = output_dir = None
try:
config_path = find_config(config_path)
config = load_config(config_file=config_path, additional_args=cl_arguments)
assert 'dataset' in config, '`dataset` section not present in the config'
logging.debug('\tLoaded config: %s', config)
except Exception as ex: # pylint: disable=broad-except
fallback('Loading config failed', ex)
try:
dataset = create_dataset(config)
except Exception as ex: # pylint: disable=broad-except
fallback('Creating dataset failed', ex)
try:
method = getattr(dataset, method_name)
except AttributeError as ex:
fallback('Method `{}` not found in the dataset'.format(method_name), ex)
try:
method()
except Exception as ex: # pylint: disable=broad-except
fallback('Exception occurred during method `{}` invocation'.format(method_name), ex) | def invoke_dataset_method(config_path: str, method_name: str, output_root: str, cl_arguments: Iterable[str]) -> None | Create the specified dataset and invoke its specified method.
:param config_path: path to the config file or the directory in which it is stored
:param method_name: name of the method to be invoked on the specified dataset
:param cl_arguments: additional command line arguments which will update the configuration
:param output_root: output root in which the training directory will be created | 2.721755 | 2.680265 | 1.01548 |
if self._num_signals == 0:
logging.warning('Interrupt signal caught - training will be terminated')
logging.warning('Another interrupt signal will terminate the program immediately')
self._num_signals += 1
else:
logging.error('Another interrupt signal caught - terminating program immediately')
sys.exit(2) | def _signal_handler(self, *_) -> None | On the first signal, increase the ``self._num_signals`` counter.
Call ``sys.exit`` on any subsequent signal. | 6.211443 | 4.458033 | 1.393315 |
logging.info('Creating output dir')
# create output dir
model_name = default_model_name
if 'name' not in config['model']:
logging.warning('\tmodel.name not found in config, defaulting to: %s', model_name)
else:
model_name = config['model']['name']
if not os.path.exists(output_root):
logging.info('\tOutput root folder "%s" does not exist and will be created', output_root)
os.makedirs(output_root)
# keep trying to create new output dir until it succeeds
# this is neccessary due to improbable yet possible output dir name conflicts
while True:
try:
output_dir = path.join(output_root, '{}_{}_{}'.format(datetime.now().strftime('%Y-%m-%d-%H-%M-%S'),
model_name, get_random_name()))
os.mkdir(output_dir)
break
except OSError as ex:
if ex.errno != errno.EEXIST:
raise ex
logging.info('\tOutput dir: %s', output_dir)
# create file logger
file_handler = logging.FileHandler(path.join(output_dir, CXF_LOG_FILE))
file_handler.setFormatter(logging.Formatter(CXF_LOG_FORMAT, datefmt=CXF_LOG_DATE_FORMAT))
logging.getLogger().addHandler(file_handler)
return output_dir | def create_output_dir(config: dict, output_root: str, default_model_name: str='Unnamed') -> str | Create output_dir under the given ``output_root`` and
- dump the given config to YAML file under this dir
- register a file logger logging to a file under this dir
:param config: config to be dumped
:param output_root: dir wherein output_dir shall be created
:param default_model_name: name to be used when `model.name` is not found in the config
:return: path to the created output_dir | 2.599987 | 2.55605 | 1.017189 |
logging.info('Creating dataset')
dataset_config = make_simple(config)['dataset']
assert 'class' in dataset_config, '`dataset.class` not present in the config'
dataset_module, dataset_class = parse_fully_qualified_name(dataset_config['class'])
if 'output_dir' in dataset_config:
raise ValueError('The `output_dir` key is reserved and can not be used in dataset configuration.')
dataset_config = {'output_dir': output_dir, **config['dataset']}
del dataset_config['class']
dataset = create_object(dataset_module, dataset_class, args=(yaml_to_str(dataset_config),))
logging.info('\t%s created', type(dataset).__name__)
return dataset | def create_dataset(config: dict, output_dir: Optional[str]=None) -> AbstractDataset | Create a dataset object according to the given config.
Dataset config section and the `output_dir` are passed to the constructor in a single YAML-encoded string.
:param config: config dict with dataset config
:param output_dir: path to the training output dir or None
:return: dataset object | 4.025305 | 3.896024 | 1.033183 |
logging.info('Creating a model')
model_config = config['model']
# workaround for ruamel.yaml expansion bug; see #222
model_config = dict(model_config.items())
assert 'class' in model_config, '`model.class` not present in the config'
model_module, model_class = parse_fully_qualified_name(model_config['class'])
# create model kwargs (without `class` and `name`)
model_kwargs = {'dataset': dataset, 'log_dir': output_dir, 'restore_from': restore_from, **model_config}
del model_kwargs['class']
if 'name' in model_kwargs:
del model_kwargs['name']
try:
model = create_object(model_module, model_class, kwargs=model_kwargs)
except (ImportError, AttributeError) as ex:
if restore_from is None: # training case
raise ImportError('Cannot create model from the specified model module `{}` and class `{}`.'.format(
model_module, model_class)) from ex
else: # restore cases (resume, predict)
logging.warning('Cannot create model from the specified model class `%s`.', model_config['class'])
assert 'restore_fallback' in model_config, '`model.restore_fallback` not present in the config'
logging.info('Trying to restore with fallback `%s` instead.', model_config['restore_fallback'])
try: # try fallback class
fallback_module, fallback_class = parse_fully_qualified_name(model_config['restore_fallback'])
model = create_object(fallback_module, fallback_class, kwargs=model_kwargs)
except (ImportError, AttributeError) as ex: # if fallback module/class specified but it fails
raise ImportError('Cannot create model from the specified restore_fallback `{}`.'.format(
model_config['restore_fallback'],)) from ex
logging.info('\t%s created', type(model).__name__)
return model | def create_model(config: dict, output_dir: Optional[str], dataset: AbstractDataset,
restore_from: Optional[str]=None) -> AbstractModel | Create a model object either from scratch of from the checkpoint in ``resume_dir``.
Cxflow allows the following scenarios
1. Create model: leave ``restore_from=None`` and specify ``class``;
2. Restore model: specify ``restore_from`` which is a backend-specific path to (a directory with) the saved model.
:param config: config dict with model config
:param output_dir: path to the training output dir
:param dataset: dataset object implementing the :py:class:`cxflow.datasets.AbstractDataset` concept
:param restore_from: from whence the model should be restored (backend-specific information)
:return: model object | 3.11024 | 3.166408 | 0.982261 |
logging.info('Creating hooks')
hooks = []
if 'hooks' in config:
for hook_config in config['hooks']:
if isinstance(hook_config, str):
hook_config = {hook_config: {}}
assert len(hook_config) == 1, 'Hook configuration must have exactly one key (fully qualified name).'
hook_path, hook_params = next(iter(hook_config.items()))
if hook_params is None:
logging.warning('\t\t Empty config of `%s` hook', hook_path)
hook_params = {}
# workaround for ruamel.yaml expansion bug; see #222
hook_params = dict(hook_params.items())
hook_module, hook_class = parse_fully_qualified_name(hook_path)
# find the hook module if not specified
if hook_module is None:
hook_module = get_class_module(CXF_HOOKS_MODULE, hook_class)
logging.debug('\tFound hook module `%s` for class `%s`', hook_module, hook_class)
if hook_module is None:
raise ValueError('Can`t find hook module for hook class `{}`. '
'Make sure it is defined under `{}` sub-modules.'
.format(hook_class, CXF_HOOKS_MODULE))
# create hook kwargs
hook_kwargs = {'dataset': dataset, 'model': model, 'output_dir': output_dir, **hook_params}
# create new hook
try:
hook = create_object(hook_module, hook_class, kwargs=hook_kwargs)
hooks.append(hook)
logging.info('\t%s created', type(hooks[-1]).__name__)
except (ValueError, KeyError, TypeError, NameError, AttributeError, AssertionError, ImportError) as ex:
logging.error('\tFailed to create a hook from config `%s`', hook_config)
raise ex
return hooks | def create_hooks(config: dict, model: AbstractModel,
dataset: AbstractDataset, output_dir: str) -> Iterable[AbstractHook] | Create hooks specified in ``config['hooks']`` list.
Hook config entries may be one of the following types:
.. code-block:: yaml
:caption: A hook with default args specified only by its name as a string; e.g.
hooks:
- LogVariables
- cxflow_tensorflow.WriteTensorBoard
.. code-block:: yaml
:caption: A hook with custom args as a dict name -> args; e.g.
hooks:
- StopAfter:
n_epochs: 10
:param config: config dict
:param model: model object to be passed to the hooks
:param dataset: dataset object to be passed to hooks
:param output_dir: training output dir available to the hooks
:return: list of hook objects | 3.109516 | 3.090311 | 1.006215 |
output_dir = dataset = model = hooks = main_loop = None
try:
output_dir = create_output_dir(config=config, output_root=output_root)
except Exception as ex: # pylint: disable=broad-except
fallback('Failed to create output dir', ex)
try:
dataset = create_dataset(config=config, output_dir=output_dir)
except Exception as ex: # pylint: disable=broad-except
fallback('Creating dataset failed', ex)
try:
model = create_model(config=config, output_dir=output_dir, dataset=dataset, restore_from=restore_from)
except Exception as ex: # pylint: disable=broad-except
fallback('Creating model failed', ex)
try: # save the config to file
# modify the config so that it contains fallback information
config['model']['restore_fallback'] = model.restore_fallback
yaml_to_file(data=config, output_dir=output_dir, name=CXF_CONFIG_FILE)
except Exception as ex: # pylint: disable=broad-except
fallback('Saving config failed', ex)
try:
hooks = create_hooks(config=config, model=model, dataset=dataset, output_dir=output_dir)
except Exception as ex: # pylint: disable=broad-except
fallback('Creating hooks failed', ex)
try:
logging.info('Creating main loop')
kwargs = config['main_loop'] if 'main_loop' in config else {}
if eval is not None:
kwargs['extra_streams'] = []
main_loop = MainLoop(model=model, dataset=dataset, hooks=hooks, **kwargs)
except Exception as ex: # pylint: disable=broad-except
fallback('Creating main loop failed', ex)
if eval is not None:
try:
with main_loop:
logging.info('Running the evaluation of stream `%s`', eval)
main_loop.run_evaluation(eval)
except Exception as ex: # pylint: disable=broad-except
fallback('Running the evaluation failed', ex)
else:
trace = TrainingTrace(output_dir)
try:
with main_loop:
logging.info('Running the training')
trace[TrainingTraceKeys.TRAIN_BEGIN] = datetime.now()
main_loop.run_training(trace)
trace[TrainingTraceKeys.EXIT_STATUS] = 0
except Exception as ex: # pylint: disable=broad-except
trace[TrainingTraceKeys.EXIT_STATUS] = 1
fallback('Running the training failed', ex)
except SystemExit as ex:
trace[TrainingTraceKeys.EXIT_STATUS] = ex.code
finally:
trace[TrainingTraceKeys.EPOCHS_DONE] = main_loop.epochs_done
trace[TrainingTraceKeys.TRAIN_END] = datetime.now() | def run(config: dict, output_root: str, restore_from: str=None, eval: Optional[str]=None) -> None | Run **cxflow** training configured by the passed `config`.
Unique ``output_dir`` for this training is created under the given ``output_root`` dir
wherein all the training outputs are saved. The output dir name will be roughly ``[model.name]_[time]``.
The training procedure consists of the following steps:
1. Set up (create output dir and file logger, dump the loaded config into the output dir)
2. Create dataset (YAML string with ``dataset`` and ``log_dir`` configs are passed to the dataset constructor)
3. Create (or restore) model (dataset, ``log_dir`` and model config is passed to the constructor)
4. Create all the training hooks
5. Create the ``MainLoop`` object
6. Run the main loop
If any of the steps fails, the training is terminated.
After the training procedure finishes, the output dir will contain the following:
- ``train_log.txt`` with entry point and main loop logs (same as the stderr)
- dumped YAML config
Additional outputs created by hooks, dataset or tensorflow may include:
- ``dataset_log.txt`` with info about dataset/stream creation
- model checkpoint(s)
- TensorBoard log file
- TensorFlow event log
:param config: configuration
:param output_root: dir under which output_dir shall be created
:param restore_from: from whence the model should be restored (backend-specific information)
:param eval: optional name of the stream to be evaluated | 2.266219 | 2.105311 | 1.07643 |
try:
rmtree(dir_)
except OSError:
logging.warning('\t\t Skipping %s due to OSError', dir_)
else:
logging.debug('\t\t Deleted %s', dir_) | def _safe_rmtree(dir_: str) | Wrap ``shutil.rmtree`` to inform user about (un)success. | 4.033348 | 3.672024 | 1.098399 |
for logdir in [path.join(dir_, f) for f in listdir(dir_) if is_train_dir(path.join(dir_, f))]:
for subdir in [path.join(logdir, f) for f in listdir(logdir) if path.isdir(path.join(logdir, f))]:
_safe_rmtree(subdir) | def _prune_subdirs(dir_: str) -> None | Delete all subdirs in training log dirs.
:param dir_: dir with training log dirs | 2.808103 | 2.519232 | 1.114666 |
for logdir in [path.join(dir_, f) for f in listdir(dir_) if path.isdir(path.join(dir_, f))]:
if not is_train_dir(logdir):
_safe_rmtree(logdir)
else:
trace_path = path.join(logdir, CXF_TRACE_FILE)
try:
epochs_done = TrainingTrace.from_file(trace_path)[TrainingTraceKeys.EPOCHS_DONE]
except (KeyError, TypeError):
epochs_done = 0
if not epochs_done or epochs_done < epochs:
_safe_rmtree(logdir) | def _prune(dir_: str, epochs: int) -> None | Delete all training dirs with incomplete training artifacts or with less than specified epochs done.
:param dir_: dir with training log dirs
:param epochs: minimum number of finished epochs to keep the training logs
:return: number of log dirs pruned | 3.365507 | 3.093907 | 1.087785 |
if dir_ == CXF_DEFAULT_LOG_DIR and not path.exists(CXF_DEFAULT_LOG_DIR):
print('The default log directory `{}` does not exist.\n'
'Consider specifying the directory to be listed as an argument.'.format(CXF_DEFAULT_LOG_DIR))
quit(1)
if not path.exists(dir_):
print('Specified dir `{}` does not exist'.format(dir_))
quit(1)
_prune(dir_, epochs)
if subdirs:
_prune_subdirs(dir_) | def prune_train_dirs(dir_: str, epochs: int, subdirs: bool) -> None | Prune training log dirs contained in the given dir. The function is accessible through cxflow CLI `cxflow prune`.
:param dir_: dir to be pruned
:param epochs: minimum number of finished epochs to keep the training logs
:param subdirs: delete subdirs in training log dirs | 3.763792 | 3.426805 | 1.098339 |
self._load_models()
return chain.from_iterable(map(lambda m: m.output_names, self._models)) | def output_names(self) -> Iterable[str] | List of model output names. | 4.666651 | 4.359523 | 1.07045 |
if train:
raise ValueError('Ensemble model cannot be trained.')
self._load_models()
# run all the models in-order
current_batch = dict(copy.deepcopy(batch))
for model in self._models:
current_batch.update(model.run(current_batch, False, None))
return {key: current_batch[key] for key in self.output_names} | def run(self, batch: Batch, train: bool=False, stream: StreamWrapper=None) -> Batch | Run all the models in-order and return accumulated outputs.
N-th model is fed with the original inputs and outputs of all the models that were run before it.
.. warning::
:py:class:`Sequence` model can not be trained.
:param batch: batch to be processed
:param train: ``True`` if this batch should be used for model update, ``False`` otherwise
:param stream: stream wrapper (useful for precise buffer management)
:return: accumulated model outputs
:raise ValueError: if the ``train`` flag is set to ``True`` | 4.676033 | 4.386025 | 1.066121 |
for stream_name in epoch_data.keys():
stream_data = epoch_data[stream_name]
variables = self._variables if self._variables is not None else stream_data.keys()
for variable in variables:
if variable not in stream_data:
raise KeyError('Variable `{}` to be logged was not found in the batch data for stream `{}`. '
'Available variables are `{}`.'.format(variable, stream_name, stream_data.keys()))
value = stream_data[variable]
if np.isscalar(value):
logging.info('\t%s %s: %f', stream_name, variable, value)
elif isinstance(value, dict):
keys = list(value.keys())
if len(keys) == 1:
logging.info('\t%s %s %s: %f', stream_name, variable, keys[0], value[keys[0]])
else:
logging.info('\t%s %s:', stream_name, variable)
for key, val in value.items():
logging.info('\t\t%s: %f', key, val)
else:
if self._on_unknown_type == 'error':
raise TypeError('Variable type `{}` can not be logged. Variable name: `{}`.'
.format(type(value).__name__, variable))
elif self._on_unknown_type == 'warn':
logging.warning('Variable type `%s` can not be logged. Variable name: `%s`.',
type(value).__name__, variable)
elif self._on_unknown_type == 'str':
logging.info('\t%s %s: %s', stream_name, variable, value) | def _log_variables(self, epoch_data: EpochData) | Log variables from the epoch data.
.. warning::
At the moment, only scalars and dicts of scalars are properly formatted and logged.
Other value types are ignored by default.
One may set ``on_unknown_type`` to ``str`` in order to log all the variables anyways.
:param epoch_data: epoch data to be logged
:raise KeyError: if the specified variable is not found in the stream
:raise TypeError: if the variable value is of unsupported type and ``self._on_unknown_type`` is set to ``error`` | 1.972462 | 1.791961 | 1.100728 |
'''
This returns the coordinate just past the last aligned base.
This matches the behavior of pysam's reference_end method
'''
reference_end = reference_start
# iterate through cigartuple
for i in xrange(len(cigar)):
k, n = cigar[i]
if k in (0,2,3,7,8): # M, D, N, =, X
reference_end += n
return reference_end | def get_reference_end_from_cigar(reference_start, cigar) | This returns the coordinate just past the last aligned base.
This matches the behavior of pysam's reference_end method | 5.201782 | 3.103361 | 1.676177 |
'''
Determine which SplitPiece is the leftmost based
on the side of the longest clipping operation
'''
if self.is_left_clip(a.cigar):
self.query_left = b
self.query_right = a
else:
self.query_left = a
self.query_right = b | def set_order_by_clip(self, a, b) | Determine which SplitPiece is the leftmost based
on the side of the longest clipping operation | 8.365683 | 3.150931 | 2.654987 |
'''
whether the left side of the read (w/ respect to reference) is clipped.
Clipping side is determined as the side with the longest clip.
Adjacent clipping operations are not considered
'''
left_tuple = cigar[0]
right_tuple = cigar[-1]
left_clipped = self.is_clip_op(left_tuple[0])
right_clipped = self.is_clip_op(right_tuple[0])
return (left_clipped and not right_clipped) or (left_clipped and right_clipped and left_tuple[1] > right_tuple[1]) | def is_left_clip(self, cigar) | whether the left side of the read (w/ respect to reference) is clipped.
Clipping side is determined as the side with the longest clip.
Adjacent clipping operations are not considered | 3.714971 | 1.965673 | 1.889923 |
arr = np.ma.array(arr).compressed() # should be faster to not use masked arrays.
med = np.median(arr)
return np.median(np.abs(arr - med)) | def mad(arr) | Median Absolute Deviation: a "Robust" version of standard deviation.
Indices variabililty of the sample.
https://en.wikipedia.org/wiki/Median_absolute_deviation | 3.609717 | 4.087406 | 0.883132 |
arg2 = self.argstreams[1]
arg3 = self.argstreams[2]
return not (isinstance(arg2, InMemStream) and
isinstance(arg3, InMemStream) and
((arg2.auto_close and arg3.auto_close) or (
arg2.state == StreamState.completed and
arg3.state == StreamState.completed))) | def _is_streaming_request(self) | check request is stream request or not | 4.397078 | 3.928567 | 1.119257 |
if self.is_streaming_request:
# not retry for streaming request
return False
retry_flag = self.headers.get('re', retry.DEFAULT)
if retry_flag == retry.NEVER:
return False
if isinstance(error, StreamClosedError):
return True
if error.code in [ErrorCode.bad_request, ErrorCode.cancelled,
ErrorCode.unhealthy]:
return False
elif error.code in [ErrorCode.busy, ErrorCode.declined]:
return True
elif error.code is ErrorCode.timeout:
return retry_flag is not retry.CONNECTION_ERROR
elif error.code in [ErrorCode.network_error,
ErrorCode.fatal,
ErrorCode.unexpected]:
return retry_flag is not retry.TIMEOUT
else:
return False | def should_retry_on_error(self, error) | rules for retry
:param error:
ProtocolException that returns from Server | 4.737093 | 4.589998 | 1.032047 |
assert service_module, 'service_module is required'
service = service or '' # may be blank for non-hyperbahn use cases
if not thrift_service_name:
thrift_service_name = service_module.__name__.rsplit('.', 1)[-1]
method_names = get_service_methods(service_module.Iface)
def init(
self,
tchannel,
hostport=None,
trace=False,
protocol_headers=None,
):
self.async_thrift = self.__async_client_class__(
tchannel=tchannel,
hostport=hostport,
trace=trace,
protocol_headers=protocol_headers,
)
self.threadloop = tchannel._threadloop
init.__name__ = '__init__'
methods = {
'__init__': init,
'__async_client_class__': async_client_for(
service=service,
service_module=service_module,
thrift_service_name=thrift_service_name,
)
}
methods.update({
method_name: generate_method(method_name)
for method_name in method_names
})
return type(thrift_service_name + 'Client', (object,), methods) | def client_for(service, service_module, thrift_service_name=None) | Build a synchronous client class for the given Thrift service.
The generated class accepts a TChannelSyncClient and an optional
hostport as initialization arguments.
Given ``CommentService`` defined in ``comment.thrift`` and registered
with Hyperbahn under the name "comment", here's how this might be used:
.. code-block:: python
from tchannel.sync import TChannelSyncClient
from tchannel.sync.thrift import client_for
from comment import CommentService
CommentServiceClient = client_for('comment', CommentService)
tchannel_sync = TChannelSyncClient('my-service')
comment_client = CommentServiceClient(tchannel_sync)
future = comment_client.postComment(
articleId,
CommentService.Comment("hi")
)
result = future.result()
:param service:
Name of the Hyperbahn service being called.
:param service_module:
The Thrift-generated module for that service. This usually has
the same name as definied for the service in the IDL.
:param thrift_service_name:
If the Thrift service has a different name than its module, use
this parameter to specify it.
:returns:
An Thrift-like class, ready to be instantiated and used
with TChannelSyncClient. | 3.187078 | 3.086388 | 1.032624 |
def call(self, *args, **kwargs):
if not self.threadloop.is_ready():
self.threadloop.start()
return self.threadloop.submit(
getattr(self.async_thrift, method_name), *args, **kwargs
)
return call | def generate_method(method_name) | Generate a method for a given Thrift service.
Uses the provided TChannelSyncClient's threadloop in order
to convert RPC calls to concurrent.futures
:param method_name: Method being called.
:return: A method that invokes the RPC using TChannelSyncClient | 4.848136 | 3.807741 | 1.273231 |
assert stream, "stream is required"
chunks = []
chunk = yield stream.read()
while chunk:
chunks.append(chunk)
chunk = yield stream.read()
raise tornado.gen.Return(b''.join(chunks)) | def read_full(stream) | Read the full contents of the given stream into memory.
:return:
A future containing the complete stream contents. | 3.502685 | 3.588741 | 0.976021 |
if isinstance(s, Stream):
return s
if s is None:
stream = InMemStream()
stream.close() # we don't intend to write anything
return stream
if isinstance(s, unicode):
s = s.encode('utf-8')
if isinstance(s, bytearray):
s = bytes(s)
if isinstance(s, bytes):
stream = InMemStream(s)
stream.close() # we don't intend to write anything
return stream
# s may still conform to the Stream interface. Yay duck typing.
return s | def maybe_stream(s) | Ensure that the given argument is a stream. | 3.476686 | 3.33568 | 1.042272 |
message = ErrorMessage(
id=protocol_exception.id,
code=protocol_exception.code,
tracing=protocol_exception.tracing,
description=protocol_exception.description,
)
return message | def build_raw_error_message(protocol_exception) | build protocol level error message based on Error object | 3.584432 | 3.539193 | 1.012782 |
request.flags = FlagsType.none if is_completed else FlagsType.fragment
# TODO decide what need to pass from request
if request.state == StreamState.init:
message = CallRequestMessage(
flags=request.flags,
ttl=request.ttl * 1000,
tracing=request.tracing,
service=request.service,
headers=request.headers,
checksum=request.checksum,
args=args
)
request.state = (StreamState.completed if is_completed
else StreamState.streaming)
elif request.state == StreamState.streaming:
message = CallRequestContinueMessage(
flags=request.flags,
checksum=request.checksum,
args=args
)
request.state = (StreamState.completed if is_completed
else StreamState.streaming)
message.id = request.id
return message | def build_raw_request_message(self, request, args, is_completed=False) | build protocol level message based on request and args.
request object contains meta information about outgoing request.
args are the currently chunk data from argstreams
is_completed tells the flags of the message
:param request: Request
:param args: array of arg streams
:param is_completed: message flags
:return: CallRequestMessage/CallRequestContinueMessage | 3.827713 | 3.551501 | 1.077773 |
response.flags = FlagsType.none if is_completed else FlagsType.fragment
# TODO decide what need to pass from request
if response.state == StreamState.init:
message = CallResponseMessage(
flags=response.flags,
code=response.code,
tracing=response.tracing,
headers=response.headers,
checksum=response.checksum,
args=args
)
response.state = (StreamState.completed if is_completed
else StreamState.streaming)
elif response.state == StreamState.streaming:
message = CallResponseContinueMessage(
flags=response.flags,
checksum=response.checksum,
args=args
)
response.state = (StreamState.completed if is_completed
else StreamState.streaming)
message.id = response.id
return message | def build_raw_response_message(self, response, args, is_completed=False) | build protocol level message based on response and args.
response object contains meta information about outgoing response.
args are the currently chunk data from argstreams
is_completed tells the flags of the message
:param response: Response
:param args: array of arg streams
:param is_completed: message flags
:return: CallResponseMessage/CallResponseContinueMessage | 3.809896 | 3.488093 | 1.092257 |
args = self.prepare_args(message)
# TODO decide what to pass to Request from message
req = Request(
flags=message.flags,
ttl=message.ttl / 1000.0,
tracing=message.tracing,
service=message.service,
headers=message.headers,
checksum=message.checksum,
argstreams=args,
id=message.id,
)
return req | def build_request(self, message) | Build inbound request object from protocol level message info.
It is allowed to take incompleted CallRequestMessage. Therefore the
created request may not contain whole three arguments.
:param message: CallRequestMessage
:return: request object | 5.971936 | 5.877263 | 1.016108 |
args = self.prepare_args(message)
# TODO decide what to pass to Response from message
res = Response(
flags=message.flags,
code=message.code,
headers=message.headers,
checksum=message.checksum,
argstreams=args,
id=message.id,
)
return res | def build_response(self, message) | Build response object from protocol level message info
It is allowed to take incompleted CallResponseMessage. Therefore the
created request may not contain whole three arguments.
:param message: CallResponseMessage
:return: response object | 6.945457 | 6.60809 | 1.051054 |
context = None
if message.message_type in [Types.CALL_REQ,
Types.CALL_RES]:
self.verify_message(message)
context = self.build_context(message)
# streaming message
if message.flags == common.FlagsType.fragment:
self.message_buffer[message.id] = context
# find the incompleted stream
num = 0
for i, arg in enumerate(context.argstreams):
if arg.state != StreamState.completed:
num = i
break
self.close_argstream(context, num)
return context
elif message.message_type in [Types.CALL_REQ_CONTINUE,
Types.CALL_RES_CONTINUE]:
context = self.message_buffer.get(message.id)
if context is None:
# missing call msg before continue msg
raise FatalProtocolError(
"missing call message after receiving continue message",
message.id,
)
# find the incompleted stream
dst = 0
for i, arg in enumerate(context.argstreams):
if arg.state != StreamState.completed:
dst = i
break
try:
self.verify_message(message)
except InvalidChecksumError as e:
context.argstreams[dst].set_exception(e)
raise
src = 0
while src < len(message.args):
context.argstreams[dst].write(message.args[src])
dst += 1
src += 1
if message.flags != FlagsType.fragment:
# get last fragment. mark it as completed
assert (len(context.argstreams) ==
CallContinueMessage.max_args_num)
self.message_buffer.pop(message.id, None)
context.flags = FlagsType.none
self.close_argstream(context, dst - 1)
return None
elif message.message_type == Types.ERROR:
context = self.message_buffer.pop(message.id, None)
if context is None:
log.info('Unconsumed error %s', message)
return None
else:
error = TChannelError.from_code(
message.code,
description=message.description,
tracing=context.tracing,
)
context.set_exception(error)
return error
else:
return message | def build(self, message) | buffer all the streaming messages based on the
message id. Reconstruct all fragments together.
:param message:
incoming message
:return: next complete message or None if streaming
is not done | 3.858809 | 3.795999 | 1.016547 |
if message.message_type in [Types.CALL_RES,
Types.CALL_REQ,
Types.CALL_REQ_CONTINUE,
Types.CALL_RES_CONTINUE]:
rw = RW[message.message_type]
payload_space = (common.MAX_PAYLOAD_SIZE -
rw.length_no_args(message))
# split a call/request message into an array
# with a call/request message and {0~n} continue
# message
fragment_msg = message.fragment(payload_space)
self.generate_checksum(message)
yield message
while fragment_msg is not None:
message = fragment_msg
rw = RW[message.message_type]
payload_space = (common.MAX_PAYLOAD_SIZE -
rw.length_no_args(message))
fragment_msg = message.fragment(payload_space)
self.generate_checksum(message)
yield message
else:
yield message | def fragment(self, message) | Fragment message based on max payload size
note: if the message doesn't need to fragment,
it will return a list which only contains original
message itself.
:param message: raw message
:return: list of messages whose sizes <= max
payload size | 4.614045 | 4.533188 | 1.017837 |
if verify_checksum(
message,
self.in_checksum.get(message.id, 0),
):
self.in_checksum[message.id] = message.checksum[1]
if message.flags == FlagsType.none:
self.in_checksum.pop(message.id)
else:
self.in_checksum.pop(message.id, None)
raise InvalidChecksumError(
description="Checksum does not match!",
id=message.id,
) | def verify_message(self, message) | Verify the checksum of the message. | 4.384924 | 4.111405 | 1.066527 |
assert rws is not None
if len(rws) == 1 and isinstance(rws[0], list):
# In case someone does chain([l0, l1, ...])
rws = rws[0]
return ChainReadWriter(rws) | def chain(*rws) | Build a ReadWriter from the given list of ReadWriters.
.. code-block:: python
chain(
number(1),
number(8),
len_prefixed_string(number(2)),
) # == n1:1 n2:8 s~2
Reads/writes from the given ReadWriters in-order. Returns lists of values
in the same order as the ReadWriters.
:param rws:
One or more ReadWriters | 4.431909 | 5.19194 | 0.853613 |
s = stream.read(num)
slen = len(s)
if slen != num:
raise ReadError(
"Expected %d bytes but got %d bytes." % (num, slen)
)
return s | def take(self, stream, num) | Read the given number of bytes from the stream.
:param stream:
stream to read from
:param num:
number of bytes to read
:raises ReadError:
if the stream did not yield the exact number of bytes expected | 3.164449 | 2.841121 | 1.113803 |
methods = inspect.getmembers(iface, predicate=inspect.ismethod)
return set(
name for (name, method) in methods if not name.startswith('__')
) | def get_service_methods(iface) | Get a list of methods defined in the interface for a Thrift service.
:param iface:
The Thrift-generated Iface class defining the interface for the
service.
:returns:
A set containing names of the methods defined for the service. | 3.15468 | 4.706827 | 0.670235 |
warnings.simplefilter('default')
warnings.warn(message, category=DeprecationWarning)
warnings.resetwarnings() | def deprecate(message) | Loudly prints warning. | 3.51654 | 3.720855 | 0.945089 |
def decorator(fn):
@functools.wraps(fn)
def new_fn(*args, **kwargs):
deprecate(message)
return fn(*args, **kwargs)
return new_fn
return decorator | def deprecated(message) | Warn every time a fn is called. | 2.191399 | 2.036007 | 1.076322 |
# TODO replace with more specific exceptions
# assert service, 'service is required'
# assert path, 'path is required'
# Backwards compatibility for callers passing in service name as first arg.
if not path.endswith('.thrift'):
service, path = path, service
module = thriftrw.load(path=path, name=module_name)
return TChannelThriftModule(service, module, hostport) | def load(path, service=None, hostport=None, module_name=None) | Loads the Thrift file at the specified path.
The file is compiled in-memory and a Python module containing the result
is returned. It may be used with ``TChannel.thrift``. For example,
.. code-block:: python
from tchannel import TChannel, thrift
# Load our server's interface definition.
donuts = thrift.load(path='donuts.thrift')
# We need to specify a service name or hostport because this is a
# downstream service we'll be calling.
coffee = thrift.load(path='coffee.thrift', service='coffee')
tchannel = TChannel('donuts')
@tchannel.thrift.register(donuts.DonutsService)
@tornado.gen.coroutine
def submitOrder(request):
args = request.body
if args.coffee:
yield tchannel.thrift(
coffee.CoffeeService.order(args.coffee)
)
# ...
The returned module contains, one top-level type for each struct, enum,
union, exeption, and service defined in the Thrift file. For each service,
the corresponding class contains a classmethod for each function defined
in that service that accepts the arguments for that function and returns a
``ThriftRequest`` capable of being sent via ``TChannel.thrift``.
For more information on what gets generated by ``load``, see `thriftrw
<http://thriftrw.readthedocs.org/en/latest/>`_.
Note that the ``path`` accepted by ``load`` must be either an absolute
path or a path relative to the *the current directory*. If you need to
refer to Thrift files relative to the Python module in which ``load`` was
called, use the ``__file__`` magic variable.
.. code-block:: python
# Given,
#
# foo/
# myservice.thrift
# bar/
# x.py
#
# Inside foo/bar/x.py,
path = os.path.join(
os.path.dirname(__file__), '../myservice.thrift'
)
The returned value is a valid Python module. You can install the module by
adding it to the ``sys.modules`` dictionary. This will allow importing
items from this module directly. You can use the ``__name__`` magic
variable to make the generated module a submodule of the current module.
For example,
.. code-block:: python
# foo/bar.py
import sys
from tchannel import thrift
donuts = = thrift.load('donuts.thrift')
sys.modules[__name__ + '.donuts'] = donuts
This installs the module generated for ``donuts.thrift`` as the module
``foo.bar.donuts``. Callers can then import items from that module
directly. For example,
.. code-block:: python
# foo/baz.py
from foo.bar.donuts import DonutsService, Order
def baz(tchannel):
return tchannel.thrift(
DonutsService.submitOrder(Order(..))
)
:param str service:
Name of the service that the Thrift file represents. This name will be
used to route requests through Hyperbahn.
:param str path:
Path to the Thrift file. If this is a relative path, it must be
relative to the current directory.
:param str hostport:
Clients can use this to specify the hostport at which the service can
be found. If omitted, TChannel will route the requests through known
peers. This value is ignored by servers.
:param str module_name:
Name used for the generated Python module. Defaults to the name of the
Thrift file. | 6.559212 | 6.156335 | 1.065441 |
def decorator(method, handler):
if not method:
method = handler.__name__
function = getattr(service, method, None)
assert function, (
'Service "%s" does not define method "%s"' % (service.name, method)
)
assert not function.oneway
dispatcher.register(
function.endpoint,
build_handler(function, handler),
ThriftRWSerializer(service._module, function._request_cls),
ThriftRWSerializer(service._module, function._response_cls),
)
return handler
if handler is None:
return partial(decorator, method)
else:
return decorator(method, handler) | def register(dispatcher, service, handler=None, method=None) | :param dispatcher:
RequestDispatcher against which the new endpoint will be registered.
:param Service service:
Service object representing the service whose endpoint is being
registered.
:param handler:
A function implementing the given Thrift function.
:param method:
If specified, name of the method being registered. Defaults to the
name of the ``handler`` function. | 4.368715 | 3.9775 | 1.098357 |
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(
fcntl.ioctl(
sock.fileno(), 0x8915, struct.pack('256s', interface[:15])
)[20:24]
) | def interface_ip(interface) | Determine the IP assigned to us by the given network interface. | 1.635522 | 1.626305 | 1.005667 |
ip = socket.gethostbyname(socket.gethostname())
if ip.startswith('127.'):
# Check eth0, eth1, eth2, en0, ...
interfaces = [
i + str(n) for i in ("eth", "en", "wlan") for n in xrange(3)
] # :(
for interface in interfaces:
try:
ip = interface_ip(interface)
break
except IOError:
pass
return ip | def local_ip() | Get the local network IP of this machine | 3.630116 | 3.581563 | 1.013557 |
assert service_module, 'service_module is required'
service = service or '' # may be blank for non-hyperbahn use cases
if not thrift_service_name:
thrift_service_name = service_module.__name__.rsplit('.', 1)[-1]
method_names = get_service_methods(service_module.Iface)
def new(cls, tchannel, hostport=None, trace=False, protocol_headers=None):
protocol_headers = protocol_headers or {}
protocol_headers['as'] = 'thrift'
return _ClientBase.__new__(
cls, tchannel, hostport, service, trace, protocol_headers
)
new.__name__ = '__new__'
methods = {'__new__': new}
for method_name in method_names:
methods[method_name] = generate_method(
service_module, thrift_service_name, method_name
)
return type(thrift_service_name + 'Client', (_ClientBase,), methods) | def client_for(service, service_module, thrift_service_name=None) | Build a client class for the given Thrift service.
The generated class accepts a TChannel and an optional hostport as
initialization arguments.
Given ``CommentService`` defined in ``comment.thrift`` and registered with
Hyperbahn under the name "comment", here's how this may be used:
.. code-block:: python
from comment import CommentService
CommentServiceClient = client_for("comment", CommentService)
@gen.coroutine
def post_comment(articleId, msg, hostport=None):
client = CommentServiceClient(tchannel, hostport)
yield client.postComment(articleId, CommentService.Comment(msg))
:param service:
Name of the Hyperbahn service being called. This is the name with
which the service registered with Hyperbahn.
:param service_module:
The Thrift-generated module for that service. This usually has the
same name as defined for the service in the IDL.
:param thrift_service_name:
If the Thrift service has a different name than its module, use this
parameter to specify it.
:returns:
An object with the same interface as the service that uses the given
TChannel to call the service. | 3.581818 | 3.47062 | 1.03204 |
assert service_module
assert service_name
assert method_name
args_type = getattr(service_module, method_name + '_args')
result_type = getattr(service_module, method_name + '_result', None)
serializer = ThriftSerializer(result_type)
# oneway not currently supported
# TODO - write test for this
if result_type is None:
def not_supported(self, *args, **kwags):
raise OneWayNotSupportedError(
'TChannel+Thrift does not currently support oneway procedues'
)
return not_supported
result_spec = result_type.thrift_spec
# result_spec is a tuple of tuples in the form:
#
# (fieldId, fieldType, fieldName, ...)
#
# Where "..." is other information we don't care about right now.
#
# result_spec will be empty if there is no return value or exception for
# the method.
#
# Its first element, with field ID 0, contains the spec for the return
# value. It is None if the result type is void but the method may still
# throw exceptions.
#
# Elements after the first one are specs for the exceptions.
endpoint = '%s::%s' % (service_name, method_name)
@gen.coroutine
def send(self, *args, **kwargs):
params = inspect.getcallargs(
getattr(service_module.Iface, method_name), self, *args, **kwargs
)
params.pop('self') # self is already known
# $methodName_args is the implicit struct containing the various
# method parameters.
call_args = args_type()
for name, value in params.items():
setattr(call_args, name, value)
tracer = tracing.ClientTracer(channel=self.tchannel)
span, headers = tracer.start_span(
service=service_name, endpoint=method_name, headers={}
)
body = serializer.serialize_body(call_args)
header = serializer.serialize_header(headers)
# Glue for old API.
if hasattr(self.tchannel, 'request'):
tracing.apply_trace_flag(span, self.trace, True)
with span:
response = yield self.tchannel.request(
hostport=self.hostport, service=self.service
).send(
arg1=endpoint,
arg2=header,
arg3=body, # body
headers=self.protocol_headers,
)
body = yield response.get_body()
else:
with span:
response = yield self.tchannel.call(
scheme=schemes.THRIFT,
service=self.service,
arg1=endpoint,
arg2=header,
arg3=body,
hostport=self.hostport,
trace=self.trace,
tracing_span=span
# TODO: Need to handle these!
# headers=self.protocol_headers,
)
body = response.body
call_result = serializer.deserialize_body(body)
if not result_spec:
# void return type and no exceptions allowed
raise gen.Return(None)
for exc_spec in result_spec[1:]:
# May have failed with an exception
exc = getattr(call_result, exc_spec[2])
if exc is not None:
raise exc
if result_spec[0]:
# Non-void return type. Return the result.
success = getattr(call_result, result_spec[0][2])
if success is not None:
raise gen.Return(success)
else:
# No return type specified and no exceptions raised.
raise gen.Return(None)
# Expected a result but nothing was present in the object. Something
# went wrong.
from thrift import Thrift
raise Thrift.TApplicationException(
Thrift.TApplicationException.MISSING_RESULT,
'%s failed: did not receive a result as expected' % method_name
)
# TODO: We should probably throw a custom exception instead.
send.__name__ = method_name
return send | def generate_method(service_module, service_name, method_name) | Generate a method for the given Thrift service.
:param service_module:
Thrift-generated service module
:param service_name:
Name of the Thrift service
:param method_name:
Method being called | 4.22488 | 4.205515 | 1.004605 |
# Prefer incoming connections over outgoing connections.
if self.connections:
# First value is an incoming connection
future = gen.Future()
future.set_result(self.connections[0])
return future
if self._connecting:
# If we're in the process of connecting to the peer, just wait
# and re-use that connection.
return self._connecting
conn_future = self._connecting = self.connection_class.outgoing(
hostport=self.hostport,
process_name=self.tchannel.process_name,
serve_hostport=self.tchannel.hostport,
handler=self.tchannel.receive_call,
tchannel=self.tchannel,
)
def on_connect(_):
if not conn_future.exception():
# We don't actually need to handle the exception. That's on
# the caller.
connection = conn_future.result()
self.register_outgoing_conn(connection)
self._connecting = None
conn_future.add_done_callback(on_connect)
return conn_future | def connect(self) | Get a connection to this peer.
If an connection to the peer already exists (either incoming or
outgoing), that's returned. Otherwise, a new outgoing connection to
this peer is created.
:return:
A future containing a connection to this host. | 4.667859 | 4.420701 | 1.055909 |
assert conn, "conn is required"
conn.set_outbound_pending_change_callback(self._on_conn_change)
self.connections.append(conn)
self._set_on_close_cb(conn)
self._on_conn_change() | def register_outgoing_conn(self, conn) | Add outgoing connection into the heap. | 5.324296 | 5.236023 | 1.016859 |
assert conn, "conn is required"
conn.set_outbound_pending_change_callback(self._on_conn_change)
self.connections.appendleft(conn)
self._set_on_close_cb(conn)
self._on_conn_change() | def register_incoming_conn(self, conn) | Add incoming connection into the heap. | 5.856437 | 5.531306 | 1.05878 |
# Outgoing connections are on the right
return list(
dropwhile(lambda c: c.direction != OUTGOING, self.connections)
) | def outgoing_connections(self) | Returns a list of all outgoing connections for this peer. | 10.960436 | 9.210374 | 1.19001 |
# Incoming connections are on the left.
return list(
takewhile(lambda c: c.direction == INCOMING, self.connections)
) | def incoming_connections(self) | Returns a list of all incoming connections for this peer. | 8.675994 | 6.824643 | 1.271274 |
blacklist = blacklist or set()
peer = None
connection = None
while connection is None:
peer = self._choose(blacklist)
if not peer:
raise NoAvailablePeerError(
"Can't find an available peer for '%s'" % self.service
)
try:
connection = yield peer.connect()
except NetworkError as e:
log.info(
'Failed to connect to %s. Trying a different host.',
peer.hostport,
exc_info=e,
)
connection = None
blacklist.add(peer.hostport)
raise gen.Return((peer, connection)) | def _get_peer_connection(self, blacklist=None) | Find a peer and connect to it.
Returns a ``(peer, connection)`` tuple.
Raises ``NoAvailablePeerError`` if no healthy peers are found.
:param blacklist:
If given, a set of hostports for peers that we must not try. | 3.615108 | 3.237301 | 1.116705 |
# find a peer connection
# If we can't find available peer at the first time, we throw
# NoAvailablePeerError. Later during retry, if we can't find available
# peer, we throw exceptions from retry not NoAvailablePeerError.
peer, connection = yield self._get_peer_connection()
arg1, arg2, arg3 = (
maybe_stream(arg1), maybe_stream(arg2), maybe_stream(arg3)
)
if retry_limit is None:
retry_limit = DEFAULT_RETRY_LIMIT
ttl = ttl or DEFAULT_TIMEOUT
# hack to get endpoint from arg_1 for trace name
arg1.close()
endpoint = yield read_full(arg1)
# set default transport headers
headers = headers or {}
for k, v in self.headers.iteritems():
headers.setdefault(k, v)
if self.tracing_span is None:
tracer = ClientTracer(channel=self.tchannel)
self.tracing_span, _ = tracer.start_span(
service=self.service, endpoint=endpoint,
hostport=self._hostport, encoding=self.headers.get('as')
)
request = Request(
service=self.service,
argstreams=[InMemStream(endpoint), arg2, arg3],
id=connection.writer.next_message_id(),
headers=headers,
endpoint=endpoint,
ttl=ttl,
tracing=tracing.span_to_tracing_field(self.tracing_span)
)
# only retry on non-stream request
if request.is_streaming_request or self._hostport:
retry_limit = 0
if request.is_streaming_request:
request.ttl = 0
try:
with self.tracing_span: # to ensure span is finished
response = yield self.send_with_retry(
request, peer, retry_limit, connection
)
except Exception as e:
# event: on_exception
exc_info = sys.exc_info()
yield self.tchannel.event_emitter.fire(
EventType.on_exception, request, e,
)
six.reraise(*exc_info)
log.debug("Got response %s", response)
raise gen.Return(response) | def send(
self, arg1, arg2, arg3,
headers=None,
retry_limit=None,
ttl=None,
) | Make a request to the Peer.
:param arg1:
String or Stream containing the contents of arg1. If None, an empty
stream is used.
:param arg2:
String or Stream containing the contents of arg2. If None, an empty
stream is used.
:param arg3:
String or Stream containing the contents of arg3. If None, an empty
stream is used.
:param headers:
Headers will be put in the message as protocol header.
:param retry_limit:
Maximum number of retries will perform on the message. If the number
is 0, it means no retry.
:param ttl:
Timeout for each request (second).
:return:
Future that contains the response from the peer. | 5.204395 | 5.11831 | 1.016819 |
try:
for peer in self._peers.values():
peer.close()
finally:
self._peers = {}
self._resetting = False | def clear(self) | Reset this PeerGroup.
This closes all connections to all known peers and forgets about
these peers.
:returns:
A Future that resolves with a value of None when the operation
has finished | 5.387828 | 5.168099 | 1.042516 |
assert hostport, "hostport is required"
peer = self._peers.pop(hostport, None)
peer_in_heap = peer and peer.index != -1
if peer_in_heap:
self.peer_heap.remove_peer(peer)
return peer | def remove(self, hostport) | Delete the Peer for the given host port.
Does nothing if a matching Peer does not exist.
:returns: The removed Peer | 4.672421 | 5.170458 | 0.903676 |
assert hostport, "hostport is required"
assert isinstance(hostport, basestring), "hostport must be a string"
if hostport not in self._peers:
self._add(hostport)
return self._peers[hostport] | def get(self, hostport) | Get a Peer for the given destination.
A new Peer is added to the peer heap and returned if one does
not already exist for the given host-port. Otherwise, the
existing Peer is returned. | 2.889843 | 2.646415 | 1.091984 |
peer = self.peer_class(
tchannel=self.tchannel,
hostport=hostport,
on_conn_change=self._update_heap,
)
peer.rank = self.rank_calculator.get_rank(peer)
self._peers[peer.hostport] = peer
self.peer_heap.add_and_shuffle(peer) | def _add(self, hostport) | Creates a peer from the hostport and adds it to the peer heap | 5.83854 | 4.654508 | 1.254384 |
rank = self.rank_calculator.get_rank(peer)
if rank == peer.rank:
return
peer.rank = rank
self.peer_heap.update_peer(peer) | def _update_heap(self, peer) | Recalculate the peer's rank and update itself in the peer heap. | 5.199243 | 3.464328 | 1.500794 |
assert hostport, "hostport is required"
if hostport not in self._peers:
# Add a peer directly from a hostport, do NOT add it to the peer
# heap
peer = self.peer_class(
tchannel=self.tchannel,
hostport=hostport,
)
self._peers[peer.hostport] = peer
return self._peers[hostport] | def _get_isolated(self, hostport) | Get a Peer for the given destination for a request.
A new Peer is added and returned if one does not already exist for the
given host-port. Otherwise, the existing Peer is returned.
**NOTE** new peers will not be added to the peer heap. | 5.425991 | 4.27226 | 1.270052 |
return PeerClientOperation(
peer_group=self,
service=service,
hostport=hostport,
**kwargs) | def request(self, service, hostport=None, **kwargs) | Initiate a new request through this PeerGroup.
:param hostport:
If specified, requests will be sent to the specific host.
Otherwise, a known peer will be picked at random.
:param service:
Name of the service being called. Defaults to an empty string. | 8.032767 | 8.166477 | 0.983627 |
blacklist = blacklist or set()
if hostport:
return self._get_isolated(hostport)
return self.peer_heap.smallest_peer(
(lambda p: p.hostport not in blacklist and not p.is_ephemeral),
) | def choose(self, hostport=None, blacklist=None) | Choose a Peer that matches the given criteria.
:param hostport:
Specifies that the returned Peer must be for the given host-port.
Without this, all peers managed by this PeerGroup are
candidates.
:param blacklist:
Peers on the blacklist won't be chosen.
:returns:
A Peer that matches all the requested criteria or None if no such
Peer was found. | 8.648661 | 9.27278 | 0.932693 |
assert is_future(future), 'you forgot to pass a future'
def decorator(f):
@wraps(f)
def new_f(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception:
future.set_exc_info(sys.exc_info())
return new_f
return decorator | def fail_to(future) | A decorator for function callbacks to catch uncaught non-async
exceptions and forward them to the given future.
The primary use for this is to catch exceptions in async callbacks and
propagate them to futures. For example, consider,
.. code-block:: python
answer = Future()
def on_done(future):
foo = bar()
answer.set_result(foo)
some_async_operation().add_done_callback(on_done)
If ``bar()`` fails, ``answer`` will never get filled with an exception or
a result. Now if we change ``on_done`` to,
.. code-block:: python
@fail_to(answer)
def on_done(future):
foo = bar()
answer.set_result(foo)
Uncaught exceptions in ``on_done`` will be caught and propagated to
``answer``. Note that ``on_done`` will return None if an exception was
caught.
:param answer:
Future to which the result will be written. | 2.71701 | 3.441341 | 0.789521 |
if index < len(context.argstreams):
arg = ""
chunk = yield context.argstreams[index].read()
while chunk:
arg += chunk
chunk = yield context.argstreams[index].read()
raise tornado.gen.Return(arg)
else:
raise TChannelError() | def get_arg(context, index) | get value from arg stream in async way | 4.715179 | 3.657384 | 1.289222 |
io_loop = IOLoop.current()
new_hole = Future()
new_put = Future()
new_put.set_result(new_hole)
with self._lock:
self._put, put = new_put, self._put
answer = Future()
def _on_put(future):
if future.exception(): # pragma: no cover (never happens)
return answer.set_exc_info(future.exc_info())
old_hole = put.result()
old_hole.set_result(Node(value, new_hole))
answer.set_result(None)
io_loop.add_future(put, _on_put)
return answer | def put(self, value) | Puts an item into the queue.
Returns a Future that resolves to None once the value has been
accepted by the queue. | 4.014313 | 3.871248 | 1.036956 |
new_get = Future()
with self._lock:
if not self._get.done():
raise QueueEmpty
get, self._get = self._get, new_get
hole = get.result()
if not hole.done():
# Restore the unfinished hole.
new_get.set_result(hole)
raise QueueEmpty
node = hole.result()
value = node.value
new_hole, node.next = node.next, None
new_get.set_result(new_hole)
return value | def get_nowait(self) | Returns a value from the queue without waiting.
Raises ``QueueEmpty`` if no values are available right now. | 4.673251 | 4.712015 | 0.991773 |
io_loop = IOLoop.current()
new_get = Future()
with self._lock:
get, self._get = self._get, new_get
answer = Future()
def _on_node(future):
if future.exception(): # pragma: no cover (never happens)
return answer.set_exc_info(future.exc_info())
node = future.result()
value = node.value
new_hole, node.next = node.next, None
new_get.set_result(new_hole)
answer.set_result(value)
def _on_get(future):
if future.exception(): # pragma: no cover (never happens)
return answer.set_exc_info(future.exc_info())
hole = future.result()
io_loop.add_future(hole, _on_node)
io_loop.add_future(get, _on_get)
return answer | def get(self) | Gets the next item from the queue.
Returns a Future that resolves to the next item once it is available. | 2.807838 | 2.729111 | 1.028847 |
new_args = []
key_length = 2 # 2bytes for size
for i, arg in enumerate(self.args):
if space_left >= key_length:
space_left -= key_length
if arg is not None:
arg_length = len(arg)
if space_left < arg_length:
fragment_msg.args.append(arg[space_left:])
new_args.append(arg[:space_left])
space_left = 0
else:
new_args.append(arg)
space_left -= arg_length
if space_left <= key_length:
# boundary for arg
fragment_msg.args.append("")
else:
new_args.append("")
else:
for l in range(i, len(self.args)):
fragment_msg.args.append(self.args[l])
break
self.args = new_args
if space_left >= 0 and len(fragment_msg.args) == 0:
# don't need to fragment any more
return None
else:
self.flags = FlagsType.fragment
fragment_msg.id = self.id
return fragment_msg | def fragment(self, space_left, fragment_msg) | Streaming Message got fragmented based on
payload size. All the data within space_left
will be kept. All the rest will be shifted to
next fragment message.
:param space_left:
space left for current frame
:param fragment_msg:
the type is either CallRequestMessage or
CallResponseMessage
:return: None if there is space left
or next fragment message | 2.940918 | 2.865662 | 1.026262 |
# if none then give empty Response
if mixed is None:
return Response()
# if not Response, then treat like body
if not isinstance(mixed, Response):
return Response(mixed)
# it's already a Response
return mixed | def response_from_mixed(mixed) | Create Response from mixed input. | 6.507653 | 5.746977 | 1.132361 |
if event_type is not None:
assert type(event_type) is int, "register hooks with int values"
return self.hooks[event_type].append(hook)
for event_type in EventType._fields:
func = getattr(hook, event_type, None)
if callable(func):
event_value = getattr(EventType, event_type)
self.register_hook(func, event_value) | def register_hook(self, hook, event_type=None) | If ``event_type`` is provided, then ``hook`` will be called whenever
that event is fired.
If no ``event_type`` is specifid, but ``hook`` implements any methods
with names matching an event hook, then those will be registered with
their corresponding events. This allows for more stateful, class-based
event handlers. | 3.550079 | 3.863645 | 0.918842 |
# heapify
n = h.size()
for i in six.moves.range(int(math.floor(n/2)) - 1, -1, -1):
down(h, i, n) | def init(h) | Initialize existing object into the heap. | 3.641186 | 4.2253 | 0.861758 |
h.push(x)
up(h, h.size()-1) | def push(h, x) | Push a new value into heap. | 7.370098 | 5.405749 | 1.363381 |
n = h.size() - 1
h.swap(0, n)
down(h, 0, n)
return h.pop() | def pop(h) | Pop the heap value from the heap. | 5.321638 | 3.76052 | 1.415133 |
n = h.size() - 1
if n != i:
h.swap(i, n)
down(h, i, n)
up(h, i)
return h.pop() | def remove(h, i) | Remove the item at position i of the heap. | 4.93756 | 3.471289 | 1.422399 |
down(h, i, h.size())
up(h, i) | def fix(h, i) | Rearrange the heap after the item at position i got updated. | 11.581713 | 11.049464 | 1.04817 |
n = heap.size()
# items contains indexes of items yet to be checked.
items = deque([0])
while items:
current = items.popleft()
if current >= n:
continue
if predicate(heap.peek(current)):
return current
child1 = 2 * current + 1
child2 = child1 + 1
if child1 < n and child2 < n and heap.lt(child2, child1):
# make sure we check the smaller child first.
child1, child2 = child2, child1
if child1 < n:
items.append(child1)
if child2 < n:
items.append(child2)
raise NoMatchError() | def smallest(heap, predicate) | Finds the index of the smallest item in the heap that matches the given
predicate.
:param heap:
Heap on which this search is being performed.
:param predicate:
Function that accepts an item from the heap and returns true or false.
:returns:
Index of the first item for which ``predicate`` returned true.
:raises NoMatchError:
If no matching items were found. | 3.160834 | 3.244645 | 0.974169 |
if span is None:
return common.random_tracing()
# noinspection PyBroadException
try:
carrier = {}
span.tracer.inject(span, ZIPKIN_SPAN_FORMAT, carrier)
tracing = Tracing(span_id=carrier['span_id'],
trace_id=carrier['trace_id'],
parent_id=carrier['parent_id'] or 0L,
traceflags=carrier['traceflags'])
return tracing
except opentracing.UnsupportedFormatException:
pass # tracer might not support Zipkin format
except:
log.exception('Failed to inject tracing span into headers')
return common.random_tracing() | def span_to_tracing_field(span) | Inject the span into Trace field, if Zipkin format is supported
:param span: OpenTracing Span | 4.723425 | 4.366578 | 1.081722 |
if trace is None:
trace = default_trace
trace = trace() if callable(trace) else trace
if trace is False and span:
span.set_tag(tags.SAMPLING_PRIORITY, 0) | def apply_trace_flag(span, trace, default_trace) | If ``trace`` (or ``default_trace``) is False, disables tracing on ``span``.
:param span:
:param trace:
:param default_trace:
:return: | 3.120228 | 3.532976 | 0.883173 |
# noinspection PyBroadException
try:
# Currently Java does not populate Tracing field, so do not
# mistaken it for a real trace ID.
if request.tracing.trace_id:
context = self.tracer.extract(
format=ZIPKIN_SPAN_FORMAT,
carrier=request.tracing)
self.span = self.tracer.start_span(
operation_name=request.endpoint,
child_of=context,
tags={tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER},
)
except opentracing.UnsupportedFormatException:
pass # tracer might not support Zipkin format
except:
log.exception('Cannot extract tracing span from Trace field') | def start_basic_span(self, request) | Start tracing span from the protocol's `tracing` fields.
This will only work if the `tracer` supports Zipkin-style span context.
:param request: inbound request
:type request: tchannel.tornado.request.Request | 6.031137 | 5.471188 | 1.102345 |
parent_context = None
# noinspection PyBroadException
try:
if headers and hasattr(headers, 'iteritems'):
tracing_headers = {
k[len(TRACING_KEY_PREFIX):]: v
for k, v in headers.iteritems()
if k.startswith(TRACING_KEY_PREFIX)
}
parent_context = self.tracer.extract(
format=opentracing.Format.TEXT_MAP,
carrier=tracing_headers
)
if self.span and parent_context:
# we already started a span from Tracing fields,
# so only copy baggage from the headers.
for k, v in parent_context.baggage.iteritems():
self.span.set_baggage_item(k, v)
except:
log.exception('Cannot extract tracing span from headers')
if self.span is None:
self.span = self.tracer.start_span(
operation_name=request.endpoint,
child_of=parent_context,
tags={tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER},
)
if 'cn' in request.headers:
self.span.set_tag(tags.PEER_SERVICE, request.headers['cn'])
if peer_host:
self.span.set_tag(tags.PEER_HOST_IPV4, peer_host)
if peer_port:
self.span.set_tag(tags.PEER_PORT, peer_port)
if 'as' in request.headers:
self.span.set_tag('as', request.headers['as'])
return self.span | def start_span(self, request, headers, peer_host, peer_port) | Start a new server-side span. If the span has already been started
by `start_basic_span`, this method only adds baggage from the headers.
:param request: inbound tchannel.tornado.request.Request
:param headers: dictionary containing parsed application headers
:return: | 2.322868 | 2.298594 | 1.01056 |
return {
TIMEOUT: TimeoutError,
CANCELED: CanceledError,
BUSY: BusyError,
DECLINED: DeclinedError,
UNEXPECTED_ERROR: UnexpectedError,
BAD_REQUEST: BadRequestError,
NETWORK_ERROR: NetworkError,
UNHEALTHY: UnhealthyError,
FATAL: FatalProtocolError,
}[code](**kw) | def from_code(cls, code, **kw) | Construct a ``TChannelError`` instance from an error code.
This will return the appropriate class type for the given code. | 4.462851 | 3.888222 | 1.147787 |
logging.getLogger().setLevel(logging.DEBUG)
logging.info('Python Tornado Crossdock Server Starting ...')
tracer = Tracer(
service_name='python',
reporter=NullReporter(),
sampler=ConstSampler(decision=True))
opentracing.tracer = tracer
tchannel = TChannel(name='python', hostport=':%d' % DEFAULT_SERVER_PORT,
trace=True)
register_tchannel_handlers(tchannel=tchannel)
tchannel.listen()
app = tornado.web.Application(debug=True)
register_http_handlers(app)
app.listen(DEFAULT_CLIENT_PORT)
tornado.ioloop.IOLoop.current().start() | def serve() | main entry point | 4.553303 | 4.532699 | 1.004546 |
if not service:
service = service_module.__name__.rsplit('.', 1)[-1]
if not method:
method = handler.__name__
assert service, 'A service name could not be determined'
assert method, 'A method name could not be determined'
assert hasattr(service_module.Iface, method), (
"Service %s doesn't define method %s" % (service, method)
)
assert hasattr(service_module, method + '_result'), (
"oneway methods are not yet supported"
)
endpoint = '%s::%s' % (service, method)
args_type = getattr(service_module, method + '_args')
result_type = getattr(service_module, method + '_result')
# if the dispatcher is set to deal with handlers that
# return responses, then use new api, else use deprecated
if dispatcher._handler_returns_response:
new_handler = build_handler(result_type, handler)
else:
new_handler = deprecated_build_handler(result_type, handler)
dispatcher.register(
endpoint,
new_handler,
ThriftSerializer(args_type),
ThriftSerializer(result_type)
)
return handler | def register(dispatcher, service_module, handler, method=None, service=None) | Registers a Thrift service method with the given RequestDispatcher.
.. code-block:: python
# For,
#
# service HelloWorld { string hello(1: string name); }
import tchannel.thrift
import HelloWorld
def hello(request, response):
name = request.args.name
response.write_result("Hello, %s" % name)
dispatcher = RequestDispatcher()
tchannel.thrift.register(dispatcher, HelloWorld, hello)
:param dispatcher:
TChannel dispatcher with which the Thrift service will be registered.
:param service_module:
The service module generated by Thrift. This module contains the
service ``Iface``, ``Client``, ``Processor``, etc. classes.
:param handler:
A function implementing the request handler. The function must accept
a ``request``, a ``response``, and a ``tchannel``.
:param service:
Thrift service name. This is the `service` name specified in the
Thrift IDL. If omitted, it is automatically determined based on the
name of ``service_module``.
:param method:
Name of the method. Defaults to the name of the ``handler`` function. | 3.481699 | 3.34718 | 1.040189 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.