code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
project_root = _get_project_root_from_conf_path(conf_path) config = load_config_in_dir(project_root) return partial(config_get, config)
def make_config_get(conf_path)
Return a function to get configuration options for a specific project Args: conf_path (path-like): path to project's conf file (i.e. foo.conf module)
4.132843
5.228827
0.790396
path = pathlib.Path(diff.b_path) contrib_path = project.contrib_module_path return path.relative_to(contrib_path)
def relative_to_contrib(diff, project)
Compute relative path of changed file to contrib dir Args: diff (git.diff.Diff): file diff project (Project): project Returns: Path
4.189955
5.027642
0.833384
result = get_pr_num(repo=self.repo) if result is None: result = get_travis_pr_num() return result
def pr_num(self)
Return the PR number or None if not on a PR
6.302996
4.929211
1.278703
result = get_branch(repo=self.repo) if result is None: result = get_travis_branch() return result
def branch(self)
Return whether the project is on master branch
8.404786
7.601512
1.105673
return pathlib.Path(self.package.__file__).resolve().parent.parent
def path(self)
Return the project path (aka project root) If ``package.__file__`` is ``/foo/foo/__init__.py``, then project.path should be ``/foo``.
15.841886
7.523225
2.105731
arr = np.asarray(a) if arr.ndim == 1: arr = arr.reshape(-1, 1) return arr
def asarray2d(a)
Cast to 2d array
2.368
2.297499
1.030686
type_ = type(arr).__name__ # see also __qualname__ shape = getattr(arr, 'shape', None) if shape is not None: desc = '{type_} {shape}' else: desc = '{type_} <no shape>' return desc.format(type_=type_, shape=shape)
def get_arr_desc(arr)
Get array description, in the form '<array type> <array shape>
3.913209
3.882623
1.007878
_indent = ' ' * n return '\n'.join(_indent + line for line in text.split('\n'))
def indent(text, n=4)
Indent each line of text by n spaces
3.163125
2.934048
1.078076
nans = np.isnan(obj) while np.ndim(nans): nans = np.any(nans) return bool(nans)
def has_nans(obj)
Check if obj has any NaNs Compatible with different behavior of np.isnan, which sometimes applies over all axes (py35, py35) and sometimes does not (py34).
3.527972
3.119606
1.130903
@wraps(f) def wrapped(pathlike, *args, **kwargs): path = pathlib.Path(pathlike) return f(path, *args, **kwargs) return wrapped
def needs_path(f)
Wraps a function that accepts path-like to give it a pathlib.Path
2.721911
1.904446
1.42924
# TODO just keep navigating up in the source tree until an __init__.py is # not found? modpath = pathlib.Path(modpath).resolve() if modpath.name == '__init__.py': # TODO improve debugging output with recommend change raise ValueError('Don\'t provide the __init__.py!') def is_package(modpath): return modpath.suffix != '.py' def has_init(dir): return dir.joinpath('__init__.py').is_file() def has_package_structure(modname, modpath): modparts = modname.split('.') n = len(modparts) dir = modpath if not is_package(modpath): n = n - 1 dir = dir.parent while n > 0: if not has_init(dir): return False dir = dir.parent n = n - 1 return True if not has_package_structure(modname, modpath): raise ImportError('Module does not have valid package structure.') parentpath = str(pathlib.Path(modpath).parent) finder = pkgutil.get_importer(parentpath) loader = finder.find_module(modname) if loader is None: raise ImportError( 'Failed to find loader for module {} within dir {}' .format(modname, parentpath)) mod = loader.load_module(modname) # TODO figure out what to do about this assert mod.__name__ == modname return mod
def import_module_at_path(modname, modpath)
Import module from path that may not be on system path Args: modname (str): module name from package root, e.g. foo.bar modpath (str): absolute path to module itself, e.g. /home/user/foo/bar.py. In the case of a module that is a package, then the path should be specified as '/home/user/foo' and a file '/home/user/foo/__init__.py' *must be present* or the import will fail. Examples: >>> modname = 'foo.bar.baz' >>> modpath = '/home/user/foo/bar/baz.py' >>> import_module_at_path(modname, modpath) <module 'foo.bar.baz' from '/home/user/foo/bar/baz.py'> >>> modname = 'foo.bar' >>> modpath = '/home/user/foo/bar' >>> import_module_at_path(modname, modpath) <module 'foo.bar' from '/home/user/foo/bar/__init__.py'>
3.319613
3.503991
0.947381
# don't try to resolve! p = pathlib.Path(relpath) if p.name == '__init__.py': p = p.parent elif p.suffix == '.py': p = p.with_suffix('') else: msg = 'Cannot convert a non-python file to a modname' msg_detail = 'The relpath given is: {}'.format(relpath) logger.error(msg + '\n' + msg_detail) raise ValueError(msg) return '.'.join(p.parts)
def relpath_to_modname(relpath)
Convert relative path to module name Within a project, a path to the source file is uniquely identified with a module name. Relative paths of the form 'foo/bar' are *not* converted to module names 'foo.bar', because (1) they identify directories, not regular files, and (2) already 'foo/bar/__init__.py' would claim that conversion. Args: relpath (str): Relative path from some location on sys.path Example: >>> relpath_to_modname('ballet/util/_util.py') 'ballet.util._util'
3.447106
4.298759
0.801884
parts = modname.split('.') relpath = pathlib.Path(*parts) # is the module a package? if so, the relpath identifies a directory # it is easier to check for whether a file is a directory than to try to # import the module dynamically and see whether it is a package if project_root is not None: relpath_resolved = pathlib.Path(project_root).joinpath(relpath) else: relpath_resolved = relpath if relpath_resolved.is_dir(): if add_init: relpath = relpath.joinpath('__init__.py') else: relpath = str(relpath) + '.py' return str(relpath)
def modname_to_relpath(modname, project_root=None, add_init=True)
Convert module name to relative path. The project root is usually needed to detect if the module is a package, in which case the relevant file is the `__init__.py` within the subdirectory. Example: >>> modname_to_relpath('foo.features') 'foo/features.py' >>> modname_to_relpath('foo.features', project_root='/path/to/project') 'foo/features/__init__.py' Args: modname (str): Module name, e.g. `os.path` project_root (str): Path to project root add_init (bool): Whether to add `__init__.py` to the path of modules that are packages. Defaults to True Returns: str
3.453205
3.667176
0.941652
input = feature.input is_str = isa(str) is_nested_str = all_fn( iterable, lambda x: all(is_str, x)) assert is_str(input) or is_nested_str(input)
def check(self, feature)
Check that the feature's `input` is a str or Iterable[str]
11.176204
7.562887
1.47777
assert hasattr(feature.transformer, 'fit') assert hasattr(feature.transformer, 'transform') assert hasattr(feature.transformer, 'fit_transform')
def check(self, feature)
Check that the feature has a fit/transform/fit_tranform interface
3.500332
2.32848
1.503269
mapper = feature.as_dataframe_mapper() mapper.fit(self.X, y=self.y)
def check(self, feature)
Check that fit can be called on reference data
9.919557
8.903927
1.114065
mapper = feature.as_dataframe_mapper() mapper.fit_transform(self.X, y=self.y)
def check(self, feature)
Check that fit_transform can be called on reference data
9.490126
7.310344
1.298178
mapper = feature.as_dataframe_mapper() X = mapper.fit_transform(self.X, y=self.y) assert self.X.shape[0] == X.shape[0]
def check(self, feature)
Check that the dimensions of the transformed data are correct For input X, an n x p array, a n x q array should be produced, where q is the number of features produced by the logical feature.
4.64256
4.818802
0.963426
try: buf = io.BytesIO() pickle.dump(feature, buf, protocol=pickle.HIGHEST_PROTOCOL) buf.seek(0) new_feature = pickle.load(buf) assert new_feature is not None assert isinstance(new_feature, Feature) finally: buf.close()
def check(self, feature)
Check that the feature can be pickled This is needed for saving the pipeline to disk
2.333774
2.248008
1.038152
mapper = feature.as_dataframe_mapper() X = mapper.fit_transform(self.X, y=self.y) assert not np.any(np.isnan(X))
def check(self, feature)
Check that the output of the transformer has no missing values
5.250391
4.258317
1.232974
laggers = [SingleLagger(l, groupby_kwargs=groupby_kwargs) for l in lags] feature_union = FeatureUnion([ (repr(lagger), lagger) for lagger in laggers ]) return feature_union
def make_multi_lagger(lags, groupby_kwargs=None)
Return a union of transformers that apply different lags Args: lags (Collection[int]): collection of lags to apply groupby_kwargs (dict): keyword arguments to pd.DataFrame.groupby
3.760173
4.890415
0.768886
project = Project.from_path(pathlib.Path.cwd().resolve()) contrib_dir = project.get('contrib', 'module_path') with tempfile.TemporaryDirectory() as tempdir: # render feature template output_dir = tempdir cc_kwargs['output_dir'] = output_dir rendered_dir = render_feature_template(**cc_kwargs) # copy into contrib dir src = rendered_dir dst = contrib_dir synctree(src, dst, onexist=_fail_if_feature_exists) logger.info('Start new feature successful.')
def start_new_feature(**cc_kwargs)
Start a new feature within a ballet project Renders the feature template into a temporary directory, then copies the feature files into the proper path within the contrib directory. Args: **cc_kwargs: options for the cookiecutter template Raises: ballet.exc.BalletError: the new feature has the same name as an existing one
6.450356
6.024002
1.070776
change_collector = ChangeCollector(project) collected_changes = change_collector.collect_changes() try: new_feature_info = one_or_raise(collected_changes.new_feature_info) importer, _, _ = new_feature_info except ValueError: raise BalletError('Too many features collected') module = importer() feature = _get_contrib_feature_from_module(module) return feature
def get_proposed_feature(project)
Get the proposed feature The path of the proposed feature is determined by diffing the project against a comparison branch, such as master. The feature is then imported from that path and returned. Args: project (ballet.project.Project): project info Raises: ballet.exc.BalletError: more than one feature collected
7.471415
5.754565
1.298346
def eq(feature): return feature.source == proposed_feature.source # deselect features that match the proposed feature result = lfilter(complement(eq), features) if len(features) - len(result) == 1: return result elif len(result) == len(features): raise BalletError( 'Did not find match for proposed feature within \'contrib\'') else: raise BalletError( 'Unexpected condition (n_features={}, n_result={})' .format(len(features), len(result)))
def get_accepted_features(features, proposed_feature)
Deselect candidate features from list of all features Args: features (List[Feature]): collection of all features in the ballet project: both accepted features and candidate ones that have not been accepted proposed_feature (Feature): candidate feature that has not been accepted Returns: List[Feature]: list of features with the proposed feature not in it. Raises: ballet.exc.BalletError: Could not deselect exactly the proposed feature.
6.818641
5.696659
1.196954
file_diffs = self._collect_file_diffs() candidate_feature_diffs, valid_init_diffs, inadmissible_diffs = \ self._categorize_file_diffs(file_diffs) new_feature_info = self._collect_feature_info(candidate_feature_diffs) return CollectedChanges( file_diffs, candidate_feature_diffs, valid_init_diffs, inadmissible_diffs, new_feature_info)
def collect_changes(self)
Collect file and feature changes Steps 1. Collects the files that have changed in this pull request as compared to a comparison branch. 2. Categorize these file changes into admissible or inadmissible file changes. Admissible file changes solely contribute python files to the contrib subdirectory. 3. Collect features from admissible new files. Returns: CollectedChanges
3.650304
2.705114
1.349409
# TODO move this into a new validator candidate_feature_diffs = [] valid_init_diffs = [] inadmissible_files = [] for diff in file_diffs: valid, failures = check_from_class( ProjectStructureCheck, diff, self.project) if valid: if pathlib.Path(diff.b_path).parts[-1] != '__init__.py': candidate_feature_diffs.append(diff) logger.debug( 'Categorized {file} as CANDIDATE FEATURE MODULE' .format(file=diff.b_path)) else: valid_init_diffs.append(diff) logger.debug( 'Categorized {file} as VALID INIT MODULE' .format(file=diff.b_path)) else: inadmissible_files.append(diff) logger.debug( 'Categorized {file} as INADMISSIBLE; ' 'failures were {failures}' .format(file=diff.b_path, failures=failures)) logger.info( 'Admitted {} candidate feature{} ' 'and {} __init__ module{} ' 'and rejected {} file{}' .format(len(candidate_feature_diffs), make_plural_suffix(candidate_feature_diffs), len(valid_init_diffs), make_plural_suffix(valid_init_diffs), len(inadmissible_files), make_plural_suffix(inadmissible_files))) return candidate_feature_diffs, valid_init_diffs, inadmissible_files
def _categorize_file_diffs(self, file_diffs)
Partition file changes into admissible and inadmissible changes
2.853005
2.727238
1.046115
project_root = self.project.path for diff in candidate_feature_diffs: path = diff.b_path modname = relpath_to_modname(path) modpath = project_root.joinpath(path) importer = partial(import_module_at_path, modname, modpath) yield importer, modname, modpath
def _collect_feature_info(self, candidate_feature_diffs)
Collect feature info Args: candidate_feature_diffs (List[git.diff.Diff]): list of Diffs corresponding to admissible file changes compared to comparison ref Returns: List[Tuple]: list of tuple of importer, module name, and module path. The "importer" is a callable that returns a module
5.151058
3.827881
1.345668
# noqa E501 try: travis_pull_request = get_travis_env_or_fail('TRAVIS_PULL_REQUEST') if truthy(travis_pull_request): travis_pull_request_branch = get_travis_env_or_fail( 'TRAVIS_PULL_REQUEST_BRANCH') return travis_pull_request_branch else: travis_branch = get_travis_env_or_fail('TRAVIS_BRANCH') return travis_branch except UnexpectedTravisEnvironmentError: return None
def get_travis_branch()
Get current branch per Travis environment variables If travis is building a PR, then TRAVIS_PULL_REQUEST is truthy and the name of the branch corresponding to the PR is stored in the TRAVIS_PULL_REQUEST_BRANCH environment variable. Else, the name of the branch is stored in the TRAVIS_BRANCH environment variable. See also: <https://docs.travis-ci.com/user/environment-variables/#default-environment-variables>
2.422051
2.143311
1.130051
if not features: features = Feature(input=[], transformer=NullTransformer()) if not iterable(features): features = (features, ) return DataFrameMapper( [t.as_input_transformer_tuple() for t in features], input_df=True)
def make_mapper(features)
Make a DataFrameMapper from a feature or list of features Args: features (Union[Feature, List[Feature]]): feature or list of features Returns: DataFrameMapper: mapper made from features
8.584916
7.503064
1.144188
def get_name(estimator): if isinstance(estimator, DelegatingRobustTransformer): return get_name(estimator._transformer) return type(estimator).__name__.lower() names = list(map(get_name, estimators)) counter = dict(Counter(names)) counter = select_values(lambda x: x > 1, counter) for i in reversed(range(len(estimators))): name = names[i] if name in counter: names[i] += "-%d" % counter[name] counter[name] -= 1 return list(zip(names, estimators))
def _name_estimators(estimators)
Generate names for estimators. Adapted from sklearn.pipeline._name_estimators
3.451588
3.452208
0.99982
repo = project.repo remote_name = project.get('project', 'remote') remote = repo.remote(remote_name) result = _call_remote_push(remote) failures = lfilter(complement(did_git_push_succeed), result) if failures: for push_info in failures: logger.error( 'Failed to push ref {from_ref} to {to_ref}' .format(from_ref=push_info.local_ref.name, to_ref=push_info.remote_ref.name)) raise BalletError('Push failed')
def _push(project)
Push default branch and project template branch to remote With default config (i.e. remote and branch names), equivalent to:: $ git push origin master:master project-template:project-template Raises: ballet.exc.BalletError: Push failed in some way
4.933591
4.437814
1.111716
if X_df is None: X_df, _ = load_data() if y_df is None: _, y_df = load_data() features = get_contrib_features() mapper_X = ballet.feature.make_mapper(features) X = mapper_X.fit_transform(X_df) encoder_y = get_target_encoder() y = encoder_y.fit_transform(y_df) return { 'X_df': X_df, 'features': features, 'mapper_X': mapper_X, 'X': X, 'y_df': y_df, 'encoder_y': encoder_y, 'y': y, }
def build(X_df=None, y_df=None)
Build features and target Args: X_df (DataFrame): raw variables y_df (DataFrame): raw target Returns: dict with keys X_df, features, mapper_X, X, y_df, encoder_y, y
2.373301
1.914722
1.239501
import ballet.util.log ballet.util.log.enable(logger=logger, level='INFO', echo=False) ballet.util.log.enable(logger=ballet.util.log.logger, level='INFO', echo=False) X_df, y_df = load_data(input_dir=input_dir) out = build() mapper_X = out['mapper_X'] encoder_y = out['encoder_y'] X_ft = mapper_X.transform(X_df) y_ft = encoder_y.transform(y_df) save_features(X_ft, output_dir) save_targets(y_ft, output_dir)
def main(input_dir, output_dir)
Engineer features
3.246344
3.245113
1.000379
if input_dir is not None: tables = conf.get('tables') entities_table_name = conf.get('data', 'entities_table_name') entities_config = some(where(tables, name=entities_table_name)) X = load_table_from_config(input_dir, entities_config) targets_table_name = conf.get('data', 'targets_table_name') targets_config = some(where(tables, name=targets_table_name)) y = load_table_from_config(input_dir, targets_config) else: raise NotImplementedError return X, y
def load_data(input_dir=None)
Load data
2.876995
2.866748
1.003575
self._variables = self._variables or list(next(iter(epoch_data.values())).keys()) self._streams = epoch_data.keys() header = ['"epoch_id"'] for stream_name in self._streams: header += [stream_name + '_' + var for var in self._variables] with open(self._file_path, 'a') as file: file.write(self._delimiter.join(header) + '\n') self._header_written = True
def _write_header(self, epoch_data: EpochData) -> None
Write CSV header row with column names. Column names are inferred from the ``epoch_data`` and ``self.variables`` (if specified). Variables and streams expected later on are stored in ``self._variables`` and ``self._streams`` respectively. :param epoch_data: epoch data to be logged
3.589135
3.07862
1.165826
# list of values to be written values = [epoch_id] for stream_name in self._streams: for variable_name in self._variables: column_name = stream_name+'_'+variable_name try: value = epoch_data[stream_name][variable_name] except KeyError as ex: err_message = '`{}` not found in epoch data.'.format(column_name) if self._on_missing_variable == 'error': raise KeyError(err_message) from ex elif self._on_missing_variable == 'warn': logging.warning(err_message) values.append(self._default_value) continue if isinstance(value, dict) and 'mean' in value: value = value['mean'] elif isinstance(value, dict) and 'nanmean' in value: value = value['nanmean'] if np.isscalar(value): values.append(value) else: err_message = 'Variable `{}` value is not scalar.'.format(variable_name) if self._on_unknown_type == 'error': raise TypeError(err_message) elif self._on_unknown_type == 'warn': logging.warning(err_message) values.append(self._default_value) # write the row with open(self._file_path, 'a') as file: row = self._delimiter.join([str(value) for value in values]) file.write(row + '\n')
def _write_row(self, epoch_id: int, epoch_data: EpochData) -> None
Write a single epoch result row to the CSV file. :param epoch_id: epoch number (will be written at the first column) :param epoch_data: epoch data :raise KeyError: if the variable is missing and ``self._on_missing_variable`` is set to ``error`` :raise TypeError: if the variable has wrong type and ``self._on_unknown_type`` is set to ``error``
2.197672
1.943363
1.13086
logging.debug('Saving epoch %d data to "%s"', epoch_id, self._file_path) if not self._header_written: self._write_header(epoch_data=epoch_data) self._write_row(epoch_id=epoch_id, epoch_data=epoch_data)
def after_epoch(self, epoch_id: int, epoch_data: EpochData) -> None
Write a new row to the CSV file with the given epoch data. In the case of first invocation, create the CSV header. :param epoch_id: number of the epoch :param epoch_data: epoch data to be logged
3.391822
2.701779
1.255403
r = random.SystemRandom() return '{}{}{}'.format(r.choice(_left), sep, r.choice(_right))
def get_random_name(sep: str='-')
Generate random docker-like name with the given separator. :param sep: adjective-name separator string :return: random docker-like name
6.209736
5.767388
1.076698
if self._minutes is not None and (datetime.now() - self._training_start).total_seconds()/60 > self._minutes: raise TrainingTerminated('Training terminated after more than {} minutes'.format(self._minutes))
def _check_train_time(self) -> None
Stop the training if the training time exceeded ``self._minutes``. :raise TrainingTerminated: if the training time exceeded ``self._minutes``
5.225091
3.251444
1.607006
self._check_train_time() if self._iters is not None and stream_name == self._train_stream_name: self._iters_done += 1 if self._iters_done >= self._iters: raise TrainingTerminated('Training terminated after iteration {}'.format(self._iters_done))
def after_batch(self, stream_name: str, batch_data: Batch) -> None
If ``stream_name`` equals to :py:attr:`cxflow.constants.TRAIN_STREAM`, increase the iterations counter and possibly stop the training; additionally, call :py:meth:`_check_train_time`. :param stream_name: stream name :param batch_data: ignored :raise TrainingTerminated: if the number of iterations reaches ``self._iters``
4.783736
3.095861
1.545204
self._check_train_time() if self._epochs is not None and epoch_id >= self._epochs: logging.info('EpochStopperHook triggered') raise TrainingTerminated('Training terminated after epoch {}'.format(epoch_id))
def after_epoch(self, epoch_id: int, epoch_data: EpochData) -> None
Stop the training if the ``epoch_id`` reaches ``self._epochs``; additionally, call :py:meth:`_check_train_time`. :param epoch_id: epoch id :param epoch_data: ignored :raise TrainingTerminated: if the ``epoch_id`` reaches ``self._epochs``
7.886496
4.897401
1.610343
for part in reversed(url.split('/')): filename = re.sub(r'[^a-zA-Z0-9_.\-]', '', part) if len(filename) > 0: break else: raise ValueError('Could not create reasonable name for file from url %s', url) return filename
def sanitize_url(url: str) -> str
Sanitize the given url so that it can be used as a valid filename. :param url: url to create filename from :raise ValueError: when the given url can not be sanitized :return: created filename
4.292559
3.527077
1.21703
# make sure data_root exists os.makedirs(data_root, exist_ok=True) # create sanitized filename from url filename = sanitize_url(url) # check whether the archive already exists filepath = os.path.join(data_root, filename) if os.path.exists(filepath): logging.info('\t`%s` already exists; skipping', filepath) return # download with progressbar try: logging.info('\tdownloading %s', filepath) req = requests.get(url, stream=True) req.raise_for_status() except requests.exceptions.RequestException as ex: logging.error('File `%s` could not be downloaded, %s', filepath, ex) return expected_size = int(req.headers.get('content-length')) chunk_size = 1024 with open(filepath, 'wb') as f_out,\ click.progressbar(req.iter_content(chunk_size=chunk_size), length=expected_size/chunk_size) as bar: for chunk in bar: if chunk: f_out.write(chunk) f_out.flush() # extract try: shutil.unpack_archive(filepath, data_root) except (shutil.ReadError, ValueError): logging.info('File `%s` could not be extracted by `shutil.unpack_archive`. Please process it manually.', filepath)
def maybe_download_and_extract(data_root: str, url: str) -> None
Maybe download the specified file to ``data_root`` and try to unpack it with ``shutil.unpack_archive``. :param data_root: data root to download the files to :param url: url to download from
2.404433
2.366792
1.015904
if aggregation not in ComputeStats.EXTRA_AGGREGATIONS and not hasattr(np, aggregation): raise ValueError('Aggregation `{}` is not a NumPy function or a member ' 'of EXTRA_AGGREGATIONS.'.format(aggregation))
def _raise_check_aggregation(aggregation: str)
Check whether the given aggregation is present in NumPy or it is one of EXTRA_AGGREGATIONS. :param aggregation: the aggregation name :raise ValueError: if the specified aggregation is not supported or found in NumPy
9.01728
5.878479
1.533948
ComputeStats._raise_check_aggregation(aggregation) if aggregation == 'nanfraction': return np.sum(np.isnan(data)) / len(data) if aggregation == 'nancount': return int(np.sum(np.isnan(data))) return getattr(np, aggregation)(data)
def _compute_aggregation(aggregation: str, data: Iterable[Any])
Compute the specified aggregation on the given data. :param aggregation: the name of an arbitrary NumPy function (e.g., mean, max, median, nanmean, ...) or one of :py:attr:`EXTRA_AGGREGATIONS`. :param data: data to be aggregated :raise ValueError: if the specified aggregation is not supported or found in NumPy
4.782965
4.911005
0.973928
for stream_name in epoch_data.keys(): for variable, aggregations in self._variable_aggregations.items(): # variables are already checked in the AccumulatingHook; hence, we do not check them here epoch_data[stream_name][variable] = OrderedDict( {aggr: ComputeStats._compute_aggregation(aggr, self._accumulator[stream_name][variable]) for aggr in aggregations})
def _save_stats(self, epoch_data: EpochData) -> None
Extend ``epoch_data`` by stream:variable:aggreagation data. :param epoch_data: data source from which the statistics are computed
8.238931
6.334717
1.3006
self._save_stats(epoch_data) super().after_epoch(epoch_data=epoch_data, **kwargs)
def after_epoch(self, epoch_data: EpochData, **kwargs) -> None
Compute the specified aggregations and save them to the given epoch data. :param epoch_data: epoch data to be processed
4.5508
4.328929
1.051253
if self._output_dir is None: raise ValueError('Can not save TrainingTrace without output dir.') yaml_to_file(self._trace, self._output_dir, CXF_TRACE_FILE)
def save(self) -> None
Save the training trace to :py:attr:`CXF_TRACE_FILE` file under the specified directory. :raise ValueError: if no output directory was specified
11.229867
5.545403
2.025077
trace = TrainingTrace() trace._trace = load_config(filepath) return trace
def from_file(filepath: str)
Load training trace from the given ``filepath``. :param filepath: training trace file path :return: training trace
24.890722
16.338827
1.523409
if self._stream not in epoch_data: raise KeyError('The hook could not determine whether the threshold was exceeded as the stream `{}`' 'was not found in the epoch data'.format(self._stream)) if self._variable not in epoch_data[self._stream]: raise KeyError('The hook could not determine whether the threshold was exceeded as the variable `{}`' 'was not found in the epoch data stream `{}`'.format(self._variable, self._stream)) value = epoch_data[self._stream][self._variable] if isinstance(value, dict) and 'mean' in value: value = value['mean'] if not np.isscalar(value): raise TypeError('The value to be checked has to be either a scalar or a dict with `mean` key. ' 'Got `{}` instead.'.format(type(value).__name__)) if value > self._required_min_value: raise TrainingTerminated('{} {} level matched (current {} is greater than required {}).' .format(self._stream, self._variable, value, self._required_min_value)) elif epoch_id >= self._max_epoch: raise ValueError('{} {} was only {} in epoch {}, but {} was required. Training failed.' .format(self._stream, self._variable, value, epoch_id, self._required_min_value))
def after_epoch(self, epoch_id: int, epoch_data: EpochData)
Check termination conditions. :param epoch_id: number of the processed epoch :param epoch_data: epoch data to be checked :raise KeyError: if the stream of variable was not found in ``epoch_data`` :raise TypeError: if the monitored variable is not a scalar or scalar ``mean`` aggregation :raise ValueError: if the specified number of epochs exceeded :raise TrainingTerminated: if the monitor variable is above the required level
3.483982
2.822868
1.2342
config = None try: config_path = find_config(config_path) config = load_config(config_file=config_path, additional_args=cl_arguments) validate_config(config) logging.debug('\tLoaded config: %s', config) except Exception as ex: # pylint: disable=broad-except fallback('Loading config failed', ex) run(config=config, output_root=output_root)
def train(config_path: str, cl_arguments: Iterable[str], output_root: str) -> None
Load config and start the training. :param config_path: path to configuration file :param cl_arguments: additional command line arguments which will update the configuration :param output_root: output root in which the training directory will be created
4.006378
3.99484
1.002888
config = None try: model_dir = path.dirname(model_path) if not path.isdir(model_path) else model_path config_path = find_config(model_dir if config_path is None else config_path) config = load_config(config_file=config_path, additional_args=cl_arguments) if stream_name == CXF_PREDICT_STREAM and stream_name in config: # old style ``cxflow predict ...`` logging.warning('Old style ``predict`` configuration section is deprecated and will not be supported, ' 'use ``eval.predict`` section instead.') config['eval'] = {'predict': config['predict']} if 'eval' in config and stream_name in config['eval']: update_section = config['eval'][stream_name] for subsection in ['dataset', 'model', 'main_loop']: if subsection in update_section: config[subsection].update(update_section[subsection]) if 'hooks' in update_section: config['hooks'] = update_section['hooks'] else: logging.warning('Config does not contain `eval.%s.hooks` section. ' 'No hook will be employed during the evaluation.', stream_name) config['hooks'] = [] validate_config(config) logging.debug('\tLoaded config: %s', config) except Exception as ex: # pylint: disable=broad-except fallback('Loading config failed', ex) run(config=config, output_root=output_root, restore_from=model_path, eval=stream_name)
def evaluate(model_path: str, stream_name: str, config_path: Optional[str], cl_arguments: Iterable[str], output_root: str) -> None
Evaluate the given model on the specified data stream. Configuration is updated by the respective predict.stream_name section, in particular: - hooks section is entirely replaced - model and dataset sections are updated :param model_path: path to the model to be evaluated :param stream_name: data stream name to be evaluated :param config_path: path to the config to be used, if not specified infer the path from ``model_path`` :param cl_arguments: additional command line arguments which will update the configuration :param output_root: output root in which the training directory will be created
4.207021
3.934634
1.069228
def predict(config_path: str, restore_from: Optional[str], cl_arguments: Iterable[str], output_root: str) -> None:
    """
    Run prediction from the specified config path.

    If the config contains a ``predict`` section:
        - hooks are overridden with ``predict.hooks`` if present
        - the ``dataset``, ``model`` and ``main_loop`` sections are updated if present

    :param config_path: path to the config file or the directory in which it is stored
    :param restore_from: backend-specific path to the already trained model to be restored from;
                         when ``None``, inferred as the directory the config file is located in
    :param cl_arguments: additional command line arguments which will update the configuration
    :param output_root: output root in which the training directory will be created
    """
    config = None
    try:
        config_path = find_config(config_path)
        restore_from = restore_from or path.dirname(config_path)
        config = load_config(config_file=config_path, additional_args=cl_arguments)
        if 'predict' in config:
            # selectively update the standard sections from the predict-specific overrides
            for section in ['dataset', 'model', 'main_loop']:
                if section in config['predict']:
                    config[section].update(config['predict'][section])
            # hooks are replaced entirely, never merged
            if 'hooks' in config['predict']:
                config['hooks'] = config['predict']['hooks']
            else:
                logging.warning('Config does not contain `predict.hooks` section. '
                                'No hook will be employed during the prediction.')
                config['hooks'] = []
        validate_config(config)
        logging.debug('\tLoaded config: %s', config)
    except Exception as ex:  # pylint: disable=broad-except
        fallback('Loading config failed', ex)

    run(config=config, output_root=output_root, restore_from=restore_from, eval='predict')
3.712853
3.101959
1.196938
if streams is None: streams = [self._train_stream_name] + self._extra_streams return OrderedDict([(stream_name, OrderedDict()) for stream_name in streams])
def _create_epoch_data(self, streams: Optional[Iterable[str]]=None) -> EpochData
Create empty epoch data double dict.
4.922414
3.74366
1.314867
unused_sources = [source for source in batch.keys() if source not in self._model.input_names] missing_sources = [source for source in self._model.input_names if source not in batch.keys()] # check stream sources if unused_sources: if self._on_unused_sources == 'warn' and not self._extra_sources_warned: logging.warning('Some sources provided by the stream do not match model placeholders. Set ' '`main_loop.on_unused_sources` to `ignore` in order to suppress this warning. ' 'Extra sources: %s', unused_sources) self._extra_sources_warned = True elif self._on_unused_sources == 'error': raise ValueError('Some sources provided by the stream do not match model placeholders. Set' '`main_loop.on_unused_sources` to `warn` in order to suppress this error.\n' 'Extra sources: {}'.format(unused_sources)) if missing_sources: raise ValueError('Stream does not provide all required sources. Missing sources: {}' .format(missing_sources))
def _check_sources(self, batch: Dict[str, object]) -> None
Check for unused and missing sources. :param batch: batch to be checked :raise ValueError: if a source is missing or unused and ``self._on_unused_sources`` is set to ``error``
2.879794
2.662205
1.081733
def _run_epoch(self, stream: StreamWrapper, train: bool) -> None:
    """
    Iterate through the given stream and evaluate/train the model with the received batches.

    Calls :py:meth:`cxflow.hooks.AbstractHook.after_batch` events.

    :param stream: stream to iterate
    :param train: if set to ``True``, the model will be trained
    :raise ValueError: in case of empty batch when ``on_empty_batch`` is set to ``error``
    :raise ValueError: in case of empty stream when ``on_empty_stream`` is set to ``error``
    :raise ValueError: in case of two batch variables having different lengths
    """
    nonempty_batch_count = 0
    for i, batch_input in enumerate(stream):
        self.raise_check_interrupt()

        # collect the lengths of all sources; a consistent batch yields exactly one length
        batch_sizes = {len(source) for source in batch_input.values()}
        if len(batch_sizes) == 0 or batch_sizes == {0}:
            # empty batch: warn/error/skip according to the configuration
            # NOTE(review): the message says "%i-th empty batch" but nonempty_batch_count is
            # passed in — looks like the wrong counter; confirm intended semantics
            if self._on_empty_batch == 'warn':
                logging.warning('%i-th batch in stream `%s` appears to be empty (%i-th empty batch in total). Set '
                                '`main_loop.on_empty_batch` to `ignore` in order to suppress this warning.',
                                i, stream.name, nonempty_batch_count)
            elif self._on_empty_batch == 'error':
                raise ValueError('{}-th batch in stream `{}` appears to be empty ({}-th empty batch in total). Set '
                                 '`main_loop.on_empty_batch` to `warn` in order to change this error into warning; '
                                 'set to `ignore` to remove it.'.format(i, stream.name, nonempty_batch_count))
            continue
        elif self._fixed_batch_size:
            if batch_sizes != {self._fixed_batch_size}:
                # find the first offending variable for the debug message, then skip the batch
                var, len_ = [(k, len(v)) for k, v in batch_input.items() if len(v) != self._fixed_batch_size][0]
                logging.debug('%i-th batch in stream `%s` has variable `%s` of length %i inconsistent with '
                              '`main_loop.fixed_size` = %i', i, stream.name, var, len_, self._fixed_batch_size)
                continue
        nonempty_batch_count += 1

        self._check_sources(batch_input)

        with Timer('eval_batch_{}'.format(stream.name), self._epoch_profile):
            batch_output = self._model.run(batch=batch_input, train=train, stream=stream)
        assert set(batch_input.keys()).isdisjoint(set(batch_output)), 'Batch inputs and outputs must not overlap.'

        with Timer('after_batch_hooks_{}'.format(stream.name), self._epoch_profile):
            batch_data = {**batch_input, **batch_output}
            for hook in self._hooks:
                hook.after_batch(stream_name=stream.name, batch_data=batch_data)
    if nonempty_batch_count == 0:
        if self._on_empty_stream == 'warn':
            logging.warning('Stream `%s` appears to be empty. Set `main_loop.on_empty_stream` to `ignore` in order '
                            'to suppress this warning.', stream.name)
        elif self._on_empty_stream == 'error':
            raise ValueError('Stream `{}` appears to be empty. Set '
                             '`main_loop.on_empty_stream` to `warn` in order to change this error into warning; '
                             'set to `ignore` to remove it.'.format(stream.name))
2.649671
2.514997
1.053549
def train_by_stream(self, stream: StreamWrapper) -> None:
    """
    Run one pass over the given stream, updating the model on every batch.

    :param stream: stream to train with
    """
    self._run_epoch(stream, train=True)
15.710015
14.516537
1.082215
def evaluate_stream(self, stream: StreamWrapper) -> None:
    """
    Run one pass over the given stream without updating the model.

    :param stream: stream to be evaluated
    """
    self._run_epoch(stream, train=False)
20.375593
26.154877
0.779036
def get_stream(self, stream_name: str) -> StreamWrapper:
    """
    Get a :py:class:`StreamWrapper` with the given name (created and cached on first use).

    :param stream_name: stream name
    :return: the (possibly cached) stream wrapper
    :raise AttributeError: if the dataset does not provide the function creating the stream
    """
    if stream_name not in self._streams:
        # the dataset must expose a `<stream_name>_stream` factory function
        stream_fn_name = '{}_stream'.format(stream_name)
        try:
            stream_fn = getattr(self._dataset, stream_fn_name)
            # only the train stream may be limited to a fixed number of batches per epoch
            stream_epoch_limit = -1
            if self._fixed_epoch_size is not None and stream_name == self._train_stream_name:
                stream_epoch_limit = self._fixed_epoch_size
            self._streams[stream_name] = StreamWrapper(stream_fn, buffer_size=self._buffer,
                                                       epoch_size=stream_epoch_limit, name=stream_name,
                                                       profile=self._epoch_profile)
        except AttributeError as ex:
            raise AttributeError('The dataset does not have a function for creating a stream named `{}`. '
                                 'The function has to be named `{}`.'.format(stream_name, stream_fn_name)) from ex
    return self._streams[stream_name]
2.944189
2.797191
1.052552
for stream_name in streams: with self.get_stream(stream_name) as stream: self.evaluate_stream(stream) epoch_data = self._create_epoch_data(streams) for hook in self._hooks: hook.after_epoch(epoch_id=0, epoch_data=epoch_data)
def _run_zeroth_epoch(self, streams: Iterable[str]) -> None
Run zeroth epoch on the specified streams. Calls - :py:meth:`cxflow.hooks.AbstractHook.after_epoch` :param streams: stream names to be evaluated
3.595519
3.407332
1.05523
# Initialization: before_training for hook in self._hooks: hook.before_training() try: run_func() except TrainingTerminated as ex: logging.info('Training terminated: %s', ex) # After training: after_training for hook in self._hooks: hook.after_training()
def _try_run(self, run_func: Callable[[], None]) -> None
Try running the given function (training/prediction). Calls - :py:meth:`cxflow.hooks.AbstractHook.before_training` - :py:meth:`cxflow.hooks.AbstractHook.after_training` :param run_func: function to be run
4.40769
4.058091
1.086148
def run_training(self, trace: Optional[TrainingTrace]=None) -> None:
    """
    Run the main loop in the training mode.

    Calls
        - :py:meth:`cxflow.hooks.AbstractHook.after_epoch`
        - :py:meth:`cxflow.hooks.AbstractHook.after_epoch_profile`

    :param trace: optional training trace kept updated with the number of completed epochs
    """
    # eagerly create all the stream wrappers so that a missing stream fails fast
    for stream_name in [self._train_stream_name] + self._extra_streams:
        self.get_stream(stream_name)

    def training():
        logging.debug('Training started')
        self._epochs_done = 0

        # Zeroth epoch: after_epoch
        if not self._skip_zeroth_epoch:
            logging.info('Evaluating 0th epoch')
            self._run_zeroth_epoch([self._train_stream_name] + self._extra_streams)
            logging.info('0th epoch done\n\n')

        # Training loop: after_epoch, after_epoch_profile
        # runs until a hook raises TrainingTerminated (handled by the _try_run wrapper)
        while True:
            epoch_id = self._epochs_done + 1
            logging.info('Training epoch %s', epoch_id)
            self._epoch_profile.clear()
            epoch_data = self._create_epoch_data()

            # train on the train stream, then evaluate all the extra streams
            with self.get_stream(self._train_stream_name) as stream:
                self.train_by_stream(stream)
            for stream_name in self._extra_streams:
                with self.get_stream(stream_name) as stream:
                    self.evaluate_stream(stream)

            with Timer('after_epoch_hooks', self._epoch_profile):
                for hook in self._hooks:
                    hook.after_epoch(epoch_id=epoch_id, epoch_data=epoch_data)
            for hook in self._hooks:
                hook.after_epoch_profile(epoch_id=epoch_id, profile=self._epoch_profile,
                                         train_stream_name=self._train_stream_name,
                                         extra_streams=self._extra_streams)

            self._epochs_done = epoch_id
            if trace is not None:
                trace[TrainingTraceKeys.EPOCHS_DONE] = self._epochs_done
            logging.info('Epoch %s done\n\n', epoch_id)

    self._try_run(training)
3.001684
2.929738
1.024557
def run_evaluation(self, stream_name: str) -> None:
    """
    Run the main loop in the prediction mode on the given stream.

    :param stream_name: name of the stream to be evaluated
    """
    def prediction():
        logging.info('Running prediction')
        self._run_zeroth_epoch([stream_name])
        logging.info('Prediction done\n\n')

    self._try_run(prediction)
11.130421
11.582154
0.960997
def major_vote(all_votes: Iterable[Iterable[Hashable]]) -> Iterable[Hashable]:
    """
    For each position across the given vote sequences, return the most common object at that position.

    E.g.: for ``[[1, 2], [1, 3], [2, 3]]`` the result is ``[1, 3]`` as 1 and 3 are the most common
    objects at the first and second positions respectively.

    :param all_votes: an iterable of object iterations
    :return: the most common objects in the iterations (the major vote)
    """
    result = []
    for votes in zip(*all_votes):
        winner, _ = Counter(votes).most_common(1)[0]
        result.append(winner)
    return result
3.945724
7.353713
0.536562
def _load_models(self) -> None:
    """Lazily load all the models to be assembled into ``self._models`` (no-op when already loaded)."""
    if self._models is None:
        logging.info('Loading %d models', len(self._model_paths))

        def load_model(model_path: str):
            logging.debug('\tloading %s', model_path)
            # a model directory is expected to contain the standard cxflow config file
            if path.isdir(model_path):
                model_path = path.join(model_path, CXF_CONFIG_FILE)
            config = load_config(model_path)
            # force the ensemble's own inputs/outputs upon each loaded model
            config['model']['inputs'] = self._inputs
            config['model']['outputs'] = self._outputs
            return create_model(config, output_dir=None, dataset=self._dataset,
                                restore_from=path.dirname(model_path))

        self._models = list(map(load_model, self._model_paths))
3.157053
2.949748
1.070279
def run(self, batch: Batch, train: bool=False, stream: StreamWrapper=None) -> Batch:
    """
    Run a feed-forward pass with the given batch through all the models, aggregate and return the results.

    .. warning::
        :py:class:`Ensemble` can not be trained.

    :param batch: batch to be processed
    :param train: ``True`` if this batch should be used for model update, ``False`` otherwise
    :param stream: stream wrapper (useful for precise buffer management)
    :return: aggregated results dict
    :raise ValueError: if the ``train`` flag is set to ``True``
    """
    if train:
        raise ValueError('Ensemble model cannot be trained.')
    self._load_models()

    # collect per-model outputs first, then aggregate them per output name
    all_outputs = [model.run(batch, False, stream) for model in self._models]

    aggregated = {}
    for name in self._outputs:
        values = [outputs[name] for outputs in all_outputs]
        if self._aggregation == 'mean':
            aggregated[name] = np.mean(values, axis=0)
        elif self._aggregation == 'major_vote':
            # flatten to (num_models, -1), vote element-wise, then restore the original shape
            values_arr = np.array(values)
            voted = major_vote(values_arr.reshape((values_arr.shape[0], -1)))
            aggregated[name] = np.array(voted).reshape(values_arr[0].shape)
    return aggregated
2.714924
2.633717
1.030833
def parse_fully_qualified_name(fq_name: str) -> Tuple[Optional[str], str]:
    """
    Split a fully-qualified (dot-separated) name into a (module, name) pair.

    :param fq_name: fully qualified name separated with dots
    :return: ``(module, name)``; module is ``None`` when the given name contains no separators (dots)
    """
    module, separator, name = fq_name.rpartition('.')
    if separator:
        return module, name
    return None, name
1.888244
2.094707
0.901436
def get_attribute(module_name: str, attribute_name: str):
    """
    Return the given attribute (in most cases a class or a function) of the specified module.

    :param module_name: module name
    :param attribute_name: attribute name
    :return: module attribute
    """
    assert isinstance(module_name, str)
    assert isinstance(attribute_name, str)
    module = importlib.import_module(module_name)
    return getattr(module, attribute_name)
2.602404
2.656311
0.979706
def create_object(module_name: str, class_name: str, args: Iterable=(), kwargs: Dict[str, Any]=_EMPTY_DICT):
    """
    Create an instance of the given class from the given module, passing args and kwargs to the constructor.

    This mimics the following code:

    .. code-block:: python

        from module import class
        return class(*args, **kwargs)

    :param module_name: module name
    :param class_name: class name
    :param args: args to be passed to the object constructor
    :param kwargs: kwargs to be passed to the object constructor
    :return: created object instance
    """
    factory = get_attribute(module_name, class_name)
    return factory(*args, **kwargs)
5.329707
8.022625
0.664335
def list_submodules(module_name: str) -> List[str]:  # pylint: disable=invalid-sequence-index
    """
    List the full (dotted) names of all direct submodules of the given module.

    :param module_name: name of the module of which the submodules will be listed
    """
    parent = importlib.import_module(module_name)
    return ['{}.{}'.format(module_name, name) for _, name, _ in pkgutil.iter_modules(parent.__path__)]
null
null
null
def find_class_module(module_name: str, class_name: str) \
        -> Tuple[List[str], List[Tuple[str, Exception]]]:  # pylint: disable=invalid-sequence-index
    """
    Find all sub-modules of the given module that contain the given class.

    Sub-modules which cannot be imported are returned as (sub-module name, Exception) tuples.

    :param module_name: name of the module to be searched
    :param class_name: searched class name
    :return: a tuple of (sub-modules having the searched class, sub-modules that could not be searched)
    """
    matched = []
    erroneous = []
    for submodule_name in list_submodules(module_name):
        try:
            # a sub-module may be erroneous; record the failure and keep searching the rest
            submodule = importlib.import_module(submodule_name)
            if hasattr(submodule, class_name):
                matched.append(submodule_name)
        except Exception as ex:  # pylint: disable=broad-except
            erroneous.append((submodule_name, ex))
    return matched, erroneous
2.9121
3.120176
0.933313
def get_class_module(module_name: str, class_name: str) -> Optional[str]:
    """
    Return the sub-module of the given module which contains the given class.

    Wraps `utils.reflection.find_class_module` with the following behavior:
        - raise an error when multiple sub-modules contain *different* classes of the same name
        - return ``None`` when no sub-module is found
        - warn about non-searchable sub-modules

    .. note::
        This function logs!

    :param module_name: module to be searched
    :param class_name: searched class name
    :return: sub-module with the searched class or None
    """
    matched_modules, erroneous_modules = find_class_module(module_name, class_name)

    for submodule, error in erroneous_modules:
        logging.warning('Could not inspect sub-module `%s` due to `%s` '
                        'when searching for `%s` in sub-modules of `%s`.',
                        submodule, type(error).__name__, class_name, module_name)

    if not matched_modules:
        return None

    if len(matched_modules) > 1:
        # multiple hits are fine only if they all resolve to the very same class object
        first_class = getattr(importlib.import_module(matched_modules[0]), class_name)
        for matched_module in matched_modules[1:]:
            if getattr(importlib.import_module(matched_module), class_name) is not first_class:
                raise ValueError('Found more than one sub-module when searching for `{}` in sub-modules of `{}`. '
                                 'Please specify the module explicitly. Found sub-modules: `{}`'
                                 .format(class_name, module_name, matched_modules))
    return matched_modules[0]
2.704412
2.608617
1.036723
def entry_point() -> None:
    """**cxflow** entry point: parse CLI arguments, set up logging and dispatch to the selected sub-command."""
    # make sure the path contains the current working directory
    sys.path.insert(0, os.getcwd())

    parser = get_cxflow_arg_parser(True)

    # parse CLI arguments
    known_args, unknown_args = parser.parse_known_args()

    # show help if no subcommand was specified.
    if not hasattr(known_args, 'subcommand'):
        parser.print_help()
        quit(1)

    # set up global logger
    logger = logging.getLogger('')
    logger.setLevel(logging.DEBUG if known_args.verbose else logging.INFO)
    logger.handlers = []  # remove default handlers

    # set up STDERR handler
    stderr_handler = logging.StreamHandler(sys.stderr)
    stderr_handler.setFormatter(logging.Formatter(CXF_LOG_FORMAT, datefmt=CXF_LOG_DATE_FORMAT))
    logger.addHandler(stderr_handler)

    # dispatch to the selected sub-command; unknown args are passed through as configuration updates
    if known_args.subcommand == 'train':
        train(config_path=known_args.config_file, cl_arguments=unknown_args, output_root=known_args.output_root)
    elif known_args.subcommand == 'resume':
        resume(config_path=known_args.config_path, restore_from=known_args.restore_from, cl_arguments=unknown_args,
               output_root=known_args.output_root)
    elif known_args.subcommand == 'predict':
        logging.warning('Predict command is deprecated and will be removed, use ``cxflow eval predict ...`` instead')
        predict(config_path=known_args.config_path, restore_from=known_args.restore_from, cl_arguments=unknown_args,
                output_root=known_args.output_root)
    elif known_args.subcommand == 'eval':
        evaluate(model_path=known_args.model_path, stream_name=known_args.stream_name, config_path=known_args.config,
                 cl_arguments=unknown_args, output_root=known_args.output_root)
    elif known_args.subcommand == 'dataset':
        invoke_dataset_method(config_path=known_args.config_file, method_name=known_args.method,
                              cl_arguments=unknown_args, output_root=known_args.output_root)
    elif known_args.subcommand == 'gridsearch':
        grid_search(script=known_args.script, params=known_args.params, dry_run=known_args.dry_run)
    elif known_args.subcommand == 'ls':
        list_train_dirs(known_args.dir, known_args.recursive, known_args.all, known_args.long, known_args.verbose)
    elif known_args.subcommand == 'prune':
        prune_train_dirs(known_args.dir, known_args.epochs, known_args.subdirs)
2.291168
2.205858
1.038674
if self._stream is None: self._stream = iter(self._get_stream_fn()) return self._stream
def _get_stream(self) -> Iterator
Possibly create and return raw dataset stream iterator.
4.6784
3.094398
1.511893
def _enqueue_batches(self, stop_event: Event) -> None:
    """
    Enqueue all the stream batches; if an epoch size is set, stop after ``epoch_size`` batches.

    .. note::
        The epoch end is signaled by putting ``None`` to the queue.

    Stop when:
        - ``stop_event`` is risen
        - the stream ends and epoch size is not set
        - the specified number of batches is enqueued

    .. note::
        This is used only with ``buffer`` > 0.

    :param stop_event: event signaling stop instruction
    """
    while True:
        self._stream = self._get_stream()
        while True:
            # Acquire the semaphore before processing the next batch
            # but immediately release it so that other threads
            # are not blocked when they decide to acquire it again.
            with self._semaphore:
                pass
            # It always takes a short moment before the native call actually
            # releases the GIL and we are free to compute. The following sleep
            # is here to compensate for this short moment - we don't want to
            # slow down the native call before the GIL is released.
            time.sleep(CXF_BUFFER_SLEEP)
            try:
                batch = next(self._stream)
            except StopIteration:
                break
            self._queue.put(batch)
            self._batch_count += 1
            if stop_event.is_set():
                # external stop request: leave without signaling an epoch end
                return
            if self._epoch_limit_reached():
                self._queue.put(None)
                self._batch_count = 0
                return
        self._stream = None  # yield a new iterator next time
        if self._epoch_size <= 0:  # for non-fixed size epochs
            self._queue.put(None)
            self._batch_count = 0
            return
        # fixed-size epoch not finished yet: the outer loop restarts the stream
6.008936
5.907131
1.017234
def _dequeue_batch(self) -> Optional[Batch]:
    """
    Return a single batch from the queue, or ``None`` signaling the epoch end.

    :raise ValueError: if the wrapper is used outside a with-resource environment
    :raise ChildProcessError: if the enqueueing thread ended unexpectedly
    """
    if self._enqueueing_thread is None:
        raise ValueError('StreamWrapper `{}` with buffer of size `{}` was used outside with-resource environment.'
                         .format(self._name, self._buffer_size))
    if not self._enqueueing_thread.is_alive() and self._queue.empty():
        # the previous epoch was fully consumed: start enqueueing the next one
        self._start_thread()
    while True:
        try:
            batch = self._queue.get(timeout=2)
            self._queue.task_done()
            break
        except Empty:
            if not self._enqueueing_thread.is_alive():
                try:
                    # the enqueueing thread may just finished properly so lets check the queue eagerly
                    batch = self._queue.get_nowait()
                    self._queue.task_done()
                    break
                except Empty:
                    # so we failed to retrieve a batch and the enqueueing thread is dead
                    # there is no hope, something must went wrong
                    raise ChildProcessError('Enqueueing thread ended unexpectedly.')
    return batch
4.937529
4.416122
1.118069
if self._epoch_limit_reached(): self._batch_count = 0 return None try: batch = next(self._get_stream()) self._batch_count += 1 return batch except StopIteration: self._stream = None # yield a new iterator next time if self._epoch_size > 0: # underlying stream ended but our fixed size epoch did not batch = next(self._get_stream()) # get another stream and return its 1st batch self._batch_count += 1 return batch else: self._batch_count = 0 return None
def _next_batch(self) -> Optional[Batch]
Return a single batch or ``None`` signaling epoch end. .. note:: Signal the epoch end with ``None``. Stop when: - stream ends and epoch size is not set - specified number of batches is returned :return: a single batch or ``None`` signaling epoch end
4.372534
4.429422
0.987157
self._stopping_event = Event() self._enqueueing_thread = Thread(target=self._enqueue_batches, args=(self._stopping_event,)) self._enqueueing_thread.start()
def _start_thread(self)
Start an enqueueing thread.
3.882436
2.984698
1.30078
def _stop_thread(self) -> None:
    """Stop the enqueueing thread while keeping the queue content and the stream state."""
    self._stopping_event.set()
    queue_content = []
    try:
        # give the enqueueing thread chance to put a batch to the queue and check the stopping event
        while True:
            queue_content.append(self._queue.get_nowait())
    except Empty:
        pass
    self._enqueueing_thread.join()
    try:
        queue_content.append(self._queue.get_nowait())  # collect the very last item
    except Empty:
        pass
    # re-create the queue so that the drained batches are preserved for the next consumer
    self._queue = Queue(max(len(queue_content), self._buffer_size))  # queue content may be bigger than queue size
    for batch in queue_content:
        self._queue.put(batch)
4.095849
3.536085
1.1583
def _after_n_epoch(self, epoch_id: int, **_) -> None:
    """
    Save the model, using the epoch number as the name suffix.

    :param epoch_id: number of the processed epoch
    """
    SaveEvery.save_model(model=self._model,
                         name_suffix=str(epoch_id),
                         on_failure=self._on_save_failure)
10.85335
11.12625
0.975472
def save_model(model: AbstractModel, name_suffix: str, on_failure: str) -> None:
    """
    Save the given model under the given name suffix; on failure, take the specified action.

    :param model: the model to be saved
    :param name_suffix: name to be used for saving
    :param on_failure: action to be taken on failure; one of :py:attr:`SAVE_FAILURE_ACTIONS`
    :raise IOError: on save failure with ``on_failure`` set to ``error``
    """
    try:
        logging.debug('Saving the model')
        save_path = model.save(name_suffix)
        logging.info('Model saved to: %s', save_path)
    except Exception as ex:  # pylint: disable=broad-except
        if on_failure == 'error':
            raise IOError('Failed to save the model.') from ex
        elif on_failure == 'warn':
            logging.warning('Failed to save the model.')
        # any other on_failure value silently ignores the error
2.587712
2.395357
1.080303
def _get_value(self, epoch_data: EpochData) -> float:
    """
    Retrieve the value of the monitored variable from the given epoch data.

    :param epoch_data: epoch data which determine whether the model will be saved or not
    :raise KeyError: if any of the specified stream, variable or aggregation is not present in the ``epoch_data``
    :raise TypeError: if the variable value is not a dict when aggregation is specified
    :raise ValueError: if the variable value is not a scalar
    """
    if self._stream_name not in epoch_data:
        raise KeyError('Stream `{}` was not found in the epoch data.\nAvailable streams are `{}`.'
                       .format(self._stream_name, epoch_data.keys()))

    stream_data = epoch_data[self._stream_name]
    if self._variable not in stream_data:
        raise KeyError('Variable `{}` for stream `{}` was not found in the epoch data. '
                       'Available variables for stream `{}` are `{}`.'
                       .format(self._variable, self._stream_name, self._stream_name, stream_data.keys()))

    value = stream_data[self._variable]
    if self._aggregation:
        # with an aggregation configured, the variable must be a dict of aggregated values
        if not isinstance(value, dict):
            raise TypeError('Variable `{}` is expected to be a dict when aggregation is specified. '
                            'Got `{}` instead.'.format(self._variable, type(value).__name__))
        if self._aggregation not in value:
            raise KeyError('Specified aggregation `{}` was not found in the variable `{}`. '
                           'Available aggregations: `{}`.'.format(self._aggregation, self._variable, value.keys()))
        value = value[self._aggregation]

    if not np.isscalar(value):
        raise ValueError('Variable `{}` value is not a scalar.'.format(value))
    return value
2.076195
1.781121
1.165667
if self._best_value is None: return True if self._condition == 'min': return new_value < self._best_value if self._condition == 'max': return new_value > self._best_value
def _is_value_better(self, new_value: float) -> bool
Test if the new value is better than the best so far. :param new_value: current value of the objective function
2.590539
2.658978
0.974261
def after_epoch(self, epoch_data: EpochData, **_) -> None:
    """
    Save the model iff the monitored variable improved upon its best value so far.

    :param epoch_data: epoch data to be processed
    """
    current = self._get_value(epoch_data)
    if self._is_value_better(current):
        self._best_value = current
        SaveEvery.save_model(model=self._model,
                             name_suffix=self._OUTPUT_NAME,
                             on_failure=self._on_save_failure)
6.114783
5.600642
1.0918
def print_progress_bar(done: int, total: int, prefix: str = '', suffix: str = '') -> None:
    """
    Print a progress bar with the given prefix and suffix, without a newline at the end.

    Falls back to a short ``done/total`` counter (or a one-character spinner) when the
    terminal is too narrow for the full bar.

    :param done: current step in computation
    :param total: total count of steps in computation
    :param prefix: info text displayed before the progress bar
    :param suffix: info text displayed after the progress bar
    """
    percent = '{0:.1f}'.format(100 * (done / float(total)))
    # 7 = fixed decoration characters around the bar; cap the bar at 50 columns
    base_len = shutil.get_terminal_size().columns - 7 - len(prefix) - len(suffix)
    base_len = min([base_len, 50])
    # reserve space for the widest possible counter ('total/total=100.0')
    min_length = base_len - 1 - len('{}/{}={}'.format(total, total, '100.0'))
    length = base_len - len('{}/{}={}'.format(done, total, percent))

    if min_length > 0:
        filled_len = int(min_length * done // total)
        bar = '='*filled_len + '-'*(min_length - filled_len)
        # pad so the bar keeps a constant width as the counter grows
        spacing = ' '*(length - min_length)
        print('\r{}: |{}|{}{}/{}={}% {}'.format(prefix, bar, spacing, done, total, percent, suffix), end='\r')
    else:
        # terminal too narrow for a bar: print a short counter, or a spinner if even that does not fit
        short_progress = '\r{}: {}/{}'.format(prefix, done, total)
        if len(short_progress) <= shutil.get_terminal_size().columns:
            print(short_progress, end='\r')
        else:
            print(['-', '\\', '|', '/'][done % 4], end='\r')
2.735448
2.698181
1.013812
def get_formatted_time(seconds: float) -> str:
    """
    Format the given duration as ``H:MM:SS`` (the input is rounded to whole seconds first).

    :param seconds: time in seconds
    :return: formatted human-readable time
    """
    total = round(seconds)
    hours, remainder = divmod(total, 3600)
    minutes, secs = divmod(remainder, 60)
    return '{:d}:{:02d}:{:02d}'.format(hours, minutes, secs)
1.944126
2.245119
0.865935
def after_batch(self, stream_name: str, batch_data: Batch) -> None:
    """
    Display the progress and ETA for the current stream in the epoch.

    If the stream size (total batch count) is unknown (1st epoch), print only the number of
    processed batches.
    """
    if self._current_stream_name is None or self._current_stream_name != stream_name:
        # a new stream started: reset the timing base and clear the previous progress line
        self._current_stream_name = stream_name
        self._current_stream_start = None
        erase_line()

    self._current_batch_count[stream_name] += 1
    current_batch = self._current_batch_count[stream_name]

    # total batch count is available
    if stream_name in self._total_batch_count:
        # compute ETA
        total_batches = self._total_batch_count[stream_name]
        if self._current_stream_start:
            # the first batch is excluded from the average (timing starts after it)
            measured_batches = current_batch - 1
            avg_batch_time = (time.time() - self._current_stream_start) / measured_batches
            eta_sec = avg_batch_time * (total_batches - current_batch)
            eta = get_formatted_time(eta_sec)
        else:
            self._current_stream_start = time.time()
            eta = ''
        print_progress_bar(current_batch, total_batches, prefix=stream_name, suffix=eta)
    # total batch count is not available (1st epoch)
    else:
        short_progress = '{}: {}'.format(stream_name, current_batch)
        if len(short_progress) <= shutil.get_terminal_size().columns:
            print(short_progress, end='\r')
        else:
            # terminal too narrow even for the counter: show a spinner
            print(['-', '\\', '|', '/'][current_batch % 4], end='\r')
2.758272
2.484473
1.110204
def after_epoch(self, **_) -> None:
    """Reset the per-epoch progress counters; remember ``total_batch_count`` after the 1st epoch."""
    if not self._total_batch_count_saved:
        # the first finished epoch defines the per-stream batch totals used for ETA
        self._total_batch_count = self._current_batch_count.copy()
        self._total_batch_count_saved = True
    self._current_batch_count.clear()
    self._current_stream_start = None
    self._current_stream_name = None
    erase_line()
4.341773
3.266089
1.329349
def after_epoch_profile(self, epoch_id, profile: TimeProfile, train_stream_name: str,
                        extra_streams: Iterable[str]) -> None:
    """
    Summarize and log the given epoch profile.

    The profile is expected to contain at least:
        - ``read_batch_<train>``, ``eval_batch_<train>`` and ``after_batch_hooks_<train>``
          entries produced by the train stream
        - the ``after_epoch_hooks`` entry

    :param profile: epoch timings profile
    :param train_stream_name: name of the training stream
    :param extra_streams: enumeration of additional stream names
    """
    train_total = sum(profile.get('eval_batch_{}'.format(train_stream_name), []))
    hooks_total = sum(profile.get('after_epoch_hooks', []))
    read_data_total = 0
    eval_total = 0
    for name in chain(extra_streams, [train_stream_name]):
        read_data_total += sum(profile.get('read_batch_' + name, []))
        hooks_total += sum(profile.get('after_batch_hooks_' + name, []))
        if name != train_stream_name:
            # eval time covers the non-train streams only
            eval_total += sum(profile.get('eval_batch_' + name, []))

    logging.info('\tT read data:\t%f', read_data_total)
    logging.info('\tT train:\t%f', train_total)
    logging.info('\tT eval:\t%f', eval_total)
    logging.info('\tT hooks:\t%f', hooks_total)
2.541979
2.183242
1.164314
def confusion_matrix(expected: np.ndarray, predicted: np.ndarray, num_classes: int) -> np.ndarray:
    """
    Calculate and return the confusion matrix for the predicted and expected labels.

    :param expected: array of expected classes (integers) with shape `[num_of_data]`
    :param predicted: array of predicted classes (integers) with shape `[num_of_data]`
    :param num_classes: number of classification classes
    :return: confusion matrix (cm) with absolute counts, shape ``[num_classes, num_classes]``;
             ``cm[i, j]`` counts samples with expected class ``i`` predicted as class ``j``
    """
    # np.issubdtype replaces np.issubclass_, which was deprecated and removed in NumPy 2.0
    assert np.issubdtype(expected.dtype, np.integer), " Classes' indices must be integers"
    assert np.issubdtype(predicted.dtype, np.integer), " Classes' indices must be integers"
    assert expected.shape == predicted.shape, "Predicted and expected data must be the same length"
    assert num_classes > np.max([predicted, expected]), \
        "Number of classes must be at least the number of indices in predicted/expected data"
    assert np.min([predicted, expected]) >= 0, " Classes' indices must be non-negative integers"

    cm_abs = np.zeros((num_classes, num_classes), dtype=np.int32)
    # vectorized equivalent of `for pred, exp in zip(predicted, expected): cm_abs[exp, pred] += 1`;
    # np.add.at handles repeated index pairs correctly (unlike plain fancy-index assignment)
    np.add.at(cm_abs, (expected, predicted), 1)
    return cm_abs
2.673908
2.490925
1.07346
param_space = OrderedDict() for arg in params: assert '=' in arg name = arg[:arg.index('=')] options = arg[arg.index('=') + 1:] options = ast.literal_eval(options) assert isinstance(options, list), options param_space[name] = options param_names = param_space.keys() commands = [] for values in itertools.product(*[param_space[name] for name in param_names]): command = str(script).split() for name, value in zip(param_names, values): command.append(str(name) + '="' + str(value) + '"') commands.append(command) return commands
def _build_grid_search_commands(script: str, params: typing.Iterable[str]) -> typing.Iterable[typing.List[str]]
Build all grid search parameter configurations. :param script: String of command prefix, e.g. ``cxflow train -v -o log``. :param params: Iterable collection of strings in standard **cxflow** param form, e.g. ``'numerical_param=[1, 2]'`` or ``'text_param=["hello", "cio"]'``.
2.261208
2.448348
0.923565
def grid_search(script: str, params: typing.Iterable[str], dry_run: bool=False) -> None:
    """
    Build all grid search parameter configurations and optionally run them.

    :param script: String of command prefix, e.g. ``cxflow train -v -o log``.
    :param params: Iterable collection of strings in standard **cxflow** param form,
                   e.g. ``'numerical_param=[1, 2]'`` or ``'text_param=["hello", "cio"]'``.
    :param dry_run: If set to ``True``, the built commands will only be printed instead of executed.
    """
    commands = _build_grid_search_commands(script=script, params=params)
    if dry_run:
        logging.warning('Dry run')
        for command in commands:
            logging.info(command)
    else:
        for command in commands:
            try:
                completed_process = subprocess.run(command)
                logging.info('Command `%s` completed with exit code %d',
                             command, completed_process.returncode)
            except Exception:  # pylint: disable=broad-except
                # log the full traceback (logging.error discarded it), then continue the grid
                logging.exception('Command `%s` failed.', command)
2.565343
2.511847
1.021298
def stream_info(self) -> None:
    """Check and report source names, dtypes and shapes of all the streams available."""
    # heuristic: stream providers are attributes with `stream` in their name (excluding this method)
    stream_names = [stream_name for stream_name in dir(self)
                    if 'stream' in stream_name and stream_name != 'stream_info']
    logging.info('Found %s stream candidates: %s', len(stream_names), stream_names)
    for stream_name in stream_names:
        try:
            stream_fn = getattr(self, stream_name)
            logging.info(stream_name)
            # inspect only the first batch of each stream
            batch = next(iter(stream_fn()))
            rows = []
            for key, value in batch.items():
                try:
                    value_arr = np.array(value)
                    row = [key, value_arr.dtype, value_arr.shape]
                    if value_arr.dtype.kind in 'bui':  # boolean, unsigned, integer
                        row.append('{} - {}'.format(value_arr.min(), value_arr.max()))
                    elif value_arr.dtype.kind == 'f':  # fixed: `is 'f'` identity check -> equality
                        row.append('{0:.2f} - {1:.2f}'.format(value_arr.min(), value_arr.max()))
                except ValueError:  # np broadcasting failed (ragged array)
                    value_arr = None
                    row = [key, '{}'.format(type(value[0]).__name__), '({},)'.format(len(list(value)))]
                # warn when the source is ragged: either np failed above or sub-shapes differ
                if value_arr is None or \
                        (value_arr.ndim > 0 and value_arr.shape[1:] != np.array(value_arr[0]).shape):
                    logging.warning('*** stream source `%s` appears to be ragged (non-rectangular) ***', key)
                rows.append(row)
            for line in tabulate.tabulate(rows, headers=['name', 'dtype', 'shape', 'range'],
                                          tablefmt='grid').split('\n'):
                logging.info(line)
        except Exception:  # pylint: disable=broad-except
            logging.warning('Exception was raised during checking stream `%s`, '
                            '(stack trace is displayed only with --verbose flag)', stream_name)
            logging.debug(traceback.format_exc())
3.227123
3.128406
1.031555
def parse_arg(arg: str) -> typing.Tuple[str, typing.Any]:
    """
    Parse CLI argument in format ``key=value`` to ``(key, value)``.

    :param arg: CLI argument string
    :return: tuple (key, value) with the value parsed as YAML
    :raise yaml.ParserError: on yaml parse error
    """
    assert '=' in arg, 'Unrecognized argument `{}`. [name]=[value] expected.'.format(arg)
    key, _, raw_value = arg.partition('=')
    # yaml.load without an explicit Loader is unsafe on untrusted input and is a hard
    # error in PyYAML>=6; safe_load parses only standard YAML scalars/collections.
    value = yaml.safe_load(raw_value)
    return key, value
4.410834
5.263324
0.838032
def load_config(config_file: str, additional_args: typing.Iterable[str]=()) -> dict:
    """
    Load config from YAML ``config_file`` and extend/override it with the given ``additional_args``.

    :param config_file: path the YAML config file to be loaded
    :param additional_args: additional args which may extend or override the config loaded from the file.
    :return: configuration as dict
    """
    config = load_yaml(config_file)

    # parse every arg first so a malformed one aborts before any override is applied
    parsed_args = [parse_arg(arg) for arg in additional_args]
    for dotted_key, value in parsed_args:
        *prefix_parts, final_key = dotted_key.split('.')
        # walk down to the sub-dict addressed by the dotted prefix
        node = config
        for part in prefix_parts:
            node = node[part]
        node[final_key] = value

    return reload(config)
2.953205
3.018826
0.978263
def find_config(config_path: str) -> str:
    """
    Derive configuration file path from the given path and check its existence.

    The given path is expected to be either

    1. path to the file
    2. path to a dir, in such case the path is joined with ``CXF_CONFIG_FILE``

    :param config_path: path to the configuration file or its parent directory
    :return: validated configuration file path
    """
    candidate = config_path
    # a directory was given -> expect the default config file inside it
    if path.isdir(candidate):
        candidate = path.join(candidate, CXF_CONFIG_FILE)
    assert path.exists(candidate), '`{}` does not exist'.format(candidate)
    return candidate
4.243945
3.642471
1.165128
def fallback(message: str, ex: Exception) -> None:
    """
    Fallback procedure when a cli command fails.

    :param message: message to be logged
    :param ex: Exception which caused the failure
    """
    logging.error('%s', message)
    # logging.exception includes the active traceback when invoked from an except block
    logging.exception('%s', ex)
    # terminate the CLI with a non-zero exit status
    sys.exit(1)
5.45981
5.866158
0.93073
self._data_root = data_root self._download_urls = download_urls
def _configure_dataset(self, data_root: str=None, download_urls: Iterable[str]=None, **kwargs) -> None
Save the passed values and use them as a default property implementation. :param data_root: directory to which the files will be downloaded :param download_urls: list of URLs to be downloaded
3.596983
3.218668
1.117538
def resume(config_path: str, restore_from: Optional[str], cl_arguments: Iterable[str], output_root: str) -> None:
    """
    Load config from the directory specified and start the training.

    :param config_path: path to the config file or the directory in which it is stored
    :param restore_from: backend-specific path to the already trained model to be restored from.
                         If ``None`` is passed, it is inferred from the configuration file location
                         as the directory it is located in.
    :param cl_arguments: additional command line arguments which will update the configuration
    :param output_root: output root in which the training directory will be created
    """
    config = None
    try:
        config_path = find_config(config_path)
        # default the restore path to the config file's directory when none was given
        restore_from = restore_from or path.dirname(config_path)
        config = load_config(config_file=config_path, additional_args=cl_arguments)
        validate_config(config)
        logging.debug('\tLoaded config: %s', config)
    except Exception as ex:  # pylint: disable=broad-except
        # fallback logs the failure and exits the process
        fallback('Loading config failed', ex)
    run(config=config, output_root=output_root, restore_from=restore_from)
3.617867
3.660303
0.988406
def after_epoch_profile(self, epoch_id: int, profile: TimeProfile, train_stream_name: str, extra_streams: Iterable[str]) -> None:
    """
    After epoch profile event.

    This event provides opportunity to process time profile of the finished epoch.

    :param epoch_id: finished epoch id
    :param profile: dictionary of lists of event timings that were measured during the epoch
    :param train_stream_name: name of the training stream
    :param extra_streams: enumeration of additional stream names
    """
    # intentionally a no-op; hooks interested in profiling override this method
72,097.921875
48,998.234375
1.471439