def subject(self, value):
"""
An asn1crypto.x509.Name object, or a dict with at least the
following keys:
- country_name
- state_or_province_name
- locality_name
- organization_name
- common_name
Less common keys include:
- organizational_unit_name
- email_address
- street_address
- postal_code
- business_category
- incorporation_locality
- incorporation_state_or_province
- incorporation_country
Uncommon keys include:
- surname
- title
- serial_number
- name
- given_name
- initials
- generation_qualifier
- dn_qualifier
- pseudonym
- domain_component
All values should be unicode strings
"""
is_dict = isinstance(value, dict)
if not isinstance(value, x509.Name) and not is_dict:
raise TypeError(_pretty_message(
'''
subject must be an instance of asn1crypto.x509.Name or a dict,
not %s
''',
_type_name(value)
))
if is_dict:
value = x509.Name.build(value)
self._subject = value
def subject_public_key(self, value):
"""
An asn1crypto.keys.PublicKeyInfo or oscrypto.asymmetric.PublicKey
object of the subject's public key.
"""
is_oscrypto = isinstance(value, asymmetric.PublicKey)
if not isinstance(value, keys.PublicKeyInfo) and not is_oscrypto:
raise TypeError(_pretty_message(
'''
subject_public_key must be an instance of
asn1crypto.keys.PublicKeyInfo or oscrypto.asymmetric.PublicKey,
not %s
''',
_type_name(value)
))
if is_oscrypto:
value = value.asn1
self._subject_public_key = value
self._key_identifier = self._subject_public_key.sha1
self._authority_key_identifier = None
def hash_algo(self, value):
"""
A unicode string of the hash algorithm to use when signing the
request - "sha1" (not recommended), "sha256" or "sha512"
"""
if value not in set(['sha1', 'sha256', 'sha512']):
raise ValueError(_pretty_message(
'''
hash_algo must be one of "sha1", "sha256", "sha512", not %s
''',
repr(value)
))
self._hash_algo = value
def _get_subject_alt(self, name):
"""
Returns the native value for each value in the subject alt name
extension request that is an asn1crypto.x509.GeneralName of the type
specified by the name param
:param name:
A unicode string used to filter the x509.GeneralName objects by -
it is the choice name of the x509.GeneralName
:return:
A list of unicode strings. Empty list indicates no subject alt
name extension request.
"""
if self._subject_alt_name is None:
return []
output = []
for general_name in self._subject_alt_name:
if general_name.name == name:
output.append(general_name.native)
return output
def _set_subject_alt(self, name, values):
"""
Replaces all existing asn1crypto.x509.GeneralName objects of the choice
represented by the name parameter with the values
:param name:
A unicode string of the choice name of the x509.GeneralName object
:param values:
A list of unicode strings to use as the values for the new
x509.GeneralName objects
"""
if self._subject_alt_name is not None:
filtered_general_names = []
for general_name in self._subject_alt_name:
if general_name.name != name:
filtered_general_names.append(general_name)
self._subject_alt_name = x509.GeneralNames(filtered_general_names)
else:
self._subject_alt_name = x509.GeneralNames()
if values is not None:
for value in values:
new_general_name = x509.GeneralName(name=name, value=value)
self._subject_alt_name.append(new_general_name)
if len(self._subject_alt_name) == 0:
self._subject_alt_name = None
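As a hedged sketch, a convenience property pair built on these two helpers might look like the following; the property name subject_alt_domains and the 'dns_name' choice are assumptions for illustration, not taken from the original source:

@property
def subject_alt_domains(self):
    # 'dns_name' is the x509.GeneralName choice for DNS entries
    return self._get_subject_alt('dns_name')

@subject_alt_domains.setter
def subject_alt_domains(self, value):
    self._set_subject_alt('dns_name', value)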
def set_extension(self, name, value):
"""
Sets the value for an extension using a fully constructed Asn1Value
object from asn1crypto. Normally this should not be needed, and the
convenience attributes should be sufficient.
See the definition of asn1crypto.x509.Extension to determine the
appropriate object type for a given extension. Extensions are marked
as critical when RFC 5280 or RFC 6960 indicate so. If an extension may
validly be marked as either critical or non-critical (such as certificate
policies and extended key usage), this class will mark it as non-critical.
:param name:
A unicode string of an extension id name from
asn1crypto.x509.ExtensionId
:param value:
A value object per the specs defined by asn1crypto.x509.Extension
"""
extension = x509.Extension({
'extn_id': name
})
# We use native here to convert OIDs to meaningful names
name = extension['extn_id'].native
spec = extension.spec('extn_value')
if not isinstance(value, spec) and value is not None:
raise TypeError(_pretty_message(
'''
value must be an instance of %s, not %s
''',
_type_name(spec),
_type_name(value)
))
if name in self._special_extensions:
setattr(self, '_%s' % name, value)
else:
if value is None:
if name in self._other_extensions:
del self._other_extensions[name]
else:
self._other_extensions[name] = value
def _determine_critical(self, name):
"""
:return:
A boolean indicating the correct value of the critical flag for
an extension, based on information from RFC 5280 and RFC 6960. The
correct value is based on the terminology SHOULD or MUST.
"""
if name == 'subject_alt_name':
return len(self._subject) == 0
if name == 'basic_constraints':
return self.ca is True
return {
'subject_directory_attributes': False,
'key_usage': True,
'issuer_alt_name': False,
'name_constraints': True,
# Based on example EV certificates, non-CA certs have this marked
# as non-critical, most likely because existing browsers don't
# seem to support policies or name constraints
'certificate_policies': False,
'policy_mappings': True,
'policy_constraints': True,
'extended_key_usage': False,
'inhibit_any_policy': True,
'subject_information_access': False,
'tls_feature': False,
'ocsp_no_check': False,
}.get(name, False)
def build(self, signing_private_key):
"""
Validates the request information, constructs an X.509 certification
request and then signs it
:param signing_private_key:
An asn1crypto.keys.PrivateKeyInfo or oscrypto.asymmetric.PrivateKey
object for the private key to sign the request with. This should be
the private key that matches the subject public key.
:return:
An asn1crypto.csr.CertificationRequest object of the request
"""
is_oscrypto = isinstance(signing_private_key, asymmetric.PrivateKey)
if not isinstance(signing_private_key, keys.PrivateKeyInfo) and not is_oscrypto:
raise TypeError(_pretty_message(
'''
signing_private_key must be an instance of
asn1crypto.keys.PrivateKeyInfo or
oscrypto.asymmetric.PrivateKey, not %s
''',
_type_name(signing_private_key)
))
signature_algo = signing_private_key.algorithm
if signature_algo == 'ec':
signature_algo = 'ecdsa'
signature_algorithm_id = '%s_%s' % (self._hash_algo, signature_algo)
def _make_extension(name, value):
return {
'extn_id': name,
'critical': self._determine_critical(name),
'extn_value': value
}
extensions = []
for name in sorted(self._special_extensions):
value = getattr(self, '_%s' % name)
if value is not None:
extensions.append(_make_extension(name, value))
for name in sorted(self._other_extensions.keys()):
extensions.append(_make_extension(name, self._other_extensions[name]))
attributes = []
if extensions:
attributes.append({
'type': 'extension_request',
'values': [extensions]
})
certification_request_info = csr.CertificationRequestInfo({
'version': 'v1',
'subject': self._subject,
'subject_pk_info': self._subject_public_key,
'attributes': attributes
})
if signing_private_key.algorithm == 'rsa':
sign_func = asymmetric.rsa_pkcs1v15_sign
elif signing_private_key.algorithm == 'dsa':
sign_func = asymmetric.dsa_sign
elif signing_private_key.algorithm == 'ec':
sign_func = asymmetric.ecdsa_sign
if not is_oscrypto:
signing_private_key = asymmetric.load_private_key(signing_private_key)
signature = sign_func(signing_private_key, certification_request_info.dump(), self._hash_algo)
return csr.CertificationRequest({
'certification_request_info': certification_request_info,
'signature_algorithm': {
'algorithm': signature_algorithm_id,
},
'signature': signature
})
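A usage sketch, assuming these setters and build() belong to a CSRBuilder-style class (the class name is an assumption) and that oscrypto is installed:

from oscrypto import asymmetric

# Generate an EC keypair and build a signed CSR for it
public_key, private_key = asymmetric.generate_pair('ec', curve='secp256r1')
builder = CSRBuilder(  # hypothetical constructor
    {'country_name': 'US', 'common_name': 'example.com'},
    public_key,
)
request = builder.build(private_key)
der_bytes = request.dump()  # DER-encoded CertificationRequest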
def compute_metrics_cv(self, X, y, **kwargs):
'''Compute cross-validated metrics.
Trains this model on data X with labels y.
Returns a list of dict with keys name, scoring_name, value.
Args:
X (Union[np.array, pd.DataFrame]): data
y (Union[np.array, pd.DataFrame, pd.Series]): labels
'''
# compute scores
results = self.cv_score_mean(X, y)
return results
def cv_score_mean(self, X, y):
'''Compute mean score across cross validation folds.
Split data and labels into cross validation folds and fit the model for
each fold. Then, for each scoring type in scorings, compute the score.
Finally, average the scores across folds. Returns a dictionary mapping
scoring to score.
Args:
X (np.array): data
y (np.array): labels
'''
X, y = self._format_inputs(X, y)
if self.problem_type.binary_classification:
kf = StratifiedKFold(
shuffle=True, n_splits=3, random_state=RANDOM_STATE + 3)
elif self.problem_type.multi_classification:
self.target_type_transformer.inverse_transform(y)
transformer = self.target_type_transformer
kf = StratifiedKFoldMultiClassIndicator(
transformer, shuffle=True, n_splits=3,
random_state=RANDOM_STATE + 3)
elif self.problem_type.regression:
kf = KFold(shuffle=True, n_splits=3, random_state=RANDOM_STATE + 4)
else:
raise NotImplementedError
scoring = {
scorer_info.name: scorer_info.scorer
for scorer_info in self.scorers_info
}
cv_results = cross_validate(
self.estimator, X, y,
scoring=scoring, cv=kf, return_train_score=False)
# post-processing
results = self._process_cv_results(cv_results)
return results
def get_contrib_features(project_root):
"""Get contributed features for a project at project_root
For a project ``foo``, walks modules within the ``foo.features.contrib``
subpackage. A single object that is an instance of ``ballet.Feature`` is
imported if present in each module. The resulting ``Feature`` objects are
collected.
Args:
project_root (str, path-like): Path to project root
Returns:
List[ballet.Feature]: list of Feature objects
"""
# TODO Project should require ModuleType
project = Project(project_root)
contrib = project._resolve('.features.contrib')
return _get_contrib_features(contrib)
def _get_contrib_features(module):
"""Get contributed features from within given module
Be very careful with untrusted code. The module/package will be
walked, every submodule will be imported, and all the code therein will be
executed. But why would you be trying to import from an untrusted package
anyway?
Args:
module (module): module (standalone or package) that contains feature
definitions
Yields:
Feature: features found within the module
"""
if isinstance(module, types.ModuleType):
# any module that has a __path__ attribute is also a package
if hasattr(module, '__path__'):
yield from _get_contrib_features_from_package(module)
else:
yield _get_contrib_feature_from_module(module)
else:
raise ValueError('Input is not a module')
def quickstart():
"""Generate a brand-new ballet project"""
import ballet.templating
import ballet.util.log
ballet.util.log.enable(level='INFO',
format=ballet.util.log.SIMPLE_LOG_FORMAT,
echo=False)
ballet.templating.render_project_template()
def update_project_template(push):
"""Update an existing ballet project from the upstream template"""
import ballet.update
import ballet.util.log
ballet.util.log.enable(level='INFO',
format=ballet.util.log.SIMPLE_LOG_FORMAT,
echo=False)
ballet.update.update_project_template(push=push)
def start_new_feature():
"""Start working on a new feature from a template"""
import ballet.templating
import ballet.util.log
ballet.util.log.enable(level='INFO',
format=ballet.util.log.SIMPLE_LOG_FORMAT,
echo=False)
ballet.templating.start_new_feature()
def write_tabular(obj, filepath):
"""Write tabular object in HDF5 or pickle format
Args:
obj (array or DataFrame): tabular object to write
filepath (path-like): path to write to; must end in '.h5' or '.pkl'
"""
_, fn, ext = splitext2(filepath)
if ext == '.h5':
_write_tabular_h5(obj, filepath)
elif ext == '.pkl':
_write_tabular_pickle(obj, filepath)
else:
raise NotImplementedError
def read_tabular(filepath):
"""Read tabular object in HDF5 or pickle format
Args:
filepath (path-like): path to read from; must end in '.h5' or '.pkl'
"""
_, fn, ext = splitext2(filepath)
if ext == '.h5':
return _read_tabular_h5(filepath)
elif ext == '.pkl':
return _read_tabular_pickle(filepath)
else:
raise NotImplementedError
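A round-trip sketch through the pickle branch; the file path is hypothetical:

import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3]})
write_tabular(df, '/tmp/data.pkl')
assert df.equals(read_tabular('/tmp/data.pkl'))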
def load_table_from_config(input_dir, config):
"""Load table from table config dict
Args:
input_dir (path-like): directory containing input files
config (dict): mapping with keys 'name', 'path', and 'pd_read_kwargs'.
Returns:
pd.DataFrame
"""
path = pathlib.Path(input_dir).joinpath(config['path'])
kwargs = config['pd_read_kwargs']
return pd.read_csv(path, **kwargs)
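An illustrative config entry (values are hypothetical); note that despite the generic 'pd_read_kwargs' key, the table is always read with pd.read_csv:

config = {
    'name': 'users',
    'path': 'users.csv',
    'pd_read_kwargs': {'sep': ',', 'index_col': 0},
}
df = load_table_from_config('/data/input', config)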
def validate_feature_api(project, force=False):
"""Validate feature API"""
if not force and not project.on_pr():
raise SkippedValidationTest('Not on PR')
validator = FeatureApiValidator(project)
result = validator.validate()
if not result:
raise InvalidFeatureApi
def evaluate_feature_performance(project, force=False):
"""Evaluate feature performance"""
if not force and not project.on_pr():
raise SkippedValidationTest('Not on PR')
out = project.build()
X_df, y, features = out['X_df'], out['y'], out['features']
proposed_feature = get_proposed_feature(project)
accepted_features = get_accepted_features(features, proposed_feature)
evaluator = GFSSFAcceptanceEvaluator(X_df, y, accepted_features)
accepted = evaluator.judge(proposed_feature)
if not accepted:
raise FeatureRejected
def prune_existing_features(project, force=False):
"""Prune existing features"""
if not force and not project.on_master_after_merge():
raise SkippedValidationTest('Not on master')
out = project.build()
X_df, y, features = out['X_df'], out['y'], out['features']
proposed_feature = get_proposed_feature(project)
accepted_features = get_accepted_features(features, proposed_feature)
evaluator = GFSSFPruningEvaluator(
X_df, y, accepted_features, proposed_feature)
redundant_features = evaluator.prune()
# propose removal
for feature in redundant_features:
logger.debug(PRUNER_MESSAGE + feature.source)
return redundant_features
def validate(package, test_target_type=None):
"""Entrypoint for ./validate.py script in ballet projects"""
project = Project(package)
if test_target_type is None:
test_target_type = detect_target_type()
if test_target_type == BalletTestTypes.PROJECT_STRUCTURE_VALIDATION:
check_project_structure(project)
elif test_target_type == BalletTestTypes.FEATURE_API_VALIDATION:
validate_feature_api(project)
elif test_target_type == BalletTestTypes.FEATURE_ACCEPTANCE_EVALUTION:
evaluate_feature_performance(project)
elif test_target_type == (
BalletTestTypes.FEATURE_PRUNING_EVALUATION):
prune_existing_features(project)
else:
raise NotImplementedError(
'Unsupported test target type: {test_target_type}'
.format(test_target_type=test_target_type))
def spliceext(filepath, s):
"""Add s into filepath before the extension
Args:
filepath (str, path): file path
s (str): string to splice
Returns:
str
"""
root, ext = os.path.splitext(safepath(filepath))
return root + s + ext
def replaceext(filepath, new_ext):
"""Replace any existing file extension with a new one
Example::
>>> replaceext('/foo/bar.txt', 'py')
'/foo/bar.py'
>>> replaceext('/foo/bar.txt', '.doc')
'/foo/bar.doc'
Args:
filepath (str, path): file path
new_ext (str): new file extension; if a leading dot is not included,
it will be added.
Returns:
str
"""
if new_ext and new_ext[0] != '.':
new_ext = '.' + new_ext
root, ext = os.path.splitext(safepath(filepath))
return root + new_ext
def splitext2(filepath):
"""Split filepath into root, filename, ext
Args:
filepath (str, path): file path
Returns:
str
"""
root, filename = os.path.split(safepath(filepath))
filename, ext = os.path.splitext(safepath(filename))
return root, filename, ext
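For example:
>>> splitext2('/foo/bar.txt')
('/foo', 'bar', '.txt')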
def isemptyfile(filepath):
"""Determine if the file both exists and is empty
Args:
filepath (str, path): file path
Returns:
bool
"""
exists = os.path.exists(safepath(filepath))
if exists:
filesize = os.path.getsize(safepath(filepath))
return filesize == 0
else:
return False
def synctree(src, dst, onexist=None):
"""Recursively sync files at directory src to dst
This is more or less equivalent to::
cp -n -R ${src}/ ${dst}/
If a file at the same path exists in src and dst, it is NOT overwritten
in dst. Pass ``onexist`` in order to raise an error on such conditions.
Args:
src (path-like): source directory
dst (path-like): destination directory, does not need to exist
onexist (callable): function to call if file exists at destination,
takes the full path to destination file as only argument
"""
src = pathlib.Path(src).resolve()
dst = pathlib.Path(dst).resolve()
if not src.is_dir():
raise ValueError
if dst.exists() and not dst.is_dir():
raise ValueError
if onexist is None:
def onexist(path): pass
_synctree(src, dst, onexist)
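A usage sketch with hypothetical directory names; raising from onexist aborts the sync instead of silently skipping existing files:

def fail(path):
    raise FileExistsError(str(path))

synctree('template/', 'project/', onexist=fail)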
def calculate_disc_entropy(X):
r"""Calculates the exact Shannon entropy of a discrete dataset,
using empirical probabilities according to the equation:
$ H(X) = -\sum_{c \in X} p(c) \times \log(p(c)) $
Where $ p(c) $ is calculated as the frequency of c in X.
If X's columns logically represent continuous features,
it is better to use the estimate_cont_entropy function.
If you are unsure of which to use, estimate_entropy can
take datasets of mixed discrete and continuous features.
Args:
X (array-like): An array-like (np arr, pandas df, etc.) with shape
(n_samples, n_features) or (n_samples)
Returns:
float: A floating-point number representing the dataset entropy.
"""
X = asarray2d(X)
n_samples, _ = X.shape
_, counts = np.unique(X, axis=0, return_counts=True)
empirical_p = counts * 1.0 / n_samples
log_p = np.log(empirical_p)
entropy = -np.sum(np.multiply(empirical_p, log_p))
return entropy
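A worked example: for a balanced binary column the empirical entropy is log(2) ≈ 0.693 nats, since np.log is the natural logarithm:

import numpy as np

X = np.array([0, 1, 0, 1, 0, 1])
print(calculate_disc_entropy(X))  # ~0.6931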
def estimate_cont_entropy(X, epsilon=None):
"""Estimate the Shannon entropy of a continuous dataset.
Based on the Kraskov [1] and Kozachenko [2]
estimators for a dataset's Shannon entropy.
The function relies on nonparametric methods based on entropy
estimation from k-nearest neighbors distances as proposed
in [1] and augmented in [2] for mutual information estimation.
If X's columns logically represent discrete features,
it is better to use the calculate_disc_entropy function.
If you are unsure of which to use, estimate_entropy can
take datasets of mixed discrete and continuous features.
Args:
X (array-like): An array-like (np arr, pandas df, etc.) with shape
(n_samples, n_features) or (n_samples)
epsilon (array-like): An array with shape (n_samples, 1) that is
the epsilon used in Kraskov Estimator. Represents the chebyshev
distance from an element to its k-th nearest neighbor in the full
dataset.
Returns:
float: A floating-point number. If epsilon is not provided,
this will be the Kozachenko Estimator of the dataset's entropy.
If epsilon is provided, this is a partial estimation of the Kraskov
entropy estimator. The bias is cancelled out when computing
mutual information.
References:
.. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
information". Phys. Rev. E 69, 2004.
.. [2] L. F. Kozachenko, N. N. Leonenko, "Sample Estimate of the Entropy
of a Random Vector", Probl. Peredachi Inf., 23:2 (1987), 9-16
"""
X = asarray2d(X)
n_samples, n_features = X.shape
if n_samples <= 1:
return 0
nn = NearestNeighbors(
metric='chebyshev',
n_neighbors=NUM_NEIGHBORS,
algorithm='kd_tree')
nn.fit(X)
if epsilon is None:
# If epsilon is not provided, revert to the Kozachenko Estimator
n_neighbors = NUM_NEIGHBORS
radius = 0
# While we have non-zero radii, calculate for a larger k
# Potentially expensive
while not np.all(radius) and n_neighbors < n_samples:
distances, _ = nn.kneighbors(
n_neighbors=n_neighbors, return_distance=True)
radius = distances[:, -1]
n_neighbors += 1
if n_neighbors == n_samples:
# This case only happens if all samples are the same
# e.g. this isn't a continuous sample...
raise ValueError('Should not have discrete column to estimate')
return -digamma(n_neighbors) + digamma(n_samples) + \
n_features * np.mean(np.log(2 * radius))
else:
ind = nn.radius_neighbors(
radius=epsilon.ravel(),
return_distance=False)
nx = np.array([i.size for i in ind])
return - np.mean(digamma(nx + 1)) + digamma(n_samples)
def estimate_entropy(X, epsilon=None):
r"""Estimate a dataset's Shannon entropy.
This function can take datasets of mixed discrete and continuous
features, and uses a set of heuristics to determine which functions
to apply to each.
Because this function is a subroutine in a mutual information estimator,
we employ the Kozachenko Estimator[1] for continuous features when this
function is _not_ used for mutual information and an adaptation of the
Kraskov Estimator[2] when it is.
Let X be made of continuous features c and discrete features d.
To deal with both continuous and discrete features, We use the
following reworking of entropy:
$ H(X) = H(c,d) = \sum_{x \in d} p(x) \times H(c(x)) + H(d) $
Where c(x) is a dataset that represents the rows of the continuous dataset
in the same row as a discrete column with value x in the original dataset.
Args:
X (array-like): An array-like (np arr, pandas df, etc.) with shape
(n_samples, n_features) or (n_samples)
epsilon (array-like): An array with shape (n_samples, 1) that is
the epsilon used in Kraskov Estimator. Represents the chebyshev
distance from an element to its k-th nearest neighbor in the full
dataset.
Returns:
float: A floating-point number representing the entropy in X.
If the dataset is fully discrete, an exact calculation is done.
If this is not the case and epsilon is not provided, this
will be the Kozachenko Estimator of the dataset's entropy.
If epsilon is provided, this is a partial estimation of the
Kraskov entropy estimator. The bias is cancelled out when
computing mutual information.
References:
.. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
information". Phys. Rev. E 69, 2004.
.. [2] L. F. Kozachenko, N. N. Leonenko, "Sample Estimate of the Entropy
of a Random Vector", Probl. Peredachi Inf., 23:2 (1987), 9-16.
"""
X = asarray2d(X)
n_samples, n_features = X.shape
if n_features < 1:
return 0
disc_mask = _get_discrete_columns(X)
cont_mask = ~disc_mask
# If our dataset is fully discrete/continuous, do something easier
if np.all(disc_mask):
return calculate_disc_entropy(X)
elif np.all(cont_mask):
return estimate_cont_entropy(X, epsilon)
# Separate the dataset into discrete and continuous datasets d,c
disc_features = asarray2d(X[:, disc_mask])
cont_features = asarray2d(X[:, cont_mask])
entropy = 0
uniques, counts = np.unique(disc_features, axis=0, return_counts=True)
empirical_p = counts / n_samples
# $\sum_{x \in d} p(x) \times H(c(x))$
for i in range(counts.size):
unique_mask = np.all(disc_features == uniques[i], axis=1)
selected_cont_samples = cont_features[unique_mask, :]
if epsilon is None:
selected_epsilon = None
else:
selected_epsilon = epsilon[unique_mask, :]
conditional_cont_entropy = estimate_cont_entropy(
selected_cont_samples, selected_epsilon)
entropy += empirical_p[i] * conditional_cont_entropy
# H(d)
entropy += calculate_disc_entropy(disc_features)
if epsilon is None:
entropy = max(0, entropy)
return entropy
def _calculate_epsilon(X):
"""Calculates epsilon, a subroutine for the Kraskov Estimator [1]
Represents the chebyshev distance of each dataset element to its
K-th nearest neighbor.
Args:
X (array-like): An array with shape (n_samples, n_features)
Returns:
array-like: An array with shape (n_samples, 1) representing
epsilon as described above.
References:
.. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
information". Phys. Rev. E 69, 2004.
"""
disc_mask = _get_discrete_columns(X)
if np.all(disc_mask):
# if all discrete columns, there's no point getting epsilon
return 0
cont_features = X[:, ~disc_mask]
nn = NearestNeighbors(metric='chebyshev', n_neighbors=NUM_NEIGHBORS)
nn.fit(cont_features)
distances, _ = nn.kneighbors()
epsilon = np.nextafter(distances[:, -1], 0)
return asarray2d(epsilon)
def estimate_conditional_information(x, y, z):
"""Estimate the conditional mutual information of three datasets.
Conditional mutual information is the
mutual information of two datasets, given a third:
$ I(x;y|z) = H(x,z) + H(y,z) - H(x,y,z) - H(z) $
Where H(x) is the Shannon entropy of x. For continuous datasets,
adapts the Kraskov Estimator [1] for mutual information.
Equation 8 still holds because the epsilon terms cancel out:
Let d_x represent the dimensionality of the continuous portion of x.
Then, we see that:
d_xz + d_yz - d_xyz - d_z =
(d_x + d_z) + (d_y + d_z) - (d_x + d_y + d_z) - d_z = 0
Args:
x (array-like): An array with shape (n_samples, n_features_x)
y (array-like): An array with shape (n_samples, n_features_y)
z (array-like): An array with shape (n_samples, n_features_z).
This is the dataset being conditioned on.
Returns:
float: A floating point number representing the conditional
mutual information of x and y given z. This calculation is
*exact* for entirely discrete datasets and *approximate*
if there are continuous columns present.
References:
.. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
information". Phys. Rev. E 69, 2004.
"""
xz = np.concatenate((x, z), axis=1)
yz = np.concatenate((y, z), axis=1)
xyz = np.concatenate((xz, y), axis=1)
epsilon = _calculate_epsilon(xyz)
h_xz = estimate_entropy(xz, epsilon)
h_yz = estimate_entropy(yz, epsilon)
h_xyz = estimate_entropy(xyz, epsilon)
h_z = estimate_entropy(z, epsilon)
return max(0, h_xz + h_yz - h_xyz - h_z)
def estimate_mutual_information(x, y):
"""Estimate the mutual information of two datasets.
Mutual information is a measure of dependence between
two datasets and is calculated as:
$I(x;y) = H(x) + H(y) - H(x,y)$
Where H(x) is the Shannon entropy of x. For continuous datasets,
adapts the Kraskov Estimator [1] for mutual information.
Args:
x (array-like): An array with shape (n_samples, n_features_x)
y (array-like): An array with shape (n_samples, n_features_y)
Returns:
float: A floating point number representing the mutual
information of x and y. This calculation is *exact*
for entirely discrete datasets and *approximate* if
there are continuous columns present.
References:
.. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
information". Phys. Rev. E 69, 2004.
"""
xy = np.concatenate((x, y), axis=1)
epsilon = _calculate_epsilon(xy)
h_x = estimate_entropy(x, epsilon)
h_y = estimate_entropy(y, epsilon)
h_xy = estimate_entropy(xy, epsilon)
return max(0, h_x + h_y - h_xy)
def get_diff_endpoints_from_commit_range(repo, commit_range):
"""Get endpoints of a diff given a commit range
The resulting endpoints can be diffed directly::
a, b = get_diff_endpoints_from_commit_range(repo, commit_range)
a.diff(b)
For details on specifying git diffs, see ``git diff --help``.
For details on specifying revisions, see ``git help revisions``.
Args:
repo (git.Repo): Repo object initialized with project root
commit_range (str): commit range as would be interpreted by ``git
diff`` command. Unfortunately only patterns of the form ``a..b``
and ``a...b`` are accepted. Note that the latter pattern finds the
merge-base of a and b and uses it as the starting point for the
diff.
Returns:
Tuple[git.Commit, git.Commit]: starting commit, ending commit (
inclusive)
Raises:
ValueError: commit_range is empty or ill-formed
See also:
<https://stackoverflow.com/q/7251477>
"""
if not commit_range:
raise ValueError('commit_range cannot be empty')
result = re_find(COMMIT_RANGE_REGEX, commit_range)
if not result:
raise ValueError(
'Expected diff str of the form \'a..b\' or \'a...b\' (got {})'
.format(commit_range))
a, b = result['a'], result['b']
a, b = repo.rev_parse(a), repo.rev_parse(b)
if result['thirddot']:
a = one_or_raise(repo.merge_base(a, b))
return a, b
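A usage sketch with GitPython; the repository path is hypothetical:

import git

repo = git.Repo('/path/to/project')
a, b = get_diff_endpoints_from_commit_range(repo, 'master...HEAD')
for diff in a.diff(b):
    print(diff.change_type, diff.b_path)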
def set_config_variables(repo, variables):
"""Set config variables
Args:
repo (git.Repo): repo
variables (dict): entries of the form 'user.email': 'you@example.com'
"""
with repo.config_writer() as writer:
for k, value in variables.items():
section, option = k.split('.')
writer.set_value(section, option, value)
writer.release()
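For example, with hypothetical values:

set_config_variables(repo, {
    'user.name': 'Jane Doe',
    'user.email': 'jane@example.com',
})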
def validate(self):
"""Collect and validate all new features"""
changes = self.change_collector.collect_changes()
features = []
imported_okay = True
for importer, modname, modpath in changes.new_feature_info:
try:
mod = importer()
features.extend(_get_contrib_features(mod))
except (ImportError, SyntaxError):
logger.info(
'Failed to import module at {}'
.format(modpath))
logger.exception('Exception details: ')
imported_okay = False
if not imported_okay:
return False
# if no features were added at all, reject
if not features:
logger.info('Failed to collect any new features.')
return False
return all(
validate_feature_api(feature, self.X, self.y, subsample=False)
for feature in features
)
def load_config_at_path(path):
"""Load config at exact path
Args:
path (path-like): path to config file
Returns:
dict: config dict
"""
if path.exists() and path.is_file():
with path.open('r') as f:
return yaml.load(f, Loader=yaml.SafeLoader)
else:
raise ConfigurationError("Couldn't find ballet.yml config file.")
def config_get(config, *path, default=None):
"""Get a configuration option following a path through the config
Example usage:
>>> config_get(config,
'problem', 'problem_type_details', 'scorer',
default='accuracy')
Args:
config (dict): config dict
*path (list[str]): List of config sections and options to follow.
default (default=None): A default value to return in the case that
the option does not exist.
"""
o = object()
result = get_in(config, path, default=o)
if result is not o:
return result
else:
return default
def make_config_get(conf_path):
"""Return a function to get configuration options for a specific project
Args:
conf_path (path-like): path to project's conf file (i.e. foo.conf
module)
"""
project_root = _get_project_root_from_conf_path(conf_path)
config = load_config_in_dir(project_root)
return partial(config_get, config)
def relative_to_contrib(diff, project):
"""Compute relative path of changed file to contrib dir
Args:
diff (git.diff.Diff): file diff
project (Project): project
Returns:
Path
"""
path = pathlib.Path(diff.b_path)
contrib_path = project.contrib_module_path
return path.relative_to(contrib_path)
def pr_num(self):
"""Return the PR number or None if not on a PR"""
result = get_pr_num(repo=self.repo)
if result is None:
result = get_travis_pr_num()
return result
def branch(self):
"""Return the name of the current branch, or None if it cannot be determined"""
result = get_branch(repo=self.repo)
if result is None:
result = get_travis_branch()
return result
def path(self):
"""Return the project path (aka project root)
If ``package.__file__`` is ``/foo/foo/__init__.py``, then project.path
should be ``/foo``.
"""
return pathlib.Path(self.package.__file__).resolve().parent.parent
def asarray2d(a):
"""Cast to 2d array"""
arr = np.asarray(a)
if arr.ndim == 1:
arr = arr.reshape(-1, 1)
return arr
def get_arr_desc(arr):
"""Get array description, in the form '<array type> <array shape>'"""
type_ = type(arr).__name__ # see also __qualname__
shape = getattr(arr, 'shape', None)
if shape is not None:
desc = '{type_} {shape}'
else:
desc = '{type_} <no shape>'
return desc.format(type_=type_, shape=shape)
def indent(text, n=4):
"""Indent each line of text by n spaces"""
_indent = ' ' * n
return '\n'.join(_indent + line for line in text.split('\n'))
def has_nans(obj):
"""Check if obj has any NaNs
Compatible with different behavior of np.isnan, which sometimes applies
over all axes (py35, py36) and sometimes does not (py34).
"""
nans = np.isnan(obj)
while np.ndim(nans):
nans = np.any(nans)
return bool(nans)
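For example:
>>> import numpy as np
>>> has_nans(np.array([[1.0, np.nan]]))
True
>>> has_nans([1.0, 2.0])
False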
def needs_path(f):
"""Wraps a function that accepts path-like to give it a pathlib.Path"""
@wraps(f)
def wrapped(pathlike, *args, **kwargs):
path = pathlib.Path(pathlike)
return f(path, *args, **kwargs)
return wrapped
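A usage sketch; the decorated function and file path are hypothetical:

@needs_path
def count_lines(path):
    # receives a pathlib.Path even when called with a plain str
    return len(path.read_text().splitlines())

count_lines('/tmp/example.txt')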
def import_module_at_path(modname, modpath):
"""Import module from path that may not be on system path
Args:
modname (str): module name from package root, e.g. foo.bar
modpath (str): absolute path to module itself,
e.g. /home/user/foo/bar.py. In the case of a module that is a
package, then the path should be specified as '/home/user/foo' and
a file '/home/user/foo/__init__.py' *must be present* or the import
will fail.
Examples:
>>> modname = 'foo.bar.baz'
>>> modpath = '/home/user/foo/bar/baz.py'
>>> import_module_at_path(modname, modpath)
<module 'foo.bar.baz' from '/home/user/foo/bar/baz.py'>
>>> modname = 'foo.bar'
>>> modpath = '/home/user/foo/bar'
>>> import_module_at_path(modname, modpath)
<module 'foo.bar' from '/home/user/foo/bar/__init__.py'>
"""
# TODO just keep navigating up in the source tree until an __init__.py is
# not found?
modpath = pathlib.Path(modpath).resolve()
if modpath.name == '__init__.py':
# TODO improve debugging output with recommend change
raise ValueError('Don\'t provide the __init__.py!')
def is_package(modpath):
return modpath.suffix != '.py'
def has_init(dir):
return dir.joinpath('__init__.py').is_file()
def has_package_structure(modname, modpath):
modparts = modname.split('.')
n = len(modparts)
dir = modpath
if not is_package(modpath):
n = n - 1
dir = dir.parent
while n > 0:
if not has_init(dir):
return False
dir = dir.parent
n = n - 1
return True
if not has_package_structure(modname, modpath):
raise ImportError('Module does not have valid package structure.')
parentpath = str(pathlib.Path(modpath).parent)
finder = pkgutil.get_importer(parentpath)
loader = finder.find_module(modname)
if loader is None:
raise ImportError(
'Failed to find loader for module {} within dir {}'
.format(modname, parentpath))
mod = loader.load_module(modname)
# TODO figure out what to do about this
assert mod.__name__ == modname
return mod
def relpath_to_modname(relpath):
"""Convert relative path to module name
Within a project, a path to the source file is uniquely identified with a
module name. Relative paths of the form 'foo/bar' are *not* converted to
module names 'foo.bar', because (1) they identify directories, not regular
files, and (2) already 'foo/bar/__init__.py' would claim that conversion.
Args:
relpath (str): Relative path from some location on sys.path
Example:
>>> relpath_to_modname('ballet/util/_util.py')
'ballet.util._util'
"""
# don't try to resolve!
p = pathlib.Path(relpath)
if p.name == '__init__.py':
p = p.parent
elif p.suffix == '.py':
p = p.with_suffix('')
else:
msg = 'Cannot convert a non-python file to a modname'
msg_detail = 'The relpath given is: {}'.format(relpath)
logger.error(msg + '\n' + msg_detail)
raise ValueError(msg)
return '.'.join(p.parts)
def modname_to_relpath(modname, project_root=None, add_init=True):
"""Convert module name to relative path.
The project root is usually needed to detect if the module is a package, in
which case the relevant file is the `__init__.py` within the subdirectory.
Example:
>>> modname_to_relpath('foo.features')
'foo/features.py'
>>> modname_to_relpath('foo.features',
project_root='/path/to/project')
'foo/features/__init__.py'
Args:
modname (str): Module name, e.g. `os.path`
project_root (str): Path to project root
add_init (bool): Whether to add `__init__.py` to the path of modules
that are packages. Defaults to True
Returns:
str
"""
parts = modname.split('.')
relpath = pathlib.Path(*parts)
# is the module a package? if so, the relpath identifies a directory
# it is easier to check for whether a file is a directory than to try to
# import the module dynamically and see whether it is a package
if project_root is not None:
relpath_resolved = pathlib.Path(project_root).joinpath(relpath)
else:
relpath_resolved = relpath
if relpath_resolved.is_dir():
if add_init:
relpath = relpath.joinpath('__init__.py')
else:
relpath = str(relpath) + '.py'
return str(relpath)
def check(self, feature):
"""Check that the feature's `input` is a str or Iterable[str]"""
input = feature.input
is_str = isa(str)
is_nested_str = all_fn(
iterable, lambda x: all(is_str, x))
assert is_str(input) or is_nested_str(input)
def check(self, feature):
"""Check that the feature has a fit/transform/fit_transform interface"""
assert hasattr(feature.transformer, 'fit')
assert hasattr(feature.transformer, 'transform')
assert hasattr(feature.transformer, 'fit_transform')
def check(self, feature):
"""Check that fit can be called on reference data"""
mapper = feature.as_dataframe_mapper()
mapper.fit(self.X, y=self.y)
def check(self, feature):
"""Check that fit_transform can be called on reference data"""
mapper = feature.as_dataframe_mapper()
mapper.fit_transform(self.X, y=self.y)
def check(self, feature):
"""Check that the dimensions of the transformed data are correct
For input X, an n x p array, a n x q array should be produced,
where q is the number of features produced by the logical feature.
"""
mapper = feature.as_dataframe_mapper()
X = mapper.fit_transform(self.X, y=self.y)
assert self.X.shape[0] == X.shape[0]
def check(self, feature):
"""Check that the feature can be pickled
This is needed for saving the pipeline to disk
"""
buf = io.BytesIO()
try:
pickle.dump(feature, buf, protocol=pickle.HIGHEST_PROTOCOL)
buf.seek(0)
new_feature = pickle.load(buf)
assert new_feature is not None
assert isinstance(new_feature, Feature)
finally:
buf.close()
def check(self, feature):
"""Check that the output of the transformer has no missing values"""
mapper = feature.as_dataframe_mapper()
X = mapper.fit_transform(self.X, y=self.y)
assert not np.any(np.isnan(X))
def make_multi_lagger(lags, groupby_kwargs=None):
"""Return a union of transformers that apply different lags
Args:
lags (Collection[int]): collection of lags to apply
groupby_kwargs (dict): keyword arguments to pd.DataFrame.groupby
"""
laggers = [SingleLagger(l, groupby_kwargs=groupby_kwargs) for l in lags]
feature_union = FeatureUnion([
(repr(lagger), lagger) for lagger in laggers
])
return feature_union
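A usage sketch, assuming SingleLagger shifts each group by the given number of rows; the groupby key is hypothetical:

lagger = make_multi_lagger([1, 7], groupby_kwargs={'by': 'entity_id'})
X_lagged = lagger.fit_transform(X_df)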
def start_new_feature(**cc_kwargs):
"""Start a new feature within a ballet project
Renders the feature template into a temporary directory, then copies the
feature files into the proper path within the contrib directory.
Args:
**cc_kwargs: options for the cookiecutter template
Raises:
ballet.exc.BalletError: the new feature has the same name as an
existing one
"""
project = Project.from_path(pathlib.Path.cwd().resolve())
contrib_dir = project.get('contrib', 'module_path')
with tempfile.TemporaryDirectory() as tempdir:
# render feature template
output_dir = tempdir
cc_kwargs['output_dir'] = output_dir
rendered_dir = render_feature_template(**cc_kwargs)
# copy into contrib dir
src = rendered_dir
dst = contrib_dir
synctree(src, dst, onexist=_fail_if_feature_exists)
logger.info('Start new feature successful.')
def get_proposed_feature(project):
"""Get the proposed feature
The path of the proposed feature is determined by diffing the project
against a comparison branch, such as master. The feature is then imported
from that path and returned.
Args:
project (ballet.project.Project): project info
Raises:
ballet.exc.BalletError: more than one feature collected
"""
change_collector = ChangeCollector(project)
collected_changes = change_collector.collect_changes()
try:
new_feature_info = one_or_raise(collected_changes.new_feature_info)
importer, _, _ = new_feature_info
except ValueError:
raise BalletError('Too many features collected')
module = importer()
feature = _get_contrib_feature_from_module(module)
return feature | Get the proposed feature
The path of the proposed feature is determined by diffing the project
against a comparison branch, such as master. The feature is then imported
from that path and returned.
Args:
project (ballet.project.Project): project info
Raises:
ballet.exc.BalletError: more than one feature collected | entailment |
def get_accepted_features(features, proposed_feature):
"""Deselect candidate features from list of all features
Args:
features (List[Feature]): collection of all features in the ballet
project: both accepted features and candidate ones that have not
been accepted
proposed_feature (Feature): candidate feature that has not been
accepted
Returns:
List[Feature]: list of features with the proposed feature not in it.
Raises:
ballet.exc.BalletError: Could not deselect exactly the proposed
feature.
"""
def eq(feature):
"""Features are equal if they have the same source
At least in this implementation...
"""
return feature.source == proposed_feature.source
# deselect features that match the proposed feature
result = lfilter(complement(eq), features)
if len(features) - len(result) == 1:
return result
elif len(result) == len(features):
raise BalletError(
'Did not find match for proposed feature within \'contrib\'')
else:
raise BalletError(
'Unexpected condition (n_features={}, n_result={})'
.format(len(features), len(result))) | Deselect candidate features from list of all features
Args:
features (List[Feature]): collection of all features in the ballet
project: both accepted features and candidate ones that have not
been accepted
proposed_feature (Feature): candidate feature that has not been
accepted
Returns:
List[Feature]: list of features with the proposed feature not in it.
Raises:
ballet.exc.BalletError: Could not deselect exactly the proposed
feature. | entailment |
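The deselection can be sketched in isolation; a hypothetical FakeFeature with only a source attribute stands in for Feature, and funcy is assumed to be installed:

from funcy import complement, lfilter

class FakeFeature:
    def __init__(self, source):
        self.source = source

features = [FakeFeature('user_a.feature_x'), FakeFeature('user_b.feature_y')]
proposed = FakeFeature('user_b.feature_y')

def eq(feature):
    return feature.source == proposed.source

accepted = lfilter(complement(eq), features)
print([f.source for f in accepted])  # ['user_a.feature_x']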
def collect_changes(self):
"""Collect file and feature changes
Steps
1. Collects the files that have changed in this pull request as
compared to a comparison branch.
2. Categorize these file changes into admissible or inadmissible file
changes. Admissible file changes solely contribute python files to
the contrib subdirectory.
3. Collect features from admissible new files.
Returns:
CollectedChanges
"""
file_diffs = self._collect_file_diffs()
candidate_feature_diffs, valid_init_diffs, inadmissible_diffs = \
self._categorize_file_diffs(file_diffs)
new_feature_info = self._collect_feature_info(candidate_feature_diffs)
return CollectedChanges(
file_diffs, candidate_feature_diffs, valid_init_diffs,
inadmissible_diffs, new_feature_info) | Collect file and feature changes
Steps
1. Collects the files that have changed in this pull request as
compared to a comparison branch.
2. Categorize these file changes into admissible or inadmissible file
changes. Admissible file changes solely contribute python files to
the contrib subdirectory.
3. Collect features from admissible new files.
Returns:
CollectedChanges | entailment |
def _categorize_file_diffs(self, file_diffs):
"""Partition file changes into admissible and inadmissible changes"""
# TODO move this into a new validator
candidate_feature_diffs = []
valid_init_diffs = []
inadmissible_files = []
for diff in file_diffs:
valid, failures = check_from_class(
ProjectStructureCheck, diff, self.project)
if valid:
if pathlib.Path(diff.b_path).parts[-1] != '__init__.py':
candidate_feature_diffs.append(diff)
logger.debug(
'Categorized {file} as CANDIDATE FEATURE MODULE'
.format(file=diff.b_path))
else:
valid_init_diffs.append(diff)
logger.debug(
'Categorized {file} as VALID INIT MODULE'
.format(file=diff.b_path))
else:
inadmissible_files.append(diff)
logger.debug(
'Categorized {file} as INADMISSIBLE; '
'failures were {failures}'
.format(file=diff.b_path, failures=failures))
logger.info(
'Admitted {} candidate feature{} '
'and {} __init__ module{} '
'and rejected {} file{}'
.format(len(candidate_feature_diffs),
make_plural_suffix(candidate_feature_diffs),
len(valid_init_diffs),
make_plural_suffix(valid_init_diffs),
len(inadmissible_files),
make_plural_suffix(inadmissible_files)))
return candidate_feature_diffs, valid_init_diffs, inadmissible_files | Partition file changes into admissible and inadmissible changes | entailment |
def _collect_feature_info(self, candidate_feature_diffs):
"""Collect feature info
Args:
candidate_feature_diffs (List[git.diff.Diff]): list of Diffs
corresponding to admissible file changes compared to
comparison ref
Returns:
Iterator[Tuple]: tuples of importer, module name, and module
path. The "importer" is a callable that returns a module
"""
project_root = self.project.path
for diff in candidate_feature_diffs:
path = diff.b_path
modname = relpath_to_modname(path)
modpath = project_root.joinpath(path)
importer = partial(import_module_at_path, modname, modpath)
yield importer, modname, modpath | Collect feature info
Args:
candidate_feature_diffs (List[git.diff.Diff]): list of Diffs
corresponding to admissible file changes compared to
comparison ref
Returns:
Iterator[Tuple]: tuples of importer, module name, and module
path. The "importer" is a callable that returns a module | entailment |
def get_travis_branch():
"""Get current branch per Travis environment variables
If travis is building a PR, then TRAVIS_PULL_REQUEST is truthy and the
name of the branch corresponding to the PR is stored in the
TRAVIS_PULL_REQUEST_BRANCH environment variable. Else, the name of the
branch is stored in the TRAVIS_BRANCH environment variable.
See also: <https://docs.travis-ci.com/user/environment-variables/#default-environment-variables>
""" # noqa E501
try:
travis_pull_request = get_travis_env_or_fail('TRAVIS_PULL_REQUEST')
if truthy(travis_pull_request):
travis_pull_request_branch = get_travis_env_or_fail(
'TRAVIS_PULL_REQUEST_BRANCH')
return travis_pull_request_branch
else:
travis_branch = get_travis_env_or_fail('TRAVIS_BRANCH')
return travis_branch
except UnexpectedTravisEnvironmentError:
return None | Get current branch per Travis environment variables
If travis is building a PR, then TRAVIS_PULL_REQUEST is truthy and the
name of the branch corresponding to the PR is stored in the
TRAVIS_PULL_REQUEST_BRANCH environment variable. Else, the name of the
branch is stored in the TRAVIS_BRANCH environment variable.
See also: <https://docs.travis-ci.com/user/environment-variables/#default-environment-variables> | entailment |
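A hedged re-sketch of the same lookup using plain os.environ; Travis sets TRAVIS_PULL_REQUEST to the literal string 'false' for non-PR builds, and the function name below is hypothetical:

import os

def travis_branch_sketch():
    # 'false' marks a non-PR build on Travis
    pr = os.environ.get('TRAVIS_PULL_REQUEST', 'false')
    if pr != 'false':
        return os.environ.get('TRAVIS_PULL_REQUEST_BRANCH')
    return os.environ.get('TRAVIS_BRANCH')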
def make_mapper(features):
"""Make a DataFrameMapper from a feature or list of features
Args:
features (Union[Feature, List[Feature]]): feature or list of features
Returns:
DataFrameMapper: mapper made from features
"""
if not features:
features = Feature(input=[], transformer=NullTransformer())
if not iterable(features):
features = (features, )
return DataFrameMapper(
[t.as_input_transformer_tuple() for t in features],
input_df=True) | Make a DataFrameMapper from a feature or list of features
Args:
features (Union[Feature, List[Feature]]): feature or list of features
Returns:
DataFrameMapper: mapper made from features | entailment |
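For reference, the (input, transformer) tuples passed to the mapper look like the pair below; this sketch assumes sklearn_pandas is installed and stands in for Feature.as_input_transformer_tuple:

import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn_pandas import DataFrameMapper

# one (input columns, transformer) pair, as a Feature would provide
mapper = DataFrameMapper([(['a'], StandardScaler())], input_df=True)
df = pd.DataFrame({'a': [1.0, 2.0, 3.0]})
print(mapper.fit_transform(df))  # the column standardized to zero mean, unit variance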
def _name_estimators(estimators):
"""Generate names for estimators.
Adapted from sklearn.pipeline._name_estimators
"""
def get_name(estimator):
if isinstance(estimator, DelegatingRobustTransformer):
return get_name(estimator._transformer)
return type(estimator).__name__.lower()
names = list(map(get_name, estimators))
counter = dict(Counter(names))
counter = select_values(lambda x: x > 1, counter)
for i in reversed(range(len(estimators))):
name = names[i]
if name in counter:
names[i] += "-%d" % counter[name]
counter[name] -= 1
return list(zip(names, estimators)) | Generate names for estimators.
Adapted from sklearn.pipeline._name_estimators | entailment |
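The deduplication is easy to demonstrate standalone; this sketch mirrors the counting logic with two estimators of the same class:

from collections import Counter
from sklearn.preprocessing import MinMaxScaler, StandardScaler

estimators = [StandardScaler(), StandardScaler(), MinMaxScaler()]
names = [type(e).__name__.lower() for e in estimators]
counter = {name: count for name, count in Counter(names).items() if count > 1}
for i in reversed(range(len(estimators))):
    name = names[i]
    if name in counter:
        names[i] += "-%d" % counter[name]
        counter[name] -= 1
print(names)  # ['standardscaler-1', 'standardscaler-2', 'minmaxscaler']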
def _push(project):
"""Push default branch and project template branch to remote
With default config (i.e. remote and branch names), equivalent to::
$ git push origin master:master project-template:project-template
Raises:
ballet.exc.BalletError: Push failed in some way
"""
repo = project.repo
remote_name = project.get('project', 'remote')
remote = repo.remote(remote_name)
result = _call_remote_push(remote)
failures = lfilter(complement(did_git_push_succeed), result)
if failures:
for push_info in failures:
logger.error(
'Failed to push ref {from_ref} to {to_ref}'
.format(from_ref=push_info.local_ref.name,
to_ref=push_info.remote_ref.name))
raise BalletError('Push failed') | Push default branch and project template branch to remote
With default config (i.e. remote and branch names), equivalent to::
$ git push origin master:master project-template:project-template
Raises:
ballet.exc.BalletError: Push failed in some way | entailment |
def build(X_df=None, y_df=None):
"""Build features and target
Args:
X_df (DataFrame): raw variables
y_df (DataFrame): raw target
Returns:
dict with keys X_df, features, mapper_X, X, y_df, encoder_y, y
"""
if X_df is None:
X_df, _ = load_data()
if y_df is None:
_, y_df = load_data()
features = get_contrib_features()
mapper_X = ballet.feature.make_mapper(features)
X = mapper_X.fit_transform(X_df)
encoder_y = get_target_encoder()
y = encoder_y.fit_transform(y_df)
return {
'X_df': X_df,
'features': features,
'mapper_X': mapper_X,
'X': X,
'y_df': y_df,
'encoder_y': encoder_y,
'y': y,
} | Build features and target
Args:
X_df (DataFrame): raw variables
y_df (DataFrame): raw target
Returns:
dict with keys X_df, features, mapper_X, X, y_df, encoder_y, y | entailment |
def main(input_dir, output_dir):
"""Engineer features"""
import ballet.util.log
ballet.util.log.enable(logger=logger, level='INFO', echo=False)
ballet.util.log.enable(logger=ballet.util.log.logger, level='INFO',
echo=False)
X_df, y_df = load_data(input_dir=input_dir)
out = build(X_df, y_df)
mapper_X = out['mapper_X']
encoder_y = out['encoder_y']
X_ft = mapper_X.transform(X_df)
y_ft = encoder_y.transform(y_df)
save_features(X_ft, output_dir)
save_targets(y_ft, output_dir) | Engineer features | entailment |
def load_data(input_dir=None):
"""Load data"""
if input_dir is not None:
tables = conf.get('tables')
entities_table_name = conf.get('data', 'entities_table_name')
entities_config = some(where(tables, name=entities_table_name))
X = load_table_from_config(input_dir, entities_config)
targets_table_name = conf.get('data', 'targets_table_name')
targets_config = some(where(tables, name=targets_table_name))
y = load_table_from_config(input_dir, targets_config)
else:
raise NotImplementedError
return X, y | Load data | entailment |
def _write_header(self, epoch_data: EpochData) -> None:
"""
Write CSV header row with column names.
Column names are inferred from the ``epoch_data`` and ``self.variables`` (if specified).
Variables and streams expected later on are stored in ``self._variables`` and ``self._streams`` respectively.
:param epoch_data: epoch data to be logged
"""
self._variables = self._variables or list(next(iter(epoch_data.values())).keys())
self._streams = epoch_data.keys()
header = ['"epoch_id"']
for stream_name in self._streams:
header += [stream_name + '_' + var for var in self._variables]
with open(self._file_path, 'a') as file:
file.write(self._delimiter.join(header) + '\n')
self._header_written = True | Write CSV header row with column names.
Column names are inferred from the ``epoch_data`` and ``self.variables`` (if specified).
Variables and streams expected later on are stored in ``self._variables`` and ``self._streams`` respectively.
:param epoch_data: epoch data to be logged | entailment |
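The resulting column layout can be previewed directly; stream and variable names below are made up:

streams = ['train', 'valid']
variables = ['loss', 'accuracy']
header = ['"epoch_id"'] + [s + '_' + v for s in streams for v in variables]
print(','.join(header))
# "epoch_id",train_loss,train_accuracy,valid_loss,valid_accuracy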
def _write_row(self, epoch_id: int, epoch_data: EpochData) -> None:
"""
Write a single epoch result row to the CSV file.
:param epoch_id: epoch number (will be written at the first column)
:param epoch_data: epoch data
:raise KeyError: if the variable is missing and ``self._on_missing_variable`` is set to ``error``
:raise TypeError: if the variable has wrong type and ``self._on_unknown_type`` is set to ``error``
"""
# list of values to be written
values = [epoch_id]
for stream_name in self._streams:
for variable_name in self._variables:
column_name = stream_name+'_'+variable_name
try:
value = epoch_data[stream_name][variable_name]
except KeyError as ex:
err_message = '`{}` not found in epoch data.'.format(column_name)
if self._on_missing_variable == 'error':
raise KeyError(err_message) from ex
elif self._on_missing_variable == 'warn':
logging.warning(err_message)
values.append(self._default_value)
continue
if isinstance(value, dict) and 'mean' in value:
value = value['mean']
elif isinstance(value, dict) and 'nanmean' in value:
value = value['nanmean']
if np.isscalar(value):
values.append(value)
else:
err_message = 'Variable `{}` value is not scalar.'.format(variable_name)
if self._on_unknown_type == 'error':
raise TypeError(err_message)
elif self._on_unknown_type == 'warn':
logging.warning(err_message)
values.append(self._default_value)
# write the row
with open(self._file_path, 'a') as file:
row = self._delimiter.join([str(value) for value in values])
file.write(row + '\n') | Write a single epoch result row to the CSV file.
:param epoch_id: epoch number (will be written at the first column)
:param epoch_data: epoch data
:raise KeyError: if the variable is missing and ``self._on_missing_variable`` is set to ``error``
:raise TypeError: if the variable has wrong type and ``self._on_unknown_type`` is set to ``error`` | entailment |
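The epoch data fed to the writer is a stream -> variable -> value mapping; a sketch of how a 'mean' aggregation is unwrapped before writing (values are made up):

epoch_data = {
    'train': {'loss': {'mean': 0.41}, 'accuracy': 0.87},
    'valid': {'loss': {'mean': 0.48}, 'accuracy': 0.84},
}
value = epoch_data['train']['loss']
if isinstance(value, dict) and 'mean' in value:
    value = value['mean']
print(value)  # 0.41, written under the train_loss column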
def after_epoch(self, epoch_id: int, epoch_data: EpochData) -> None:
"""
Write a new row to the CSV file with the given epoch data.
In the case of first invocation, create the CSV header.
:param epoch_id: number of the epoch
:param epoch_data: epoch data to be logged
"""
logging.debug('Saving epoch %d data to "%s"', epoch_id, self._file_path)
if not self._header_written:
self._write_header(epoch_data=epoch_data)
self._write_row(epoch_id=epoch_id, epoch_data=epoch_data) | Write a new row to the CSV file with the given epoch data.
In the case of first invocation, create the CSV header.
:param epoch_id: number of the epoch
:param epoch_data: epoch data to be logged | entailment |
def get_random_name(sep: str='-'):
"""
Generate random docker-like name with the given separator.
:param sep: adjective-name separator string
:return: random docker-like name
"""
r = random.SystemRandom()
return '{}{}{}'.format(r.choice(_left), sep, r.choice(_right)) | Generate random docker-like name with the given separator.
:param sep: adjective-name separator string
:return: random docker-like name | entailment |
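A self-contained sketch with tiny hypothetical word pools (the real _left and _right lists ship with the package):

import random

_left = ['brave', 'calm', 'eager']      # hypothetical adjective pool
_right = ['curie', 'hopper', 'turing']  # hypothetical surname pool

r = random.SystemRandom()
print('{}{}{}'.format(r.choice(_left), '-', r.choice(_right)))  # e.g. calm-turing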
def _check_train_time(self) -> None:
"""
Stop the training if the training time exceeded ``self._minutes``.
:raise TrainingTerminated: if the training time exceeded ``self._minutes``
"""
if self._minutes is not None and (datetime.now() - self._training_start).total_seconds()/60 > self._minutes:
raise TrainingTerminated('Training terminated after more than {} minutes'.format(self._minutes)) | Stop the training if the training time exceeded ``self._minutes``.
:raise TrainingTerminated: if the training time exceeded ``self._minutes`` | entailment |
def after_batch(self, stream_name: str, batch_data: Batch) -> None:
"""
If ``stream_name`` equals to :py:attr:`cxflow.constants.TRAIN_STREAM`,
increase the iterations counter and possibly stop the training; additionally, call :py:meth:`_check_train_time`.
:param stream_name: stream name
:param batch_data: ignored
:raise TrainingTerminated: if the number of iterations reaches ``self._iters``
"""
self._check_train_time()
if self._iters is not None and stream_name == self._train_stream_name:
self._iters_done += 1
if self._iters_done >= self._iters:
raise TrainingTerminated('Training terminated after iteration {}'.format(self._iters_done)) | If ``stream_name`` equals to :py:attr:`cxflow.constants.TRAIN_STREAM`,
increase the iterations counter and possibly stop the training; additionally, call :py:meth:`_check_train_time`.
:param stream_name: stream name
:param batch_data: ignored
:raise TrainingTerminated: if the number of iterations reaches ``self._iters`` | entailment |
def after_epoch(self, epoch_id: int, epoch_data: EpochData) -> None:
"""
Stop the training if the ``epoch_id`` reaches ``self._epochs``; additionally, call :py:meth:`_check_train_time`.
:param epoch_id: epoch id
:param epoch_data: ignored
:raise TrainingTerminated: if the ``epoch_id`` reaches ``self._epochs``
"""
self._check_train_time()
if self._epochs is not None and epoch_id >= self._epochs:
logging.info('EpochStopperHook triggered')
raise TrainingTerminated('Training terminated after epoch {}'.format(epoch_id)) | Stop the training if the ``epoch_id`` reaches ``self._epochs``; additionally, call :py:meth:`_check_train_time`.
:param epoch_id: epoch id
:param epoch_data: ignored
:raise TrainingTerminated: if the ``epoch_id`` reaches ``self._epochs`` | entailment |
def sanitize_url(url: str) -> str:
"""
Sanitize the given url so that it can be used as a valid filename.
:param url: url to create filename from
:raise ValueError: when the given url can not be sanitized
:return: created filename
"""
for part in reversed(url.split('/')):
filename = re.sub(r'[^a-zA-Z0-9_.\-]', '', part)
if len(filename) > 0:
break
else:
raise ValueError('Could not create reasonable name for file from url {}'.format(url))
return filename | Sanitize the given url so that it can be used as a valid filename.
:param url: url to create filename from
:raise ValueError: when the given url can not be sanitized
:return: created filename | entailment |
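A quick demonstration of the scan, reusing the same logic in a standalone sketch; the last URL part that survives sanitization wins:

import re

def sanitize_url_sketch(url):
    for part in reversed(url.split('/')):
        filename = re.sub(r'[^a-zA-Z0-9_.\-]', '', part)
        if filename:
            return filename
    raise ValueError('Could not create reasonable name for file from url {}'.format(url))

print(sanitize_url_sketch('https://example.com/data/mnist.tar.gz'))  # mnist.tar.gz
print(sanitize_url_sketch('https://example.com/data/'))              # data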
def maybe_download_and_extract(data_root: str, url: str) -> None:
"""
Maybe download the specified file to ``data_root`` and try to unpack it with ``shutil.unpack_archive``.
:param data_root: data root to download the files to
:param url: url to download from
"""
# make sure data_root exists
os.makedirs(data_root, exist_ok=True)
# create sanitized filename from url
filename = sanitize_url(url)
# check whether the archive already exists
filepath = os.path.join(data_root, filename)
if os.path.exists(filepath):
logging.info('\t`%s` already exists; skipping', filepath)
return
# download with progressbar
try:
logging.info('\tdownloading %s', filepath)
req = requests.get(url, stream=True)
req.raise_for_status()
except requests.exceptions.RequestException as ex:
logging.error('File `%s` could not be downloaded, %s', filepath, ex)
return
expected_size = int(req.headers.get('content-length', 0))
chunk_size = 1024
with open(filepath, 'wb') as f_out,\
click.progressbar(req.iter_content(chunk_size=chunk_size), length=expected_size // chunk_size) as bar:
for chunk in bar:
if chunk:
f_out.write(chunk)
f_out.flush()
# extract
try:
shutil.unpack_archive(filepath, data_root)
except (shutil.ReadError, ValueError):
logging.info('File `%s` could not be extracted by `shutil.unpack_archive`. Please process it manually.',
filepath) | Maybe download the specified file to ``data_root`` and try to unpack it with ``shutil.unpack_archive``.
:param data_root: data root to download the files to
:param url: url to download from | entailment |
def _raise_check_aggregation(aggregation: str):
"""
Check whether the given aggregation is present in NumPy or it is one of EXTRA_AGGREGATIONS.
:param aggregation: the aggregation name
:raise ValueError: if the specified aggregation is not supported or found in NumPy
"""
if aggregation not in ComputeStats.EXTRA_AGGREGATIONS and not hasattr(np, aggregation):
raise ValueError('Aggregation `{}` is not a NumPy function or a member '
'of EXTRA_AGGREGATIONS.'.format(aggregation)) | Check whether the given aggregation is present in NumPy or it is one of EXTRA_AGGREGATIONS.
:param aggregation: the aggregation name
:raise ValueError: if the specified aggregation is not supported or found in NumPy | entailment |
def _compute_aggregation(aggregation: str, data: Iterable[Any]):
"""
Compute the specified aggregation on the given data.
:param aggregation: the name of an arbitrary NumPy function (e.g., mean, max, median, nanmean, ...)
or one of :py:attr:`EXTRA_AGGREGATIONS`.
:param data: data to be aggregated
:raise ValueError: if the specified aggregation is not supported or found in NumPy
"""
ComputeStats._raise_check_aggregation(aggregation)
if aggregation == 'nanfraction':
return np.sum(np.isnan(data)) / len(data)
if aggregation == 'nancount':
return int(np.sum(np.isnan(data)))
return getattr(np, aggregation)(data) | Compute the specified aggregation on the given data.
:param aggregation: the name of an arbitrary NumPy function (e.g., mean, max, median, nanmean, ...)
or one of :py:attr:`EXTRA_AGGREGATIONS`.
:param data: data to be aggregated
:raise ValueError: if the specified aggregation is not supported or found in NumPy | entailment |
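The dispatch is a plain getattr on NumPy plus the two extra aggregations; a quick sketch:

import numpy as np

data = np.array([1.0, 2.0, np.nan, 4.0])
print(getattr(np, 'nanmean')(data))        # NumPy lookup: 2.333...
print(np.sum(np.isnan(data)) / len(data))  # 'nanfraction' extra: 0.25
print(int(np.sum(np.isnan(data))))         # 'nancount' extra: 1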
def _save_stats(self, epoch_data: EpochData) -> None:
"""
Extend ``epoch_data`` by stream:variable:aggregation data.
:param epoch_data: data source from which the statistics are computed
"""
for stream_name in epoch_data.keys():
for variable, aggregations in self._variable_aggregations.items():
# variables are already checked in the AccumulatingHook; hence, we do not check them here
epoch_data[stream_name][variable] = OrderedDict(
{aggr: ComputeStats._compute_aggregation(aggr, self._accumulator[stream_name][variable])
for aggr in aggregations}) | Extend ``epoch_data`` by stream:variable:aggregation data.
:param epoch_data: data source from which the statistics are computed | entailment |
def after_epoch(self, epoch_data: EpochData, **kwargs) -> None:
"""
Compute the specified aggregations and save them to the given epoch data.
:param epoch_data: epoch data to be processed
"""
self._save_stats(epoch_data)
super().after_epoch(epoch_data=epoch_data, **kwargs) | Compute the specified aggregations and save them to the given epoch data.
:param epoch_data: epoch data to be processed | entailment |
def save(self) -> None:
"""
Save the training trace to :py:attr:`CXF_TRACE_FILE` file under the specified directory.
:raise ValueError: if no output directory was specified
"""
if self._output_dir is None:
raise ValueError('Can not save TrainingTrace without output dir.')
yaml_to_file(self._trace, self._output_dir, CXF_TRACE_FILE) | Save the training trace to :py:attr:`CXF_TRACE_FILE` file under the specified directory.
:raise ValueError: if no output directory was specified | entailment |
def from_file(filepath: str):
"""
Load training trace from the given ``filepath``.
:param filepath: training trace file path
:return: training trace
"""
trace = TrainingTrace()
trace._trace = load_config(filepath)
return trace | Load training trace from the given ``filepath``.
:param filepath: training trace file path
:return: training trace | entailment |
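Under the hood this is YAML persistence; a hedged round-trip sketch using plain PyYAML (file name and keys are made up, not the trace's real schema):

import yaml

trace = {'epochs_done': 5, 'train_begin': '2021-01-01T00:00:00'}
with open('trace.yaml', 'w') as f:
    yaml.safe_dump(trace, f)
with open('trace.yaml') as f:
    print(yaml.safe_load(f))  # {'epochs_done': 5, 'train_begin': '2021-01-01T00:00:00'}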
def after_epoch(self, epoch_id: int, epoch_data: EpochData):
"""
Check termination conditions.
:param epoch_id: number of the processed epoch
:param epoch_data: epoch data to be checked
:raise KeyError: if the stream of variable was not found in ``epoch_data``
:raise TypeError: if the monitored variable is not a scalar or scalar ``mean`` aggregation
:raise ValueError: if the specified number of epochs exceeded
:raise TrainingTerminated: if the monitored variable is above the required level
"""
if self._stream not in epoch_data:
raise KeyError('The hook could not determine whether the threshold was exceeded as the stream `{}` '
'was not found in the epoch data'.format(self._stream))
if self._variable not in epoch_data[self._stream]:
raise KeyError('The hook could not determine whether the threshold was exceeded as the variable `{}` '
'was not found in the epoch data stream `{}`'.format(self._variable, self._stream))
value = epoch_data[self._stream][self._variable]
if isinstance(value, dict) and 'mean' in value:
value = value['mean']
if not np.isscalar(value):
raise TypeError('The value to be checked has to be either a scalar or a dict with `mean` key. '
'Got `{}` instead.'.format(type(value).__name__))
if value > self._required_min_value:
raise TrainingTerminated('{} {} level matched (current {} is greater than required {}).'
.format(self._stream, self._variable, value, self._required_min_value))
elif epoch_id >= self._max_epoch:
raise ValueError('{} {} was only {} in epoch {}, but {} was required. Training failed.'
.format(self._stream, self._variable, value, epoch_id, self._required_min_value)) | Check termination conditions.
:param epoch_id: number of the processed epoch
:param epoch_data: epoch data to be checked
:raise KeyError: if the stream of variable was not found in ``epoch_data``
:raise TypeError: if the monitored variable is not a scalar or scalar ``mean`` aggregation
:raise ValueError: if the specified number of epochs exceeded
:raise TrainingTerminated: if the monitored variable is above the required level | entailment |
def train(config_path: str, cl_arguments: Iterable[str], output_root: str) -> None:
"""
Load config and start the training.
:param config_path: path to configuration file
:param cl_arguments: additional command line arguments which will update the configuration
:param output_root: output root in which the training directory will be created
"""
config = None
try:
config_path = find_config(config_path)
config = load_config(config_file=config_path, additional_args=cl_arguments)
validate_config(config)
logging.debug('\tLoaded config: %s', config)
except Exception as ex: # pylint: disable=broad-except
fallback('Loading config failed', ex)
run(config=config, output_root=output_root) | Load config and start the training.
:param config_path: path to configuration file
:param cl_arguments: additional command line arguments which will update the configuration
:param output_root: output root in which the training directory will be created | entailment |
def evaluate(model_path: str, stream_name: str, config_path: Optional[str], cl_arguments: Iterable[str],
output_root: str) -> None:
"""
Evaluate the given model on the specified data stream.
Configuration is updated by the respective predict.stream_name section, in particular:
- hooks section is entirely replaced
- model and dataset sections are updated
:param model_path: path to the model to be evaluated
:param stream_name: data stream name to be evaluated
:param config_path: path to the config to be used, if not specified infer the path from ``model_path``
:param cl_arguments: additional command line arguments which will update the configuration
:param output_root: output root in which the training directory will be created
"""
config = None
try:
model_dir = path.dirname(model_path) if not path.isdir(model_path) else model_path
config_path = find_config(model_dir if config_path is None else config_path)
config = load_config(config_file=config_path, additional_args=cl_arguments)
if stream_name == CXF_PREDICT_STREAM and stream_name in config: # old style ``cxflow predict ...``
logging.warning('Old style ``predict`` configuration section is deprecated and will not be supported, '
'use ``eval.predict`` section instead.')
config['eval'] = {'predict': config['predict']}
if 'eval' in config and stream_name in config['eval']:
update_section = config['eval'][stream_name]
for subsection in ['dataset', 'model', 'main_loop']:
if subsection in update_section:
config[subsection].update(update_section[subsection])
if 'hooks' in update_section:
config['hooks'] = update_section['hooks']
else:
logging.warning('Config does not contain `eval.%s.hooks` section. '
'No hook will be employed during the evaluation.', stream_name)
config['hooks'] = []
validate_config(config)
logging.debug('\tLoaded config: %s', config)
except Exception as ex: # pylint: disable=broad-except
fallback('Loading config failed', ex)
run(config=config, output_root=output_root, restore_from=model_path, eval=stream_name) | Evaluate the given model on the specified data stream.
Configuration is updated by the respective predict.stream_name section, in particular:
- hooks section is entirely replaced
- model and dataset sections are updated
:param model_path: path to the model to be evaluated
:param stream_name: data stream name to be evaluated
:param config_path: path to the config to be used, if not specified infer the path from ``model_path``
:param cl_arguments: additional command line arguments which will update the configuration
:param output_root: output root in which the training directory will be created | entailment |
def predict(config_path: str, restore_from: Optional[str], cl_arguments: Iterable[str], output_root: str) -> None:
"""
Run prediction from the specified config path.
If the config contains a ``predict`` section:
- override hooks with ``predict.hooks`` if present
- update dataset, model and main loop sections if the respective sections are present
:param config_path: path to the config file or the directory in which it is stored
:param restore_from: backend-specific path to the already trained model to be restored from.
If ``None`` is passed, it is inferred from the configuration file location as the directory
it is located in.
:param cl_arguments: additional command line arguments which will update the configuration
:param output_root: output root in which the training directory will be created
"""
config = None
try:
config_path = find_config(config_path)
restore_from = restore_from or path.dirname(config_path)
config = load_config(config_file=config_path, additional_args=cl_arguments)
if 'predict' in config:
for section in ['dataset', 'model', 'main_loop']:
if section in config['predict']:
config[section].update(config['predict'][section])
if 'hooks' in config['predict']:
config['hooks'] = config['predict']['hooks']
else:
logging.warning('Config does not contain `predict.hooks` section. '
'No hook will be employed during the prediction.')
config['hooks'] = []
validate_config(config)
logging.debug('\tLoaded config: %s', config)
except Exception as ex: # pylint: disable=broad-except
fallback('Loading config failed', ex)
run(config=config, output_root=output_root, restore_from=restore_from, eval='predict') | Run prediction from the specified config path.
If the config contains a ``predict`` section:
- override hooks with ``predict.hooks`` if present
- update dataset, model and main loop sections if the respective sections are present
:param config_path: path to the config file or the directory in which it is stored
:param restore_from: backend-specific path to the already trained model to be restored from.
If ``None`` is passed, it is inferred from the configuration file location as the directory
it is located in.
:param cl_arguments: additional command line arguments which will update the configuration
:param output_root: output root in which the training directory will be created | entailment |
def _create_epoch_data(self, streams: Optional[Iterable[str]]=None) -> EpochData:
"""Create empty epoch data double dict."""
if streams is None:
streams = [self._train_stream_name] + self._extra_streams
return OrderedDict([(stream_name, OrderedDict()) for stream_name in streams]) | Create empty epoch data double dict. | entailment |
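The produced structure is just a nested ordered mapping with one empty dict per stream, which hooks fill in later; stream names below are made up:

from collections import OrderedDict

streams = ['train', 'valid', 'test']
epoch_data = OrderedDict((name, OrderedDict()) for name in streams)
epoch_data['valid']['accuracy'] = {'mean': 0.93}  # e.g. filled in by a hook
print(list(epoch_data.keys()))  # ['train', 'valid', 'test']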
def _check_sources(self, batch: Dict[str, object]) -> None:
"""
Check for unused and missing sources.
:param batch: batch to be checked
:raise ValueError: if a source is missing or unused and ``self._on_unused_sources`` is set to ``error``
"""
unused_sources = [source for source in batch.keys() if source not in self._model.input_names]
missing_sources = [source for source in self._model.input_names if source not in batch.keys()]
# check stream sources
if unused_sources:
if self._on_unused_sources == 'warn' and not self._extra_sources_warned:
logging.warning('Some sources provided by the stream do not match model placeholders. Set '
'`main_loop.on_unused_sources` to `ignore` in order to suppress this warning. '
'Extra sources: %s', unused_sources)
self._extra_sources_warned = True
elif self._on_unused_sources == 'error':
raise ValueError('Some sources provided by the stream do not match model placeholders. Set '
'`main_loop.on_unused_sources` to `warn` in order to suppress this error.\n'
'Extra sources: {}'.format(unused_sources))
if missing_sources:
raise ValueError('Stream does not provide all required sources. Missing sources: {}'
.format(missing_sources)) | Check for unused and missing sources.
:param batch: batch to be checked
:raise ValueError: if a source is missing or unused and ``self._on_unused_sources`` is set to ``error`` | entailment |
def _run_epoch(self, stream: StreamWrapper, train: bool) -> None:
"""
Iterate through the given stream and evaluate/train the model with the received batches.
Calls :py:meth:`cxflow.hooks.AbstractHook.after_batch` events.
:param stream: stream to iterate
:param train: if set to ``True``, the model will be trained
:raise ValueError: in case of empty batch when ``on_empty_batch`` is set to ``error``
:raise ValueError: in case of empty stream when ``on_empty_stream`` is set to ``error``
:raise ValueError: in case of two batch variables having different lengths
"""
nonempty_batch_count = 0
for i, batch_input in enumerate(stream):
self.raise_check_interrupt()
batch_sizes = {len(source) for source in batch_input.values()}
if len(batch_sizes) == 0 or batch_sizes == {0}:
if self._on_empty_batch == 'warn':
logging.warning('%i-th batch in stream `%s` appears to be empty (%i-th empty batch in total). Set '
'`main_loop.on_empty_batch` to `ignore` in order to suppress this warning.',
i, stream.name, nonempty_batch_count)
elif self._on_empty_batch == 'error':
raise ValueError('{}-th batch in stream `{}` appears to be empty ({}-th empty batch in total). Set '
'`main_loop.on_empty_batch` to `warn` in order to change this error into warning; '
'set to `ignore` to remove it.'.format(i, stream.name, nonempty_batch_count))
continue
elif self._fixed_batch_size:
if batch_sizes != {self._fixed_batch_size}:
var, len_ = [(k, len(v)) for k, v in batch_input.items() if len(v) != self._fixed_batch_size][0]
logging.debug('%i-th batch in stream `%s` has variable `%s` of length %i inconsistent with '
'`main_loop.fixed_size` = %i', i, stream.name, var, len_, self._fixed_batch_size)
continue
nonempty_batch_count += 1
self._check_sources(batch_input)
with Timer('eval_batch_{}'.format(stream.name), self._epoch_profile):
batch_output = self._model.run(batch=batch_input, train=train, stream=stream)
assert set(batch_input.keys()).isdisjoint(set(batch_output)), 'Batch inputs and outputs must not overlap.'
with Timer('after_batch_hooks_{}'.format(stream.name), self._epoch_profile):
batch_data = {**batch_input, **batch_output}
for hook in self._hooks:
hook.after_batch(stream_name=stream.name, batch_data=batch_data)
if nonempty_batch_count == 0:
if self._on_empty_stream == 'warn':
logging.warning('Stream `%s` appears to be empty. Set `main_loop.on_empty_stream` to `ignore` in order '
'to suppress this warning.', stream.name)
elif self._on_empty_stream == 'error':
raise ValueError('Stream `{}` appears to be empty. Set '
'`main_loop.on_empty_stream` to `warn` in order to change this error into warning; '
'set to `ignore` to remove it.'.format(stream.name)) | Iterate through the given stream and evaluate/train the model with the received batches.
Calls :py:meth:`cxflow.hooks.AbstractHook.after_batch` events.
:param stream: stream to iterate
:param train: if set to ``True``, the model will be trained
:raise ValueError: in case of empty batch when ``on_empty_batch`` is set to ``error``
:raise ValueError: in case of empty stream when ``on_empty_stream`` is set to ``error``
:raise ValueError: in case of two batch variables having different lengths | entailment |
def train_by_stream(self, stream: StreamWrapper) -> None:
"""
Train the model with the given stream.
:param stream: stream to train with
"""
self._run_epoch(stream=stream, train=True) | Train the model with the given stream.
:param stream: stream to train with | entailment |
def evaluate_stream(self, stream: StreamWrapper) -> None:
"""
Evaluate the given stream.
:param stream: stream to be evaluated
"""
self._run_epoch(stream=stream, train=False) | Evaluate the given stream.
:param stream: stream to be evaluated | entailment |
def get_stream(self, stream_name: str) -> StreamWrapper:
"""
Get a :py:class:`StreamWrapper` with the given name.
:param stream_name: stream name
:return: the :py:class:`StreamWrapper` providing the respective stream
:raise AttributeError: if the dataset does not provide the function creating the stream
"""
if stream_name not in self._streams:
stream_fn_name = '{}_stream'.format(stream_name)
try:
stream_fn = getattr(self._dataset, stream_fn_name)
stream_epoch_limit = -1
if self._fixed_epoch_size is not None and stream_name == self._train_stream_name:
stream_epoch_limit = self._fixed_epoch_size
self._streams[stream_name] = StreamWrapper(stream_fn, buffer_size=self._buffer,
epoch_size=stream_epoch_limit, name=stream_name,
profile=self._epoch_profile)
except AttributeError as ex:
raise AttributeError('The dataset does not have a function for creating a stream named `{}`. '
'The function has to be named `{}`.'.format(stream_name, stream_fn_name)) from ex
return self._streams[stream_name] | Get a :py:class:`StreamWrapper` with the given name.
:param stream_name: stream name
:return: the :py:class:`StreamWrapper` providing the respective stream
:raise AttributeError: if the dataset does not provide the function creating the stream | entailment |
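The naming convention means a dataset only needs `<name>_stream` methods; a minimal sketch with a hypothetical ToyDataset:

class ToyDataset:
    def train_stream(self):
        """Yield batches as source -> values mappings."""
        yield {'x': [1, 2], 'y': [0, 1]}

dataset = ToyDataset()
stream_fn = getattr(dataset, '{}_stream'.format('train'))
for batch in stream_fn():
    print(batch)  # {'x': [1, 2], 'y': [0, 1]}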
def _run_zeroth_epoch(self, streams: Iterable[str]) -> None:
"""
Run zeroth epoch on the specified streams.
Calls
- :py:meth:`cxflow.hooks.AbstractHook.after_epoch`
:param streams: stream names to be evaluated
"""
for stream_name in streams:
with self.get_stream(stream_name) as stream:
self.evaluate_stream(stream)
epoch_data = self._create_epoch_data(streams)
for hook in self._hooks:
hook.after_epoch(epoch_id=0, epoch_data=epoch_data) | Run zeroth epoch on the specified streams.
Calls
- :py:meth:`cxflow.hooks.AbstractHook.after_epoch`
:param streams: stream names to be evaluated | entailment |
def _try_run(self, run_func: Callable[[], None]) -> None:
"""
Try running the given function (training/prediction).
Calls
- :py:meth:`cxflow.hooks.AbstractHook.before_training`
- :py:meth:`cxflow.hooks.AbstractHook.after_training`
:param run_func: function to be run
"""
# Initialization: before_training
for hook in self._hooks:
hook.before_training()
try:
run_func()
except TrainingTerminated as ex:
logging.info('Training terminated: %s', ex)
# After training: after_training
for hook in self._hooks:
hook.after_training() | Try running the given function (training/prediction).
Calls
- :py:meth:`cxflow.hooks.AbstractHook.before_training`
- :py:meth:`cxflow.hooks.AbstractHook.after_training`
:param run_func: function to be run | entailment |
def run_training(self, trace: Optional[TrainingTrace]=None) -> None:
"""
Run the main loop in the training mode.
Calls
- :py:meth:`cxflow.hooks.AbstractHook.after_epoch`
- :py:meth:`cxflow.hooks.AbstractHook.after_epoch_profile`
"""
for stream_name in [self._train_stream_name] + self._extra_streams:
self.get_stream(stream_name)
def training():
logging.debug('Training started')
self._epochs_done = 0
# Zeroth epoch: after_epoch
if not self._skip_zeroth_epoch:
logging.info('Evaluating 0th epoch')
self._run_zeroth_epoch([self._train_stream_name] + self._extra_streams)
logging.info('0th epoch done\n\n')
# Training loop: after_epoch, after_epoch_profile
while True:
epoch_id = self._epochs_done + 1
logging.info('Training epoch %s', epoch_id)
self._epoch_profile.clear()
epoch_data = self._create_epoch_data()
with self.get_stream(self._train_stream_name) as stream:
self.train_by_stream(stream)
for stream_name in self._extra_streams:
with self.get_stream(stream_name) as stream:
self.evaluate_stream(stream)
with Timer('after_epoch_hooks', self._epoch_profile):
for hook in self._hooks:
hook.after_epoch(epoch_id=epoch_id, epoch_data=epoch_data)
for hook in self._hooks:
hook.after_epoch_profile(epoch_id=epoch_id, profile=self._epoch_profile,
train_stream_name=self._train_stream_name,
extra_streams=self._extra_streams)
self._epochs_done = epoch_id
if trace is not None:
trace[TrainingTraceKeys.EPOCHS_DONE] = self._epochs_done
logging.info('Epoch %s done\n\n', epoch_id)
self._try_run(training) | Run the main loop in the training mode.
Calls
- :py:meth:`cxflow.hooks.AbstractHook.after_epoch`
- :py:meth:`cxflow.hooks.AbstractHook.after_epoch_profile` | entailment |