keras | keras-master/keras/mixed_precision/get_layer_policy.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the get_layer_policy function.
This is a separate file from policy.py to avoid a circular dependency.
get_layer_policy() relies on base_layer.py, which itself relies on policy.py.
"""
from keras.engine import base_layer
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.mixed_precision.experimental.get_layer_policy', v1=[])
def get_layer_policy(layer):
"""Returns the dtype policy of a layer.
Warning: This function is deprecated. Use
`tf.keras.layers.Layer.dtype_policy` instead.
Args:
layer: A `tf.keras.layers.Layer`.
Returns:
The `tf.keras.mixed_precision.Policy` of the layer.
"""
if not isinstance(layer, base_layer.Layer):
raise ValueError('get_layer_policy can only be called on a layer, but got: %s'
% (layer,))
return layer.dtype_policy
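# A minimal usage sketch (assuming TF 2.x; `example_layer` is an illustrative
# name only): the deprecated function and its documented replacement,
# `Layer.dtype_policy`, return the same Policy object.
import tensorflow as tf
example_layer = tf.keras.layers.Dense(4, dtype='mixed_float16')
assert get_layer_policy(example_layer) is example_layer.dtype_policy
assert example_layer.dtype_policy.name == 'mixed_float16'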
keras | keras-master/keras/mixed_precision/loss_scale.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains keras-specific LossScale functionality.
These functions cannot be in the non-keras loss_scale.py file since they depend
on keras, and files outside of keras should not depend on files inside keras.
"""
import tensorflow.compat.v2 as tf
from keras.utils import generic_utils
def serialize(loss_scale):
return generic_utils.serialize_keras_object(loss_scale)
def deserialize(config, custom_objects=None):
loss_scale_module_objects = {
'FixedLossScale': tf.mixed_precision.experimental.FixedLossScale,
'DynamicLossScale': tf.mixed_precision.experimental.DynamicLossScale,
}
return generic_utils.deserialize_keras_object(
config,
module_objects=loss_scale_module_objects,
custom_objects=custom_objects,
printable_module_name='loss scale'
)
def get(identifier):
"""Get a loss scale object."""
if isinstance(identifier, dict):
return deserialize(identifier)
if isinstance(identifier, (int, float)):
return tf.mixed_precision.experimental.FixedLossScale(identifier)
if identifier == 'dynamic':
return tf.mixed_precision.experimental.DynamicLossScale()
if isinstance(identifier, tf.mixed_precision.experimental.LossScale):
return identifier
elif identifier is None:
return None
else:
raise ValueError('Could not interpret loss scale identifier: %s' %
identifier)
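# A minimal sketch of the identifier forms `get` accepts, per the branches
# above (assuming the experimental LossScale classes are still available):
assert isinstance(get(128), tf.mixed_precision.experimental.FixedLossScale)
assert isinstance(get('dynamic'), tf.mixed_precision.experimental.DynamicLossScale)
assert get(None) is None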
keras | keras-master/keras/mixed_precision/layer_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests keras.layers.Layer works properly with mixed precision."""
import tensorflow.compat.v2 as tf
import os
from absl.testing import parameterized
import numpy as np
from keras import combinations
from keras import keras_parameterized
from keras import layers
from keras import models
from keras.engine import base_layer
from keras.engine import base_layer_utils
from keras.engine import input_spec
from keras.mixed_precision import get_layer_policy
from keras.mixed_precision import policy
from keras.mixed_precision import test_util as mp_test_util
from keras.optimizer_v2 import gradient_descent
class MultiplyLayerWithFunction(mp_test_util.MultiplyLayer):
"""Same as MultiplyLayer, but _multiply is decorated with a tf.function."""
@tf.function
def _multiply(self, x, y):
return super(MultiplyLayerWithFunction, self)._multiply(x, y)
# If called outside any strategy.scope() calls, this will return the default
# strategy.
default_strategy_fn = tf.distribute.get_strategy
def create_mirrored_strategy():
"""Create a MirroredStrategy, using a GPU if it is available."""
if tf.config.list_logical_devices('GPU'):
return tf.distribute.MirroredStrategy(['cpu:0', 'gpu:0'])
else:
return tf.distribute.MirroredStrategy(['cpu:0'])
def create_central_storage_strategy():
"""Create a CentralStorageStrategy, using a GPU if it is available."""
compute_devices = ['cpu:0', 'gpu:0'] if (
tf.config.list_logical_devices('GPU')) else ['cpu:0']
return tf.distribute.experimental.CentralStorageStrategy(
compute_devices, parameter_device='cpu:0')
TESTCASES = ({
'testcase_name': 'base',
'strategy_fn': default_strategy_fn
}, {
'testcase_name': 'distribute',
'strategy_fn': create_mirrored_strategy
})
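# A minimal sketch (assuming TF 2.x eager execution; illustrative names only)
# of the pattern the parameterized cases below exercise: under 'mixed_float16',
# a layer's variable stays float32 while its output is float16, with or
# without a distribution strategy.
with default_strategy_fn().scope(), policy.policy_scope('mixed_float16'):
  example_layer = mp_test_util.MultiplyLayer(assert_type=tf.float16)
  example_out = example_layer(tf.constant([1.]))
  assert example_layer.v.dtype == tf.float32
  assert example_out.dtype == tf.float16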
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class LayerTest(keras_parameterized.TestCase):
"""Test mixed precision with Keras layers."""
@parameterized.named_parameters(*TESTCASES)
def test_mixed_policies_(self, strategy_fn):
strategy = strategy_fn()
for dtype in 'float16', 'bfloat16':
x = tf.constant([1.])
policy_name = 'mixed_' + dtype
with strategy.scope(), policy.policy_scope(policy_name):
layer = mp_test_util.MultiplyLayer(assert_type=dtype)
self.assertEqual(layer.dtype, tf.float32)
self.assertEqual(get_layer_policy.get_layer_policy(layer).name,
policy_name)
y = layer(x)
self.assertEqual(layer.v.dtype, tf.float32)
self.assertEqual(y.dtype, dtype)
self.assertEqual(layer.dtype_policy.name, policy_name)
self.assertIsInstance(layer.dtype_policy, policy.Policy)
self.assertEqual(layer.compute_dtype, dtype)
self.assertEqual(layer.dtype, tf.float32)
self.assertEqual(layer.variable_dtype, tf.float32)
self.assertEqual(get_layer_policy.get_layer_policy(layer).name,
policy_name)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertEqual(self.evaluate(y), 1.)
def test_layer_with_int_variable(self):
class LayerWithIntVar(base_layer.Layer):
def build(self, _):
self.v = self.add_weight('v', dtype='int32', trainable=False)
def call(self, inputs):
# Only float variables should be autocasted. This will fail if self.v is
# autocasted to float32
return tf.cast(inputs, 'int32') + self.v
x = tf.constant([1.])
layer = LayerWithIntVar(dtype='mixed_float16')
self.assertEqual(layer(x).dtype, 'int32')
@parameterized.named_parameters(*TESTCASES)
def test_layer_with_non_autocast_variable(self, strategy_fn):
x = tf.constant([1.])
with strategy_fn().scope():
with policy.policy_scope('mixed_float16'):
layer = mp_test_util.MultiplyLayerWithoutAutoCast(
assert_type=tf.float16)
y = layer(x)
self.assertEqual(layer.v.dtype, tf.float32)
self.assertEqual(y.dtype, tf.float16)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertEqual(self.evaluate(y), 1.)
@parameterized.named_parameters(*TESTCASES)
def test_layer_calling_tf_function(self, strategy_fn):
x = tf.constant([1.])
with strategy_fn().scope():
with policy.policy_scope('mixed_float16'):
layer = MultiplyLayerWithFunction(assert_type=tf.float16)
y = layer(x)
self.assertEqual(layer.v.dtype, tf.float32)
self.assertEqual(y.dtype, tf.float16)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertEqual(self.evaluate(y), 1.)
@parameterized.named_parameters(*TESTCASES)
def test_layer_regularizer_runs_in_var_dtype(self, strategy_fn):
x = tf.constant([1.])
with strategy_fn().scope():
with policy.policy_scope('mixed_float16'):
# Test on MultiplyLayer
layer = mp_test_util.MultiplyLayer(
assert_type=tf.float16,
regularizer=mp_test_util.IdentityRegularizer())
layer(x)
(regularizer_loss,) = layer.losses
self.assertEqual(regularizer_loss.dtype, tf.float32)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertEqual(self.evaluate(regularizer_loss), 1.)
# Test on MultiplyLayerWithoutAutoCast
layer = mp_test_util.MultiplyLayerWithoutAutoCast(
assert_type=tf.float16,
regularizer=mp_test_util.IdentityRegularizer())
layer(x)
(regularizer_loss,) = layer.losses
self.assertEqual(regularizer_loss.dtype, tf.float32)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertEqual(self.evaluate(regularizer_loss), 1.)
@parameterized.named_parameters(*TESTCASES)
def test_passing_policy_to_layer(self, strategy_fn):
x = tf.constant([1.], dtype=tf.float16)
with strategy_fn().scope():
# Passing a Policy to 'dtype' sets the policy for that layer.
layer = mp_test_util.MultiplyLayer(
assert_type=tf.float16, dtype=policy.Policy('mixed_float16'))
# layer.dtype refers to the variable dtype
self.assertEqual(layer.dtype, tf.float32)
layer(x)
self.assertEqual(layer.v.dtype, tf.float32)
with policy.policy_scope('mixed_float16'):
# Passing a Policy to dtype overrides the global Policy
layer = mp_test_util.MultiplyLayer(
assert_type=tf.float64, dtype=policy.Policy('float64'))
self.assertEqual(layer.dtype_policy.name, 'float64')
self.assertIsInstance(layer.dtype_policy, policy.Policy)
self.assertEqual(layer.compute_dtype, tf.float64)
self.assertEqual(layer.dtype, tf.float64)
self.assertEqual(layer.variable_dtype, tf.float64)
self.assertEqual(layer(x).dtype, tf.float64)
self.assertEqual(layer.v.dtype, tf.float64)
@parameterized.named_parameters(*TESTCASES)
def test_gradient(self, strategy_fn):
x = tf.constant([1.])
with strategy_fn().scope() as strategy:
with policy.policy_scope('mixed_float16'):
layer = mp_test_util.MultiplyLayer(assert_type=tf.float16)
# Learning rate is small enough that if applied to a float16 variable,
# the variable will not change. So this tests the learning rate is not
# applied to a float16 value, but instead the float32 variable.
opt = gradient_descent.SGD(2**-14)
def run_fn():
with tf.GradientTape() as tape:
y = layer(x)
# Divide by num_replicas_in_sync, as the effective total loss is the
# sum of each of the replica's losses.
y /= strategy.num_replicas_in_sync
grad = tape.gradient(y, layer.v)
return opt.apply_gradients([(grad, layer.v)])
op = strategy.experimental_run(run_fn)
if not tf.executing_eagerly():
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(op)
# The gradient with respect to the variable is 1. Since the
# variable is initialized with 1 and the learning rate is 2**-14, the
# new variable value should be: init_val - gradient * learning_rate,
# which is 1 - 1 * 2**-14
self.assertEqual(self.evaluate(layer.v), 1 - 2**-14)
def _test_checkpointing_layer_weights(self, strategy_fn,
mixed_prec_when_saving,
mixed_prec_when_loading):
# In this test, we potentially save with mixed precision enabled and load
# with mixed precision disabled, or vice versa. This is possible because
# variables are float32 regardless of whether mixed precision is enabled.
save_policy = 'mixed_float16' if mixed_prec_when_saving else 'float32'
load_policy = 'mixed_float16' if mixed_prec_when_loading else 'float32'
save_input_dtype = 'float16' if mixed_prec_when_saving else 'float32'
load_input_dtype = 'float16' if mixed_prec_when_loading else 'float32'
# Create a layer and save a checkpoint.
x = tf.constant([1.])
with strategy_fn().scope():
with policy.policy_scope(save_policy):
layer = mp_test_util.MultiplyLayer(assert_type=save_input_dtype)
layer(x) # Build layer
layer.set_weights([np.array(100.)])
self.assertEqual(self.evaluate(layer(x)), 100.)
checkpoint = tf.train.Checkpoint(layer=layer)
prefix = os.path.join(self.get_temp_dir(), 'ckpt')
save_path = checkpoint.save(prefix)
# Create a new layer and restore the checkpoint.
x = tf.constant([1.])
with strategy_fn().scope():
with policy.policy_scope(load_policy):
layer = mp_test_util.MultiplyLayer(assert_type=load_input_dtype)
layer(x) # Build layer
layer.set_weights([np.array(200.)])
self.assertEqual(self.evaluate(layer(x)), 200.)
checkpoint = tf.train.Checkpoint(layer=layer)
checkpoint.restore(save_path).assert_consumed().run_restore_ops()
self.assertEqual(layer.get_weights(), [100.])
self.assertEqual(self.evaluate(layer(x)), 100.)
@parameterized.named_parameters(*TESTCASES)
def test_checkpointing_layer_weights(self, strategy_fn):
with self.test_session():
self._test_checkpointing_layer_weights(
strategy_fn, mixed_prec_when_saving=True,
mixed_prec_when_loading=True)
self._test_checkpointing_layer_weights(
strategy_fn, mixed_prec_when_saving=True,
mixed_prec_when_loading=False)
self._test_checkpointing_layer_weights(
strategy_fn, mixed_prec_when_saving=False,
mixed_prec_when_loading=True)
@parameterized.named_parameters(*TESTCASES)
def test_config(self, strategy_fn):
x = tf.constant([1.], dtype=tf.float16)
with strategy_fn().scope():
for layer, dtype in (
(mp_test_util.MultiplyLayer(), 'float32'),
(mp_test_util.MultiplyLayer(dtype='float64'), 'float64'),
(mp_test_util.MultiplyLayer(dtype=policy.Policy('float64')),
'float64')):
config = layer.get_config()
self.assertEqual(config['dtype'], dtype)
self.assertIsInstance(config['dtype'], str)
layer = mp_test_util.MultiplyLayer.from_config(config)
self.assertEqual(layer.dtype, dtype)
self.assertEqual(layer(x).dtype, dtype)
self.assertEqual(layer.v.dtype, dtype)
layer = mp_test_util.MultiplyLayer(dtype='mixed_float16')
config = layer.get_config()
self.assertEqual(config['dtype'],
{'class_name': 'Policy',
'config': {'name': 'mixed_float16'}})
layer = mp_test_util.MultiplyLayer.from_config(config)
self.assertEqual(layer.dtype, 'float32')
self.assertEqual(layer(x).dtype, 'float16')
self.assertEqual(layer.v.dtype, 'float32')
config = layer.get_config()
self.assertEqual(config['dtype'],
{'class_name': 'Policy',
'config': {'name': 'mixed_float16'}})
layer = mp_test_util.MultiplyLayer(dtype=policy.Policy('_infer'))
config = layer.get_config()
self.assertIsNone(config['dtype'])
layer = mp_test_util.MultiplyLayer.from_config(config)
# If a layer is serialized with the "_infer" policy, when deserialized
# into TF 2 it will have the global policy instead of "_infer". This is
# because "_infer" is serialized into None, and passing dtype=None in
# TensorFlow 2 indicates to use the global policy.
self.assertEqual(layer.dtype, 'float32')
self.assertEqual(layer(x).dtype, 'float32')
self.assertEqual(layer.v.dtype, 'float32')
@parameterized.named_parameters(*TESTCASES)
def test_config_policy_v1(self, strategy_fn):
x = tf.constant([1.], dtype=tf.float16)
with strategy_fn().scope():
layer = mp_test_util.MultiplyLayer(dtype=policy.PolicyV1('mixed_float16',
loss_scale=None))
config = layer.get_config()
self.assertEqual(config['dtype'],
{'class_name': 'PolicyV1',
'config': {'name': 'mixed_float16',
'loss_scale': None}})
layer = mp_test_util.MultiplyLayer.from_config(config)
self.assertEqual(layer.dtype, 'float32')
self.assertEqual(layer(x).dtype, 'float16')
self.assertEqual(layer.v.dtype, 'float32')
# Restoring a PolicyV1 silently converts it to a Policy and drops the loss
# scale.
self.assertEqual(type(layer.dtype_policy), policy.Policy)
config = layer.get_config()
# The loss_scale is silently dropped
self.assertEqual(config['dtype'],
{'class_name': 'Policy',
'config': {'name': 'mixed_float16'}})
layer = mp_test_util.MultiplyLayer(dtype=policy.PolicyV1('float64',
loss_scale=2.))
config = layer.get_config()
self.assertEqual(config['dtype'],
{'class_name': 'PolicyV1',
'config': {'name': 'float64',
'loss_scale': {
'class_name': 'FixedLossScale',
'config': {'loss_scale_value': 2.0}}}})
layer = mp_test_util.MultiplyLayer.from_config(config)
self.assertEqual(layer.dtype, 'float64')
self.assertEqual(layer(x).dtype, 'float64')
self.assertEqual(layer.v.dtype, 'float64')
self.assertEqual(type(layer.dtype_policy), policy.Policy)
config = layer.get_config()
self.assertEqual(config['dtype'], 'float64')
layer = mp_test_util.MultiplyLayer(dtype=policy.PolicyV1('_infer',
loss_scale=2.))
config = layer.get_config()
self.assertEqual(config['dtype'],
{'class_name': 'PolicyV1',
'config': {'name': '_infer',
'loss_scale': {
'class_name': 'FixedLossScale',
'config': {'loss_scale_value': 2.0}}}})
layer = mp_test_util.MultiplyLayer.from_config(config)
self.assertEqual(layer.dtype, None)
self.assertEqual(layer(x).dtype, 'float16')
self.assertEqual(layer.v.dtype, 'float16')
self.assertEqual(type(layer.dtype_policy), policy.Policy)
config = layer.get_config()
self.assertEqual(config['dtype'], 'float16')
def test_delete_variable(self):
layer = base_layer.Layer(dtype='mixed_float16')
layer.x = layer.add_weight('x')
self.assertEqual(layer.trainable_weights, [layer.x])
del layer.x
self.assertEqual(layer.trainable_weights, [])
def test_build_and_call_layer_in_function(self):
layer = mp_test_util.MultiplyLayer(dtype=policy.Policy('mixed_float16'))
@tf.function
def f():
return layer(1.)
y = f()
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertEqual(y.dtype, 'float16')
self.assertEqual(layer.v.dtype, 'float32')
self.assertEqual(self.evaluate(y), 1.)
def test_unsupported_strategy(self):
strategy = create_central_storage_strategy()
with strategy.scope(), self.assertRaisesRegex(
ValueError, 'Mixed precision is not supported with the '
'tf.distribute.Strategy: CentralStorageStrategy. Either '
'stop using mixed precision by removing the use of the '
'"mixed_float16" policy or use a different Strategy, e.g. '
'a MirroredStrategy.'):
mp_test_util.MultiplyLayer(dtype='mixed_float16')
# Non-mixed policies are fine
mp_test_util.MultiplyLayer(dtype=policy.Policy('float64'))
def test_input_spec_dtype(self):
# Test that the InputSpec's dtype is compared against the inputs before the layer
# casts them, not after.
layer = mp_test_util.MultiplyLayer(dtype='float64')
layer.input_spec = input_spec.InputSpec(dtype='float16')
# Test passing Eager tensors
x = tf.ones((2, 2), dtype='float16')
layer(x)
x = tf.ones((2, 2), dtype='float64')
with self.assertRaisesRegex(
ValueError, 'expected dtype=float16, found dtype=.*float64'):
layer(x)
# Test passing symbolic tensors
x = layers.Input((2,), dtype='float16')
y = layer(x)
model = models.Model(x, y)
model(tf.ones((2, 2)))
x = layers.Input((2,), dtype='float64')
with self.assertRaisesRegex(
ValueError, 'expected dtype=float16, found dtype=.*float64'):
# In TF2, the error is only raised when the model is run
y = layer(x)
model = models.Model(x, y)
model(tf.ones((2, 2)))
if __name__ == '__main__':
base_layer_utils.enable_v2_dtype_behavior()
tf.test.main()
keras | keras-master/keras/mixed_precision/policy.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the Policy class for mixed precision training."""
import tensorflow.compat.v2 as tf
import contextlib
from keras import backend
from keras.engine import base_layer_utils
from keras.mixed_precision import device_compatibility_check
from keras.mixed_precision import loss_scale as keras_loss_scale_module
from keras.utils import generic_utils
from tensorflow.python.platform import tf_logging
from tensorflow.python.util.tf_export import keras_export
# pylint: disable=g-classes-have-attributes
@keras_export('keras.mixed_precision.Policy', v1=[])
class Policy:
"""A dtype policy for a Keras layer.
A dtype policy determines a layer's computation and variable dtypes. Each
layer has a policy. Policies can be passed to the `dtype` argument of layer
constructors, or a global policy can be set with
`tf.keras.mixed_precision.set_global_policy`.
Args:
name: The policy name, which determines the compute and variable dtypes. Can
be any dtype name, such as `'float32'` or `'float64'`, which causes both
the compute and variable dtypes to be that dtype. Can also be the string
`'mixed_float16'` or `'mixed_bfloat16'`, which causes the compute dtype to
be float16 or bfloat16 and the variable dtype to be float32.
Typically you only need to interact with dtype policies when using mixed
precision, which is the use of float16 or bfloat16 for computations and
float32 for variables. This is why the term `mixed_precision` appears in the
API name. Mixed precision can be enabled by passing `'mixed_float16'` or
`'mixed_bfloat16'` to `tf.keras.mixed_precision.set_global_policy`. See [the
mixed precision guide](https://www.tensorflow.org/guide/keras/mixed_precision)
for more information on how to use mixed precision.
>>> tf.keras.mixed_precision.set_global_policy('mixed_float16')
>>> layer1 = tf.keras.layers.Dense(10)
>>> layer1.dtype_policy # `layer1` will automatically use mixed precision
<Policy "mixed_float16">
>>> # Can optionally override layer to use float32 instead of mixed precision.
>>> layer2 = tf.keras.layers.Dense(10, dtype='float32')
>>> layer2.dtype_policy
<Policy "float32">
>>> # Set policy back to initial float32 for future examples.
>>> tf.keras.mixed_precision.set_global_policy('float32')
In the example above, passing `dtype='float32'` to the layer is equivalent to
passing `dtype=tf.keras.mixed_precision.Policy('float32')`. In general,
passing a dtype policy name to a layer is equivalent to passing the
corresponding policy, so it is never necessary to explicitly construct a
`Policy` object.
Note: `Model.compile` will automatically wrap an optimizer with a
`tf.keras.mixed_precision.LossScaleOptimizer` if you use the `'mixed_float16'`
policy. If you use a custom training loop instead of calling `Model.compile`,
you should explicitly use a `tf.keras.mixed_precision.LossScaleOptimizer` to
avoid numeric underflow with float16.
### How a layer uses its policy's compute dtype
A layer casts its inputs to its compute dtype. This causes the layer's
computations and output to also be in the compute dtype. For example:
>>> x = tf.ones((4, 4, 4, 4), dtype='float64')
>>> # `layer`'s policy defaults to float32.
>>> layer = tf.keras.layers.Conv2D(filters=4, kernel_size=2)
>>> layer.compute_dtype # Equivalent to layer.dtype_policy.compute_dtype
'float32'
>>> # `layer` casts its inputs to its compute dtype and does computations in
>>> # that dtype.
>>> y = layer(x)
>>> y.dtype
tf.float32
Note that the base `tf.keras.layers.Layer` class inserts the casts. If
subclassing your own layer, you do not have to insert any casts.
Currently, only tensors in the first argument to the layer's `call` method are
casted (although this will likely be changed in a future minor release). For
example:
>>> class MyLayer(tf.keras.layers.Layer):
... # Bug! `b` will not be casted.
... def call(self, a, b):
... return a + 1., b + 1.
>>> a = tf.constant(1., dtype="float32")
>>> b = tf.constant(1., dtype="float32")
>>> layer = MyLayer(dtype="float64")
>>> x, y = layer(a, b)
>>> x.dtype
tf.float64
>>> y.dtype
tf.float32
If writing your own layer with multiple inputs, you should either explicitly
cast other tensors to `self.compute_dtype` in `call` or accept all tensors in
the first argument as a list.
The casting only occurs in TensorFlow 2. If
`tf.compat.v1.disable_v2_behavior()` has been called, you can enable the
casting behavior with `tf.compat.v1.keras.layers.enable_v2_dtype_behavior()`.
### How a layer uses its policy's variable dtype
The default dtype of variables created by `tf.keras.layers.Layer.add_weight`
is the layer's policy's variable dtype.
If a layer's compute and variable dtypes differ, `add_weight` will wrap
floating-point variables with a special wrapper called an `AutoCastVariable`.
`AutoCastVariable` is identical to the original variable except it casts
itself to the layer's compute dtype when used within `Layer.call`. This means
if you are writing a layer, you do not have to explicitly cast the variables
to the layer's compute dtype. For example:
>>> class SimpleDense(tf.keras.layers.Layer):
...
... def build(self, input_shape):
... # With mixed precision, self.kernel is a float32 AutoCastVariable
... self.kernel = self.add_weight('kernel', (input_shape[-1], 10))
...
... def call(self, inputs):
... # With mixed precision, self.kernel will be casted to float16
... return tf.linalg.matmul(inputs, self.kernel)
...
>>> layer = SimpleDense(dtype='mixed_float16')
>>> y = layer(tf.ones((10, 10)))
>>> y.dtype
tf.float16
>>> layer.kernel.dtype
tf.float32
A layer author can prevent a variable from being wrapped with an
`AutoCastVariable` by passing `experimental_autocast=False` to `add_weight`,
which is useful if the float32 value of the variable must be accessed within
the layer.
### How to write a layer that supports mixed precision and float64.
For the most part, layers will automatically support mixed precision and
float64 without any additional work, due to the fact the base layer
automatically casts inputs, creates variables of the correct type, and in the
case of mixed precision, wraps variables with `AutoCastVariables`.
The primary case where you need extra work to support mixed precision or
float64 is when you create a new tensor, such as with `tf.ones` or
`tf.random.normal`. In such cases, you must create the tensor of the correct
dtype. For example, if you call `tf.random.normal`, you must pass the compute
dtype, which is the dtype the inputs have been casted to:
>>> class AddRandom(tf.keras.layers.Layer):
...
... def call(self, inputs):
... # We must pass `dtype=inputs.dtype`, otherwise a TypeError may
... # occur when adding `inputs` to `rand`.
... rand = tf.random.normal(shape=inputs.shape, dtype=inputs.dtype)
... return inputs + rand
>>> layer = AddRandom(dtype='mixed_float16')
>>> y = layer(x)
>>> y.dtype
tf.float16
If you did not pass `dtype=inputs.dtype` to `tf.random.normal`, a
`TypeError` would have occurred. This is because the `tf.random.normal`'s
dtype defaults to `"float32"`, but the input dtype is float16. You cannot add
a float32 tensor with a float16 tensor.
"""
def __init__(self, name):
if isinstance(name, tf.DType):
raise TypeError("'name' must be a string, not a DType. "
"Instead, pass DType.name. Got: %s" % (name.name,))
elif not isinstance(name, str):
raise TypeError("'name' must be a string, but got: %s" % (name,))
self._name = name
self._compute_dtype, self._variable_dtype = self._parse_name(name)
if name in ('mixed_float16', 'mixed_bfloat16'):
device_compatibility_check.log_device_compatibility_check(name)
def _parse_name(self, name):
"""Parses a Policy name into a compute and variable dtype.
Args:
name: The name of the policy.
Returns:
The (compute_dtype, variable_dtype) pair.
"""
if name.endswith('_float32_vars'):
error_msg = ('Policies ending in \'_float32_vars\' have been removed '
'from TensorFlow.')
if name in ('infer_float32_vars', 'infer_with_float32_vars'):
error_msg += (' Please use the \'mixed_float16\' or \'mixed_bfloat16\' '
'policy instead.')
elif name == 'float16_with_float32_vars':
error_msg += (' Please use the \'mixed_float16\' policy instead.')
elif name == 'bfloat16_with_float32_vars':
error_msg += (' Please use the \'mixed_bfloat16\' policy instead.')
error_msg += ' Got policy name: \'%s\'' % name
raise ValueError(error_msg)
if name == 'mixed_float16':
return 'float16', 'float32'
elif name == 'mixed_bfloat16':
return 'bfloat16', 'float32'
elif name == '_infer':
# The "_infer" policy exists only for compatibility with TF 1, where
# "_infer" is the default. The behavior matches the behavior of TF 1's
# behavior before policies were introduced. With "_infer", the computation
# and variable dtype are inferred from the first input the first time the
# layer is called. Once the layer is called for the first time, the
# layer's policy will change to the dtype of the first input, and it will
# no longer have the "_infer" policy.
#
# The infer policy should be considered an implementation detail and may
# be removed in the future.
return None, None
try:
dtype = tf.as_dtype(name).name
except TypeError:
error = ("Cannot convert value %s to a mixed precision Policy. "
"Valid policies include 'mixed_float16', 'mixed_bfloat16', "
"and the name of any dtype such as 'float32'." % (name,))
raise ValueError(error)
return dtype, dtype
@property
def variable_dtype(self):
"""The variable dtype of this policy.
This is the dtype layers will create their variables in, unless a layer
explicitly chooses a different dtype. If this is different than
`Policy.compute_dtype`, Layers will cast variables to the compute dtype to
avoid type errors.
Variable regularizers are run in the variable dtype, not the compute dtype.
Returns:
The variable dtype of this policy, as a string.
"""
return self._variable_dtype
@property
def compute_dtype(self):
"""The compute dtype of this policy.
This is the dtype layers will do their computations in. Typically layers
output tensors with the compute dtype as well.
Note that even if the compute dtype is float16 or bfloat16, hardware devices
may not do individual adds, multiplies, and other fundamental operations in
float16 or bfloat16, but instead may do some of them in float32 for numeric
stability. The compute dtype is the dtype of the inputs and outputs of the
TensorFlow ops that the layer executes. Internally, many TensorFlow ops will
do certain internal calculations in float32 or some other device-internal
intermediate format with higher precision than float16/bfloat16, to increase
numeric stability.
For example, a `tf.keras.layers.Dense` layer, when run on a GPU with a
float16 compute dtype, will pass float16 inputs to `tf.linalg.matmul`. But,
`tf.linalg.matmul` will use float32 intermediate math. The performance
benefit of float16 is still apparent, due to increased memory bandwidth and
the fact modern GPUs have specialized hardware for computing matmuls on
float16 inputs while still keeping intermediate computations in float32.
Returns:
The compute dtype of this policy, as a string.
"""
return self._compute_dtype
@property
def name(self):
"""Returns the name of this policy."""
return self._name
def __repr__(self):
return '<Policy "%s">' % self._name
def get_config(self):
return {'name': self.name}
@classmethod
def from_config(cls, config, custom_objects=None):
del custom_objects
if 'loss_scale' in config:
config = config.copy()
# Policy.get_config in TensorFlow 2.3 and below had a loss_scale. We
# silently drop it.
del config['loss_scale']
return cls(**config)
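# A minimal sketch of how policy names map to compute/variable dtypes, per
# `_parse_name` above:
assert Policy('mixed_bfloat16').compute_dtype == 'bfloat16'
assert Policy('mixed_bfloat16').variable_dtype == 'float32'
assert Policy('float64').compute_dtype == 'float64'
assert Policy('float64').variable_dtype == 'float64'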
@keras_export('keras.mixed_precision.experimental.Policy', v1=[])
class PolicyV1(Policy):
"""A deprecated dtype policy for a Keras layer.
Warning: This class is now deprecated and will be removed soon. Please use the
non-experimental class `tf.keras.mixed_precision.Policy` instead.
The difference between this class and the non-experimental class is that this
class has a `loss_scale` field and the non-experimental class does not. The
loss scale is only used by `tf.keras.Model.compile`, which automatically wraps
the optimizer with a `LossScaleOptimizer` if the optimizer is not already a
`LossScaleOptimizer`. For the non-experimental Policy class, `Model.compile`
instead wraps the optimizer with a `LossScaleOptimizer` if `Policy.name` is
"mixed_float16".
When deserializing objects with an experimental policy using functions like
`tf.keras.utils.deserialize_keras_object`, the policy will be deserialized as
the non-experimental `tf.keras.mixed_precision.Policy`, and the loss scale
will silently be dropped. This is so that SavedModels that are generated
with an experimental policy can be restored after the experimental policy is
removed.
"""
def __init__(self, name, loss_scale='auto'):
"""Constructs the policy.
The `name` argument determines the compute and variable dtype, the default
loss scale, and has no additional effect on the Policy. The compute and
variable dtypes can only be specified through `name`, and cannot be
specified directly.
Args:
name: A string. Can be one of the following values:
* Any dtype name, such as 'float32' or 'float64'. Both the variable and
compute dtypes will be that dtype.
* 'mixed_float16' or 'mixed_bfloat16': The compute dtype is float16 or
bfloat16, while the variable dtype is float32. With 'mixed_float16',
a dynamic loss scale is used. These policies are used for mixed
precision training.
loss_scale: A `tf.compat.v1.mixed_precision.LossScale`, an int (which
uses a `FixedLossScale`), the string "dynamic" (which uses a
`DynamicLossScale`), or None (which uses no loss scale). Defaults to
`"auto"`. In the `"auto"` case: 1) if `name` is `"mixed_float16"`, then
use `loss_scale="dynamic"`. 2) otherwise, do not use a loss scale. Only
`tf.keras.Model`s, not layers, use the loss scale, and it is only used
during `Model.fit`, `Model.train_on_batch`, and other similar methods.
"""
super(PolicyV1, self).__init__(name)
if loss_scale == 'auto':
loss_scale = 'dynamic' if name == 'mixed_float16' else None
self._using_default_loss_scale = True
else:
self._using_default_loss_scale = False
if loss_scale and self._compute_dtype not in (None, 'float16'):
tf_logging.warning(
'Creating a Policy with a loss scale is only useful for '
'float16 policies. You passed loss_scale=%r for policy '
'%s. Consider not passing any loss_scale instead.' %
(loss_scale, name))
self._loss_scale = keras_loss_scale_module.get(loss_scale)
@property
def loss_scale(self):
"""Returns the loss scale of this Policy.
Returns:
A `tf.compat.v1.mixed_precision.experimental.LossScale`, or None.
"""
return self._loss_scale
def __repr__(self):
return '<PolicyV1 "%s", loss_scale=%s>' % (self._name, self.loss_scale)
def get_config(self):
config = {
'name': self.name
}
if not self._using_default_loss_scale:
# We only include the loss scale if the default loss scale is not used.
# This allows us to change the loss scale config format without breaking
# users who use the default loss scale.
config['loss_scale'] = keras_loss_scale_module.serialize(self.loss_scale)
return config
@classmethod
def from_config(cls, config, custom_objects=None):
if 'loss_scale' in config and isinstance(config['loss_scale'], dict):
config = config.copy()
config['loss_scale'] = keras_loss_scale_module.deserialize(
config['loss_scale'], custom_objects=custom_objects)
return cls(**config)
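# A minimal sketch of the conversion described above (illustrative names only):
# restoring a PolicyV1 config through the plain Policy class silently drops the
# loss scale.
example_config = PolicyV1('mixed_float16', loss_scale='dynamic').get_config()
assert 'loss_scale' in example_config
example_restored = Policy.from_config(example_config)
assert type(example_restored) is Policy
assert example_restored.name == 'mixed_float16'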
# The current global policy in effect. If None, it means the current value of
# floatx should be used as the policy if the V2 dtype behavior is enabled,
# or "_infer" otherwise.
# TODO(reedwm): Make this thread local?
_global_policy = None
@keras_export('keras.mixed_precision.global_policy',
'keras.mixed_precision.experimental.global_policy', v1=[])
def global_policy():
"""Returns the global dtype policy.
The global policy is the default `tf.keras.mixed_precision.Policy` used for
layers, if no policy is passed to the layer constructor. If no policy has been
set with `keras.mixed_precision.set_global_policy`, this will return a policy
constructed from `tf.keras.backend.floatx()` (floatx defaults to float32).
>>> tf.keras.mixed_precision.global_policy()
<Policy "float32">
>>> tf.keras.layers.Dense(10).dtype_policy # Defaults to the global policy
<Policy "float32">
If TensorFlow 2 behavior has been disabled with
`tf.compat.v1.disable_v2_behavior()`, this will instead return a special
"_infer" policy which infers the dtype from the dtype of the first input the
first time the layer is called. This behavior matches the behavior that
existed in TensorFlow 1.
See `tf.keras.mixed_precision.Policy` for more information on policies.
Returns:
The global Policy.
"""
if _global_policy is None:
if base_layer_utils.v2_dtype_behavior_enabled():
return Policy(backend.floatx())
else:
return Policy('_infer')
return _global_policy
def _check_if_mixed_precision_graph_rewrite_is_enabled(policy):
if tf.__internal__.train.is_mixed_precision_graph_rewrite_enabled():
raise ValueError(
'The global dtype policy cannot be set to "{policy.name}", because the '
'mixed precision graph rewrite has already been enabled.\n'
'At most, one of the following can be called:\n\n'
' 1. tf.compat.v1.train.enable_mixed_precision_graph_rewrite() '
'(You called this first)\n'
' 2. tf.keras.mixed_precision.experimental.set_policy() with a mixed '
'precision policy (You called this second)\n\n'
'You called both functions, which is an error, because both functions '
'enable you to use mixed precision. If in doubt which function to use, '
'use the second, as it supports Eager execution and is more '
'customizable.'.format(policy=policy))
@keras_export('keras.mixed_precision.set_global_policy',
'keras.mixed_precision.experimental.set_policy', v1=[])
def set_policy(policy):
"""Sets the global dtype policy.
The global policy is the default `tf.keras.mixed_precision.Policy` used for
layers, if no policy is passed to the layer constructor.
>>> tf.keras.mixed_precision.set_global_policy('mixed_float16')
>>> tf.keras.mixed_precision.global_policy()
<Policy "mixed_float16">
>>> tf.keras.layers.Dense(10).dtype_policy
<Policy "mixed_float16">
>>> # Global policy is not used if a policy is directly passed to constructor
>>> tf.keras.layers.Dense(10, dtype='float64').dtype_policy
<Policy "float64">
>>> tf.keras.mixed_precision.set_global_policy('float32')
If no global policy is set, layers will instead default to a Policy
constructed from `tf.keras.backend.floatx()`.
To use mixed precision, the global policy should be set to `'mixed_float16'`
or `'mixed_bfloat16'`, so that every layer uses a 16-bit compute dtype and
float32 variable dtype by default.
Only floating point policies can be set as the global policy, such as
`'float32'` and `'mixed_float16'`. Non-floating point policies such as
`'int32'` and `'complex64'` cannot be set as the global policy because most
layers do not support such policies.
See `tf.keras.mixed_precision.Policy` for more information.
Args:
policy: A Policy, or a string that will be converted to a Policy. Can also
be None, in which case the global policy will be constructed from
`tf.keras.backend.floatx()`
"""
global _global_policy
if not base_layer_utils.v2_dtype_behavior_enabled():
raise ValueError('The global policy can only be set in TensorFlow 2 or if '
'V2 dtype behavior has been set. To enable V2 dtype '
'behavior, call '
'"tf.compat.v1.keras.layers.enable_v2_dtype_behavior()"')
if policy is not None and not isinstance(policy, Policy):
policy = Policy(policy)
is_mixed_policy = (policy is not None and
policy.compute_dtype != policy.variable_dtype)
if is_mixed_policy:
_check_if_mixed_precision_graph_rewrite_is_enabled(policy)
if (policy is not None and policy.compute_dtype is not None and
not tf.as_dtype(policy.compute_dtype).is_floating):
raise ValueError('set_policy can only be used to set the global policy to '
'floating-point policies, such as "float32" and '
'"mixed_float16", but got policy: %s'
% (policy.name,))
_global_policy = policy
tf.__internal__.train.set_using_mixed_precision_policy(is_mixed_policy)
# TODO(reedwm): Make this thread local
@contextlib.contextmanager
def policy_scope(policy):
"""A context manager that sets the global Policy under it.
Args:
policy: A Policy, or a string that will be converted to a Policy.
Yields:
Nothing.
"""
old_policy = _global_policy
try:
set_policy(policy)
yield
finally:
set_policy(old_policy)
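# A minimal sketch (assuming float32 floatx and no global policy already set):
# `policy_scope` installs the given policy and restores the previous global
# policy on exit.
assert global_policy().name == 'float32'
with policy_scope('mixed_float16'):
  assert global_policy().name == 'mixed_float16'
assert global_policy().name == 'float32'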
def _is_convertible_to_dtype(dtype):
try:
tf.as_dtype(dtype)
return True
except TypeError:
return False
def _policy_equivalent_to_dtype(policy):
"""Returns True if the Policy is equivalent to a single dtype.
A policy is equivalent to a single dtype if the policy's compute and variable
dtypes are the same and the policy's type is Policy and not a subclass of
Policy (such as PolicyV1).
The "_infer" policy is considered equivalent to a single dtype.
Args:
policy: A Policy.
Returns:
True, if the policy is equivalent to a single dtype.
"""
# We use type() instead of isinstance because a subclass of Policy is never
# equivalent to a dtype.
return (type(policy) == Policy and # pylint: disable=unidiomatic-typecheck
list(policy.get_config().keys()) == ['name'] and
(policy.name == '_infer' or _is_convertible_to_dtype(policy.name)))
def serialize(policy):
if _policy_equivalent_to_dtype(policy):
# We return either None or the policy name for compatibility with older
# versions of Keras. If the policy name is returned, it is a dtype string
# such as 'float32'.
return None if policy.name == '_infer' else policy.name
return generic_utils.serialize_keras_object(policy)
def deserialize(config, custom_objects=None):
if isinstance(config, str) and _is_convertible_to_dtype(config):
return Policy(config)
if config is None:
return Policy('_infer')
module_objects = {'Policy': Policy, 'PolicyV1': Policy}
return generic_utils.deserialize_keras_object(
config,
module_objects=module_objects,
custom_objects=custom_objects,
printable_module_name='dtype policy')
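# A minimal sketch of the round-trip behavior implemented above: plain dtype
# policies serialize to a bare dtype string, "_infer" to None, and mixed
# policies to a class/config dict (matching the expectations in layer_test.py).
assert serialize(Policy('float32')) == 'float32'
assert serialize(Policy('_infer')) is None
assert serialize(Policy('mixed_float16')) == {
    'class_name': 'Policy', 'config': {'name': 'mixed_float16'}}
assert deserialize(None).name == '_infer'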
keras | keras-master/keras/mixed_precision/loss_scale_benchmark.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for LossScaleOptimizer."""
import tensorflow.compat.v2 as tf
import time
from keras.mixed_precision import loss_scale_optimizer
from keras.optimizer_v2 import adam
def _get_strategy(num_gpus):
if num_gpus > 1:
return tf.distribute.MirroredStrategy(
['/GPU:%d' % i for i in range(num_gpus)])
else:
return tf.distribute.get_strategy() # The default strategy
class LossScaleBenchmark(tf.test.Benchmark):
"""Benchmark for loss scaling."""
def _benchmark(self, gradient_type, num_gpus, mode, loss_scaling):
"""Benchmarks loss scaling.
We run a simple model with several scalar variables. The loss is the sum of
all variables. The model is simple because we want to measure only the
performance of loss scaling, not the performance of the model itself.
Args:
gradient_type: "optimizer" or "gradient_tape". How gradients are computed.
"optimizer" uses Optimizer.minimize. "gradient_tape" uses
GradientTape.gradient along with LossScaleOptimizer.get_scaled_loss and
LossScaleOptimizer.get_unscaled_gradients.
num_gpus: The number of GPUs to use. Must be at least 1.
mode: "eager" or "tf_function". "tf_function" causes all computations to
be wrapped in a tf.function, while "eager" runs computations eagerly.
loss_scaling: "fixed", "dynamic", or None. The type of loss scaling to
use. None means use no loss scaling, which is useful as a baseline to
see how much slower loss scaling is in comparison.
"""
ls_str = loss_scaling or 'no_loss_scaling'
name = '%s_%d_GPU_%s_%s' % (gradient_type, num_gpus, mode, ls_str)
with tf.__internal__.eager_context.eager_mode(), _get_strategy(num_gpus).scope() as strategy:
opt = adam.Adam()
if loss_scaling == 'fixed':
loss_scale = tf.mixed_precision.experimental.FixedLossScale(2.)
elif loss_scaling == 'dynamic':
# Make increment_period so high that it's effectively infinite. This
# means the loss scale will never change. Any performance overhead
# from increasing/decreasing the loss scale is typically negligible
# since it happens infrequently, so we only benchmark the common case
# of the loss scale not changing.
increment_period = 1000000
loss_scale = tf.mixed_precision.experimental.DynamicLossScale(
initial_loss_scale=2., increment_period=increment_period)
else:
assert loss_scaling is None
loss_scale = None
if loss_scale:
opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale)
num_vars = 200
num_warmup_iters = 1
num_iters = 20
# By using scalar variables, we reduce overhead of the actual GPU work of
# multiplying variables, dividing gradients, and checking gradients for
# NaNs. Measuring these overheads isn't very useful as there is little we
# can do to reduce them (one such way would be to fuse dividing gradients
# and checking them for NaNs). We still have all other overheads, such as
# all-reducing the `is_finite` values and having a tf.cond or
# tf.while_loop based on whether gradients are NaNs. Currently, these
# other overheads are much more significant than the GPU work.
var_list = [
tf.Variable(i, dtype='float32') for i in range(num_vars)]
def get_loss():
return tf.add_n(var_list)
if gradient_type == 'gradient_tape':
if loss_scale is None:
def minimize_fn():
with tf.GradientTape() as tape:
loss = get_loss()
grads = tape.gradient(loss, var_list)
return opt.apply_gradients(zip(grads, var_list))
else:
def minimize_fn():
with tf.GradientTape() as tape:
loss = get_loss()
scaled_loss = opt.get_scaled_loss(loss)
scaled_grads = tape.gradient(scaled_loss, var_list)
grads = opt.get_unscaled_gradients(scaled_grads)
return opt.apply_gradients(zip(grads, var_list))
else:
assert gradient_type == 'optimizer'
def minimize_fn():
return opt.minimize(get_loss, var_list)
def run_fn():
strategy.run(minimize_fn)
if mode == 'tf_function':
run_fn = tf.function(run_fn)
for _ in range(num_warmup_iters):
run_fn()
start = time.time()
for _ in range(num_iters):
run_fn()
end = time.time()
self.report_benchmark(iters=num_iters,
wall_time=(end - start) / num_iters, name=name)
def _gpus_to_test_with(self):
num_gpus = len(tf.config.list_logical_devices('GPU'))
gpus_to_test_with = []
if num_gpus >= 1:
gpus_to_test_with.append(1)
if num_gpus >= 2:
gpus_to_test_with.append(2)
if num_gpus >= 8:
gpus_to_test_with.append(8)
return gpus_to_test_with
def benchmark_optimizer(self):
for num_gpus in self._gpus_to_test_with():
for mode in 'eager', 'tf_function':
for loss_scaling in None, 'fixed', 'dynamic':
self._benchmark('optimizer', num_gpus, mode, loss_scaling)
def benchmark_gradient_tape(self):
for num_gpus in self._gpus_to_test_with():
for mode in 'eager', 'tf_function':
for loss_scaling in None, 'fixed', 'dynamic':
self._benchmark('gradient_tape', num_gpus, mode, loss_scaling)
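# A minimal sketch (illustrative names only) of the "gradient_tape" path
# described in `_benchmark`'s docstring, using a LossScaleOptimizer with its
# default dynamic loss scale and no distribution strategy:
example_var = tf.Variable(1.)
example_opt = loss_scale_optimizer.LossScaleOptimizer(adam.Adam())
with tf.GradientTape() as example_tape:
  example_scaled_loss = example_opt.get_scaled_loss(2. * example_var)
example_grads = example_opt.get_unscaled_gradients(
    example_tape.gradient(example_scaled_loss, [example_var]))
example_opt.apply_gradients(zip(example_grads, [example_var]))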
if __name__ == '__main__':
tf.test.main()
keras | keras-master/keras/mixed_precision/__init__.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras mixed precision API.
See [the mixed precision guide](
https://www.tensorflow.org/guide/keras/mixed_precision) to learn how to
use the API.
"""
keras | keras-master/keras/mixed_precision/autocast_variable.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains AutoCastVariable, a variable which automatically casts itself."""
import tensorflow.compat.v2 as tf
import threading
from keras.distribute import distributed_training_utils
# _autocast_dtype.dtype is the dtype AutoCastVariables should be cast to, or
# None if AutoCastVariables should not be cast.
_autocast_dtype = threading.local()
def numpy_text(tensor, is_repr=False):
"""Human readable representation of a tensor's numpy value."""
if tensor.dtype.is_numpy_compatible:
# pylint: disable=protected-access
text = repr(tensor._numpy()) if is_repr else str(tensor._numpy())
# pylint: enable=protected-access
else:
text = '<unprintable>'
if '\n' in text:
text = '\n' + text
return text
class AutoCastVariable(tf.Variable, tf.__internal__.types.Tensor):
"""Variable that will cast itself to a different dtype in applicable contexts.
This class wraps a floating-point `tf.Variable`. It emulates the variable
interface and delegates to the wrapped variable, but it additionally will cast
the wrapped variable under an `enable_auto_cast_variables(dtype)` context
manager.
For example:
>>> v = tf.Variable(1.0, dtype=tf.float32)
>>> v = AutoCastVariable(v)
>>> tf.identity(v).dtype
tf.float32
>>> with enable_auto_cast_variables(tf.float16):
... tf.identity(v).dtype
tf.float16
The purpose of this class is to allow Keras layers to create variables in
float32, and automatically cast them to float16 or bfloat16 when the layer is
called.
"""
def __init__(self, variable):
"""Creates an AutoCastVariable instance.
Args:
variable: A floating-point resource variable to wrap.
Raises:
ValueError: If `variable` is not a floating-point resource variable
"""
if not isinstance(variable, tf.Variable):
raise ValueError('variable must be of type tf.ResourceVariable, but got: '
'%s' % variable)
if not variable.dtype.is_floating:
raise ValueError('variable must be a floating point variable but has '
'type: %s' % variable.dtype.name)
self._variable = variable
# 'delegate' means AutoCastVariable.op return self._variable.op, which will
# raise an AttributeError in Eager (as intended). If set to any other value,
# AutoCastVariable.op returns that value instead, which is used to set the
# op attribute in AutoCastVariable.assign().
self._op = 'delegate'
def _should_cast(self):
"""Returns True if this variable should be casted when accessed."""
autocast_dtype = getattr(_autocast_dtype, 'dtype', None)
return autocast_dtype is not None and self.dtype != autocast_dtype
@property
def dtype(self):
"""The dtype of the underlying variable, before any casts are done."""
return self._variable.dtype
@property
def true_dtype(self):
"""Deprecated alias of `dtype`."""
return self._variable.dtype
@property
def _cast_dtype(self):
dtype = getattr(_autocast_dtype, 'dtype', None)
return dtype or self._variable.dtype
def value(self):
val = self._variable.value()
if not self._should_cast():
return val
return tf.cast(val, self._cast_dtype)
def read_value(self):
val = self._variable.read_value()
return tf.cast(val, self._cast_dtype)
def sparse_read(self, indices, name=None):
"""Reads the value of this variable sparsely, using `gather`."""
val = self._variable.sparse_read(indices, name=name)
return tf.cast(val, self._cast_dtype)
def gather_nd(self, indices, name=None):
"""Gather slices of the variable into a Tensor."""
val = self._variable.gather_nd(indices, name=name)
return tf.cast(val, self._cast_dtype)
def __getattr__(self, name):
return getattr(self._variable, name)
def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
"""Converts this variable to a tensor."""
if as_ref:
# This ValueError should not occur in practice since it is impossible to
# pass as_ref=True using public APIs.
raise ValueError('Cannot convert AutoCastVariable to a tensor if '
'as_ref=True is passed to convert_to_tensor')
if not self._should_cast():
return tf.convert_to_tensor(self._variable, dtype=dtype,
name=name)
if dtype is not None and not dtype.is_compatible_with(self._cast_dtype):
raise ValueError(
'Incompatible type conversion requested to type {!r} for '
'AutoCastVariable which is casted to type {!r}'.format(
dtype.name, self._cast_dtype.name))
val = tf.convert_to_tensor(
self._variable, dtype=self._variable.dtype, name=name)
return tf.cast(val, self._cast_dtype)
def _should_act_as_resource_variable(self):
"""Pass resource_variable_ops.is_resource_variable check."""
pass
def __repr__(self):
if tf.executing_eagerly() and not self._in_graph_mode:
repr_str = ("<AutoCastVariable '{v.name}' shape={v.shape} "
'dtype={v.dtype.name} dtype_to_cast_to={v._cast_dtype.name}, '
'numpy={np_repr}>')
return repr_str.format(
v=self, np_repr=numpy_text(self.read_value(), is_repr=True))
else:
repr_str = ("<AutoCastVariable '{v.name}' shape={v.shape} "
'dtype={v.dtype.name} dtype_to_cast_to={v._cast_dtype.name}>')
return repr_str.format(v=self)
# Method delegations: We delegate the following methods to self._variable.
# Each of these methods simply calls the same method on self._variable. The
# base Variable raises NotImplementedError for most of these, so we must
# override them.
#
# We do not define the following methods from Variable for the following
# reasons:
# * 'count_up_to': This method only applies to int variables, which cannot
# be wrapped with an AutoCastVariable.
# * 'ref': Instead we inherit the definition from Variable.
# If we defined and delegated to Variable, the ref of an AutoCastVariable
# would be the same as the ref of the underlying variable, which would be
# strange as they are different Python objects.
def set_shape(self, shape):
return self._variable.set_shape(shape)
@property
def trainable(self):
return self._variable.trainable
@property
def synchronization(self):
return self._variable.synchronization
@property
def aggregation(self):
return self._variable.aggregation
def eval(self, session=None):
return self._variable.eval(session)
def initialized_value(self):
return self._variable.initialized_value()
@property
def initial_value(self):
return self._variable.initial_value
@property
def constraint(self):
return self._variable.constraint
def _apply_assign_update(self,
update_fn,
value,
use_locking=None,
name=None,
read_value=True):
# TODO(b/146181571): This logic can be simplified once
# DistributedVariable.assign returns a DistributedVariable. Currently for
# MirroredStrategy, it returns a Mirrored value.
if tf.compat.v1.executing_eagerly_outside_functions():
assign_op = update_fn(value, use_locking, name, False)
if read_value:
# We create a new AutoCastVariable with the same underlying tf.Variable.
# The new AutoCastVariable is identical except the 'op' attribute is
# defined. This matches the behavior of tf.Variable.assign.
var = create_autocast_variable(self._variable)
var._op = assign_op # pylint:disable=protected-access
return var
return assign_op
# Fallback to wrapping the returned variable in graph mode if possible
assign_var = update_fn(value, use_locking, name, read_value)
if read_value and tf.__internal__.ops.is_resource_variable(assign_var):
return create_autocast_variable(assign_var)
return assign_var
def _apply_update(self, update_fn, *args, **kwargs):
update_var = update_fn(*args, **kwargs)
if tf.compat.v1.executing_eagerly_outside_functions():
return self
# Fallback to wrapping the returned variable in graph mode if possible
if tf.__internal__.ops.is_resource_variable(update_var):
return create_autocast_variable(update_var)
return update_var
def assign(self, value, use_locking=None, name=None, read_value=True):
return self._apply_assign_update(self._variable.assign, value, use_locking,
name, read_value)
def assign_add(self, delta, use_locking=None, name=None, read_value=True):
return self._apply_assign_update(self._variable.assign_add, delta,
use_locking, name, read_value)
def assign_sub(self, delta, use_locking=None, name=None, read_value=True):
return self._apply_assign_update(self._variable.assign_sub, delta,
use_locking, name, read_value)
def scatter_sub(self, sparse_delta, use_locking=False, name=None):
return self._apply_update(self._variable.scatter_sub, sparse_delta,
use_locking, name)
def scatter_add(self, sparse_delta, use_locking=False, name=None):
return self._apply_update(self._variable.scatter_add, sparse_delta,
use_locking, name)
def scatter_max(self, sparse_delta, use_locking=False, name=None):
return self._apply_update(self._variable.scatter_max, sparse_delta,
use_locking, name)
def scatter_min(self, sparse_delta, use_locking=False, name=None):
return self._apply_update(self._variable.scatter_min, sparse_delta,
use_locking, name)
def scatter_mul(self, sparse_delta, use_locking=False, name=None):
return self._apply_update(self._variable.scatter_mul, sparse_delta,
use_locking, name)
def scatter_div(self, sparse_delta, use_locking=False, name=None):
return self._apply_update(self._variable.scatter_div, sparse_delta,
use_locking, name)
def scatter_update(self, sparse_delta, use_locking=False, name=None):
return self._apply_update(self._variable.scatter_update, sparse_delta,
use_locking, name)
def batch_scatter_update(self, sparse_delta, use_locking=False, name=None):
return self._apply_update(self._variable.batch_scatter_update, sparse_delta,
use_locking, name)
def scatter_nd_sub(self, indices, updates, name=None):
return self._apply_update(self._variable.scatter_nd_sub, indices, updates,
name)
def scatter_nd_add(self, indices, updates, name=None):
return self._apply_update(self._variable.scatter_nd_add, indices, updates,
name)
def scatter_nd_update(self, indices, updates, name=None):
return self._apply_update(self._variable.scatter_nd_update, indices,
updates, name)
def load(self, value, session=None):
return self._variable.load(value, session)
@property
def name(self):
return self._variable.name
@property
def _shared_name(self):
return self._variable._shared_name # pylint:disable=protected-access
@property
def initializer(self):
return self._variable.initializer
@property
def device(self):
return self._variable.device
@property
def op(self):
if self._op == 'delegate':
return self._variable.op
return self._op
def _as_graph_element(self):
graph_element = self._variable._as_graph_element() # pylint:disable=protected-access
if graph_element is None:
return self._op
return graph_element
@property
def graph(self):
return self._variable.graph
@property
def shape(self):
return self._variable.shape
def get_shape(self):
return self._variable.get_shape()
def _gather_saveables_for_checkpoint(self):
# By delegating this method to the wrapped variable, checkpoints with
# AutoCastVariables are identical to checkpoints with normal variables.
# Therefore models checkpointed with AutoCastVariables can be restored on
# models with normal variables, and vice versa.
return self._variable._gather_saveables_for_checkpoint() # pylint:disable=protected-access
def _map_resources(self, save_options):
    # By delegating this method to the wrapped variable, SavedModels with
    # AutoCastVariables are identical to SavedModels with normal variables.
obj_map, resource_map = self._variable._map_resources(save_options) # pylint:disable=protected-access
obj_map[self] = obj_map[self._variable]
return obj_map, resource_map
# TODO(reedwm): Maybe encode the fact the variable is an AutoCastVariable in
# to_proto().
def to_proto(self, export_scope=None):
return self._variable.to_proto(export_scope)
def from_proto(self, variable_def, import_scope=None):
return self._variable.from_proto(variable_def, import_scope)
# Delegate the private attributes _handle_name and _initializer_op to
# self._variable. SavedModel sets these attributes when loading a model. For
# example, it sets _handle_name here:
# https://github.com/tensorflow/tensorflow/blob/db26bd574fa95b5bdd53c08463dd19407cc0297e/tensorflow/python/keras/saving/saved_model/load.py#L211
# We need to expose these attributes on AutoCastVariable as well for
# SavedModel to work properly.
# TODO(reedwm/kathywu): Find a better way to support SavedModel. Exposing
# private attributes is hacky and difficult to maintain.
@property
def _handle_name(self):
return self._variable._handle_name # pylint: disable=protected-access
@_handle_name.setter
def _handle_name(self, handle_name):
self._variable._handle_name = handle_name # pylint: disable=protected-access
@property
def _initializer_op(self):
return self._variable._initializer_op # pylint: disable=protected-access
@_initializer_op.setter
def _initializer_op(self, initializer_op):
self._variable._initializer_op = initializer_op # pylint: disable=protected-access
# Operator overloads:
# Note we only overload operators that support floating-point types, as
# non-float variables cannot be wrapped with an AutoCastVariable.
# Also note: We call read_value() instead of value(), because value() causes
# gradients not to work properly when TPUStrategy is used: b/143380936
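  # As an illustrative sketch (not part of the class API), these overloads mean
  # arithmetic on an AutoCastVariable produces a plain tensor in the cast
  # dtype. For example, assuming `v` wraps a float32 variable and autocasting
  # to float16 is currently active:
  #
  #   result = v + 1.0          # equivalent to v.read_value() + 1.0
  #   assert result.dtype == tf.float16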
def __add__(self, o):
return self.read_value() + o
def __radd__(self, o):
return o + self.read_value()
def __sub__(self, o):
return self.read_value() - o
def __rsub__(self, o):
return o - self.read_value()
def __mul__(self, o):
return self.read_value() * o
def __rmul__(self, o):
return o * self.read_value()
def __truediv__(self, o):
return self.read_value() / o
def __rtruediv__(self, o):
return o / self.read_value()
def __floordiv__(self, o):
return self.read_value() // o
def __rfloordiv__(self, o):
return o // self.read_value()
def __mod__(self, o):
return self.read_value() % o
def __rmod__(self, o):
return o % self.read_value()
def __lt__(self, o):
return self.read_value() < o
def __le__(self, o):
return self.read_value() <= o
def __gt__(self, o):
return self.read_value() > o
def __ge__(self, o):
return self.read_value() >= o
def __getitem__(self, o):
return self.read_value()[o]
def __pow__(self, o, modulo=None):
return pow(self.read_value(), o, modulo)
def __rpow__(self, o):
return pow(o, self.read_value())
def __neg__(self):
return -self.read_value() # pylint: disable=invalid-unary-operand-type
def __abs__(self):
return abs(self.read_value())
def __div__(self, o):
try:
return self.read_value().__div__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __rdiv__(self, o):
try:
return self.read_value().__rdiv__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __matmul__(self, o):
try:
return self.read_value().__matmul__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __rmatmul__(self, o):
try:
return self.read_value().__rmatmul__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
# pylint: enable=multiple-statements
tf.register_tensor_conversion_function(AutoCastVariable,
AutoCastVariable._dense_var_to_tensor) # pylint:disable=protected-access
def create_autocast_variable(variable):
"""Creates an AutoCastVariable that wraps another variable.
This typically just returns `AutoCastVariable(variable)`. But, if the variable
is a DistributedVariable or one of its subclasses, we instead dynamically
create a class that subclasses from both AutoCastVariable and
variable.__class__. This is so the returned variable will still pass
`isinstance(variable, variable.__class__)`, which is required for
  DistributedVariables and their subclasses to work properly.
Args:
variable: A floating-point resource variable to wrap.
Returns:
An AutoCastVariable that wraps the variable.
"""
if not distributed_training_utils.is_distributed_variable(variable):
return AutoCastVariable(variable)
class AutoCastDistributedVariable(AutoCastVariable, variable.__class__):
"""An AutoCastVariable that also subclasses from variable.__class__.
variable.__class__ is either a DistributedVariable or an
AggregatingVariable.
"""
def __repr__(self):
# pylint: disable=missing-format-attribute
return ('<AutoCastDistributedVariable dtype={v.dtype.name} '
'dtype_to_cast_to={v._cast_dtype.name} '
'inner_variable={v._variable}>'
).format(v=self)
# pylint: enable=missing-format-attribute
return AutoCastDistributedVariable(variable)
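# Example (an illustrative sketch, not part of the public API): wrapping a
# plain float32 `tf.Variable` returns an `AutoCastVariable`; distributed
# variables instead get the dynamically created subclass above, so isinstance
# checks against their original class keep working.
#
#   v = tf.Variable(1., dtype=tf.float32)
#   ac_v = create_autocast_variable(v)
#   assert isinstance(ac_v, AutoCastVariable)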
class enable_auto_cast_variables: # pylint:disable=invalid-name
"""Context manager which enables the autocasting of `AutoCastVariable`s.
Under this context manager, `AutoCastVariable`s will be cast to `dtype` if
`dtype` is floating-point. Otherwise, `AutoCastVariable`s will not be cast.
"""
__slots__ = ['_dtype', '_prev_dtype']
def __init__(self, dtype):
if dtype and not dtype.is_floating:
dtype = None
self._dtype = dtype
def __enter__(self):
self._prev_dtype = getattr(_autocast_dtype, 'dtype', None)
_autocast_dtype.dtype = self._dtype
def __exit__(self, type_arg, value_arg, traceback_arg):
_autocast_dtype.dtype = self._prev_dtype
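# Usage sketch (illustrative only; assumes an AutoCastVariable wrapping a
# float32 variable). Inside the context manager, reading the variable yields a
# float16 tensor; outside it, no cast is applied:
#
#   v = create_autocast_variable(tf.Variable(1., dtype=tf.float32))
#   with enable_auto_cast_variables(tf.float16):
#     assert tf.identity(v).dtype == tf.float16
#   assert tf.identity(v).dtype == tf.float32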
| 19,740 | 34.958106 | 146 | py |
keras | keras-master/keras/mixed_precision/get_layer_policy_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the get_layer_policy function."""
import tensorflow.compat.v2 as tf
from keras.engine import base_layer_utils
from keras.layers import core
from keras.mixed_precision import get_layer_policy
from keras.mixed_precision import policy
class GetLayerPolicyTest(tf.test.TestCase):
def test_get_layer_policy(self):
layer = core.Dense(4)
self.assertEqual(get_layer_policy.get_layer_policy(layer).name, 'float32')
p = policy.Policy('mixed_float16')
layer = core.Dense(4, dtype=p)
self.assertIs(get_layer_policy.get_layer_policy(layer), p)
layer = core.Dense(4, dtype='float64')
self.assertEqual(get_layer_policy.get_layer_policy(layer).name, 'float64')
def test_error(self):
with self.assertRaisesRegex(
ValueError, 'get_policy can only be called on a layer, but got: 1'):
get_layer_policy.get_layer_policy(1)
if __name__ == '__main__':
base_layer_utils.enable_v2_dtype_behavior()
tf.test.main()
| 1,650 | 34.12766 | 80 | py |
keras | keras-master/keras/mixed_precision/loss_scale_optimizer.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the loss scaling optimizer class."""
from keras import backend
from keras import optimizers
from keras.mixed_precision import loss_scale as keras_loss_scale_module
from keras.optimizer_v2 import optimizer_v2
from keras.optimizer_v2 import utils as optimizer_utils
import tensorflow.compat.v2 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.keras.optimizer_v2 import optimizer_v2 as legacy_optimizer
from tensorflow.python.platform import tf_logging
from tensorflow.python.util.tf_export import keras_export
class _UnwrapPreventer:
"""Wrapper that DistributionStrategy will not unwrap.
Typically, DistributionStrategy will unwrap values when going from a cross-
replica context to a replica context via `call_for_each_replica`. This class
is a wrapper that DistributionStrategy will not unwrap, so it can be used to
prevent it from unwrapping a value.
TODO(reedwm): Find/implement a better way of preventing values from being
unwrapped by DistributionStrategy
"""
__slots__ = ['value']
def __init__(self, value):
self.value = value
class _DelegatingTrackableMixin:
"""A mixin that delegates all Trackable methods to another trackable object.
This class must be used with multiple inheritance. A class that subclasses
Trackable can also subclass this class, which causes all Trackable methods to
be delegated to the trackable object passed in the constructor.
A subclass can use this mixin to appear as if it were the trackable passed to
the constructor, from a Checkpoint's perspective. LossScaleOptimizer uses this
mixin, so that the checkpoint format for a LossScaleOptimizer is identical to
the checkpoint format for a normal optimizer. This allows a model to be saved
with a normal Optimizer and restored with a LossScaleOptimizer, or vice versa.
The only difference in checkpoint format is that the loss scale is also saved
with a LossScaleOptimizer.
"""
def __init__(self, trackable_obj):
self._trackable = trackable_obj
# pylint: disable=protected-access
@property
def _setattr_tracking(self):
return self._trackable._setattr_tracking
@_setattr_tracking.setter
def _setattr_tracking(self, value):
self._trackable._setattr_tracking = value
@property
def _update_uid(self):
return self._trackable._update_uid
@_update_uid.setter
def _update_uid(self, value):
self._trackable._update_uid = value
@property
def _unconditional_checkpoint_dependencies(self):
return self._trackable._unconditional_checkpoint_dependencies
@property
def _unconditional_dependency_names(self):
return self._trackable._unconditional_dependency_names
@property
def _name_based_restores(self):
return self._trackable._name_based_restores
def _maybe_initialize_trackable(self):
return self._trackable._maybe_initialize_trackable()
@property
def _object_identifier(self):
return self._trackable._object_identifier
@property
def _tracking_metadata(self):
return self._trackable._tracking_metadata
def _no_dependency(self, value):
return self._trackable._no_dependency(value)
def _name_based_attribute_restore(self, checkpoint):
return self._trackable._name_based_attribute_restore(checkpoint)
@property
def _checkpoint_dependencies(self):
return self._trackable._checkpoint_dependencies
@property
def _deferred_dependencies(self):
return self._trackable._deferred_dependencies
  def _lookup_dependency(self, name):
    return self._trackable._lookup_dependency(name)
def _add_variable_with_custom_getter(self,
name,
shape=None,
dtype=tf.float32,
initializer=None,
getter=None,
overwrite=False,
**kwargs_for_getter):
return self._trackable._add_variable_with_custom_getter(
name, shape, dtype, initializer, getter, overwrite, **kwargs_for_getter)
def _preload_simple_restoration(self, name):
return self._trackable._preload_simple_restoration(name)
def _track_trackable(self, trackable, name, overwrite=False): # pylint: disable=redefined-outer-name
return self._trackable._track_trackable(trackable, name, overwrite)
def _handle_deferred_dependencies(self, name, trackable): # pylint: disable=redefined-outer-name
return self._trackable._handle_deferred_dependencies(name, trackable)
def _restore_from_checkpoint_position(self, checkpoint_position):
return self._trackable._restore_from_checkpoint_position(
checkpoint_position)
def _single_restoration_from_checkpoint_position(self, checkpoint_position,
visit_queue):
return self._trackable._single_restoration_from_checkpoint_position(
checkpoint_position, visit_queue)
def _gather_saveables_for_checkpoint(self):
return self._trackable._gather_saveables_for_checkpoint()
def _list_extra_dependencies_for_serialization(self, serialization_cache):
return self._trackable._list_extra_dependencies_for_serialization(
serialization_cache)
def _list_functions_for_serialization(self, serialization_cache):
return self._trackable._list_functions_for_serialization(
serialization_cache)
# pylint: enable=protected-access
def _is_all_finite(grads):
"""Returns a scalar boolean tensor indicating if all gradients are finite."""
is_finite_per_grad = [
tf.reduce_all(tf.math.is_finite(g)) for g in grads if g is not None
]
return tf.reduce_all(is_finite_per_grad)
def _op_in_graph_mode(tensor):
"""Returns the tensor's op in graph mode, or the tensor in eager mode.
This is useful because sometimes an op is needed in graph mode instead of a
tensor. In eager mode, there are no ops.
Args:
tensor: A tensor.
Returns:
The tensor's op in graph mode. The tensor in eager mode.
"""
if tf.executing_eagerly():
return tensor
return tensor.op
def _assign_if_finite(var, value):
"""Assigns a value to a variable if the value is finite."""
return tf.cond(
tf.math.is_finite(value), lambda: _op_in_graph_mode(var.assign(value)),
tf.no_op)
class _DynamicLossScaleState(tf.__internal__.tracking.Trackable):
"""The state of a dynamic loss scale."""
def __init__(self,
initial_loss_scale,
growth_steps,
multiplier):
"""Creates the dynamic loss scale."""
super(_DynamicLossScaleState, self).__init__()
self._initial_loss_scale = float(initial_loss_scale)
self._growth_steps = int(growth_steps)
self._multiplier = float(multiplier)
self._weights = {}
self._current_loss_scale = self._add_weight(
name='current_loss_scale',
dtype=tf.float32,
initial_value=self._initial_loss_scale)
# The number of consecutive steps with finite gradients since the last
# nonfinite gradient or change in loss scale. The name is 'good_steps' for
# backwards compatibility with older checkpoints.
self._counter = self._add_weight(
name='good_steps', dtype=tf.int64, initial_value=0)
def _add_weight(self, name, initial_value, dtype=None):
"""Adds a weight to this loss scale.
Args:
name: Variable name.
initial_value: The variable's initial value.
dtype: The type of the variable.
Returns:
A variable.
Raises:
RuntimeError: If a weight with `name` has already been added.
"""
variable = tf.Variable(
initial_value=initial_value,
name=name,
dtype=dtype,
trainable=False,
synchronization=tf.VariableSynchronization.AUTO,
# Set aggregation to NONE, as loss scaling variables should never be
# aggregated.
aggregation=tf.VariableAggregation.NONE)
if tf.executing_eagerly():
graph_key = None
else:
graph = tf.compat.v1.get_default_graph()
graph_key = graph._graph_key # pylint: disable=protected-access
key = (name, graph_key)
self._weights[key] = variable
self._handle_deferred_dependencies(name=name, trackable=variable)
backend.track_variable(variable)
return variable
@property
def _checkpoint_dependencies(self):
"""From Trackable. Gather graph-specific weights to save."""
if tf.executing_eagerly():
graph_key = None
else:
graph = tf.compat.v1.get_default_graph()
graph_key = graph._graph_key # pylint: disable=protected-access
weights = []
for (name, g), v in sorted(self._weights.items(), key=lambda i: i[0][0]):
if g == graph_key:
weights.append(tf.__internal__.tracking.TrackableReference(name=name, ref=v))
return (super(_DynamicLossScaleState, self)._checkpoint_dependencies +
weights)
def _lookup_dependency(self, name):
"""From Trackable. Find a weight in the current graph."""
unconditional = super(_DynamicLossScaleState, self)._lookup_dependency(name)
if unconditional is not None:
return unconditional
if tf.executing_eagerly():
graph_key = None
else:
graph = tf.compat.v1.get_default_graph()
graph_key = graph._graph_key # pylint: disable=protected-access
return self._weights.get((name, graph_key), None)
@property
def initial_loss_scale(self):
return self._initial_loss_scale
@property
def growth_steps(self):
return self._growth_steps
@property
def multiplier(self):
return self._multiplier
@property
def current_loss_scale(self):
"""Returns the current loss scale as a float32 `tf.Variable`."""
return self._current_loss_scale
@property
def counter(self):
"""Returns the counter as a float32 `tf.Variable`."""
return self._counter
def __call__(self):
"""Returns the current loss scale as a scalar `float32` tensor."""
return tf.convert_to_tensor(self._current_loss_scale)
def update(self, grads):
"""Updates the value of the loss scale.
Args:
grads: A nested structure of unscaled gradients, each which is an
all-reduced gradient of the loss with respect to a weight.
Returns:
update_op: In eager mode, None. In graph mode, an op to update the loss
scale.
should_apply_gradients: Either a bool or a scalar boolean tensor. If
False, the caller should skip applying `grads` to the variables this
step.
"""
grads = tf.nest.flatten(grads)
if tf.distribute.has_strategy(
) and tf.distribute.in_cross_replica_context():
distribution = tf.distribute.get_strategy()
is_finite_per_replica = distribution.extended.call_for_each_replica(
_is_all_finite, args=(grads,))
# Each replica computed the same `is_finite` value, since `grads` is
# all-reduced across replicas. Arbitrarily take `is_finite` from the first
# replica.
is_finite = (
distribution.experimental_local_results(is_finite_per_replica)[0])
else:
is_finite = _is_all_finite(grads)
def update_if_finite_grads():
"""Update assuming the gradients are finite."""
def incr_loss_scale():
new_loss_scale = self.current_loss_scale * self.multiplier
return tf.group(
_assign_if_finite(self.current_loss_scale, new_loss_scale),
self.counter.assign(0))
return tf.cond(
self.counter + 1 >= self.growth_steps,
incr_loss_scale,
lambda: _op_in_graph_mode(self.counter.assign_add(1)))
def update_if_not_finite_grads():
"""Update assuming the gradients are nonfinite."""
new_loss_scale = tf.maximum(
self.current_loss_scale / self.multiplier, 1)
return tf.group(
self.counter.assign(0),
self.current_loss_scale.assign(new_loss_scale))
update_op = tf.cond(is_finite,
update_if_finite_grads,
update_if_not_finite_grads)
should_apply_gradients = is_finite
return update_op, should_apply_gradients
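# The update rule above amounts to the following pure-Python sketch
# (illustrative only; `scale`, `counter`, `growth_steps` and `multiplier` stand
# in for the corresponding state):
#
#   if all_gradients_finite:
#     counter += 1
#     if counter >= growth_steps:
#       scale *= multiplier  # e.g. doubled after `growth_steps` good steps
#       counter = 0
#     apply_this_step = True
#   else:
#     scale = max(scale / multiplier, 1)
#     counter = 0
#     apply_this_step = False  # gradients are skipped this step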
# See LossScaleOptimizer docstring for why this is so big
_DEFAULT_INITIAL_SCALE = 2 ** 15
_DEFAULT_GROWTH_STEPS = 2000
# pylint: disable=g-classes-have-attributes
@keras_export('keras.mixed_precision.LossScaleOptimizer')
class LossScaleOptimizer(_DelegatingTrackableMixin, optimizer_v2.OptimizerV2):
"""An optimizer that applies loss scaling to prevent numeric underflow.
Loss scaling is a technique to prevent numeric underflow in intermediate
gradients when float16 is used. To prevent underflow, the loss is multiplied
(or "scaled") by a certain factor called the "loss scale", which causes
intermediate gradients to be scaled by the loss scale as well. The final
gradients are divided (or "unscaled") by the loss scale to bring them back to
their original value.
`LossScaleOptimizer` wraps another optimizer and applies loss scaling to it.
By default, the loss scale is dynamically updated over time so you do not have
to choose the loss scale. The `minimize` method automatically scales the loss,
unscales the gradients, and updates the loss scale so all you have to do is
wrap your optimizer with a `LossScaleOptimizer` if you use `minimize`. For
example:
>>> opt = tf.keras.optimizers.SGD(0.25)
>>> opt = tf.keras.mixed_precision.LossScaleOptimizer(opt)
>>> var = tf.Variable(1.)
>>> loss_fn = lambda: var ** 2
  >>> # 'minimize' applies loss scaling and updates the loss scale.
>>> opt.minimize(loss_fn, var_list=var)
>>> var.numpy()
0.5
If a `tf.GradientTape` is used to compute gradients instead of `minimize`, you
must scale the loss and gradients manually. This can be done with the
`LossScaleOptimizer.get_scaled_loss` and
`LossScaleOptimizer.get_unscaled_gradients` methods. For example:
>>> with tf.GradientTape() as tape:
... loss = loss_fn()
... scaled_loss = opt.get_scaled_loss(loss)
>>> scaled_grad = tape.gradient(scaled_loss, var)
>>> (grad,) = opt.get_unscaled_gradients([scaled_grad])
>>> opt.apply_gradients([(grad, var)]) # Loss scale is updated here
>>> var.numpy()
0.25
Warning: If you forget to call `get_scaled_loss` or `get_unscaled_gradients`
(or both) when using a `tf.GradientTape`, the model will likely converge to a
worse quality. Please make sure you call each function exactly once.
When mixed precision with float16 is used, there is typically no risk of
underflow affecting model quality if loss scaling is properly used. See
[the mixed precision guide](
https://www.tensorflow.org/guide/keras/mixed_precision) for more information
on how to use mixed precision.
Args:
inner_optimizer: The `tf.keras.optimizers.Optimizer` instance to wrap.
dynamic: Bool indicating whether dynamic loss scaling is used. Defaults to
True. If True, the loss scale will be dynamically updated over time using
an algorithm that keeps the loss scale at approximately its optimal value.
If False, a single fixed loss scale is used and `initial_scale` must be
specified, which is used as the loss scale. Recommended to keep as True,
as choosing a fixed loss scale can be tricky. Currently, there is a small
performance overhead to dynamic loss scaling compared to fixed loss
scaling.
initial_scale: The initial loss scale. If `dynamic` is True, this defaults
to `2 ** 15`. If `dynamic` is False, this must be specified and acts as
the sole loss scale, as the loss scale does not change over time. When
      dynamic loss scaling is used, it is better for this to be a very high
      number, because a loss scale that is too high gets lowered far more
      quickly than a loss scale that is too low gets raised.
dynamic_growth_steps: With dynamic loss scaling, every
`dynamic_growth_steps` steps with finite gradients, the loss scale is
doubled. Defaults to 2000. If a nonfinite gradient is encountered, the
count is reset back to zero, gradients are skipped that step, and the loss
scale is halved. The count can be queried with
`LossScaleOptimizer.dynamic_counter`. This argument can only be specified
if `dynamic` is True.
`LossScaleOptimizer` will occasionally skip applying gradients to the
variables, in which case the trainable variables will not change that step.
This is done because the dynamic loss scale will sometimes be raised too
high, causing overflow in the gradients. Typically, the first 2 to 15 steps of
the model are skipped as the initial loss scale is very high, but afterwards
steps will only be skipped on average 0.05% of the time (the fraction of steps
skipped is `1 / dynamic_growth_steps`).
`LossScaleOptimizer` delegates all public `Optimizer` methods to the inner
optimizer. Additionally, in methods `minimize` and `get_gradients`, it scales
the loss and unscales the gradients. In methods `minimize` and
`apply_gradients`, it additionally updates the loss scale and skips applying
gradients if any gradient has a nonfinite value.
### Hyperparameters
Hyperparameters can be accessed and set on the LossScaleOptimizer, which will
be delegated to the wrapped optimizer.
>>> opt = tf.keras.optimizers.Adam(beta_1=0.8, epsilon=1e-5)
>>> opt = tf.keras.mixed_precision.LossScaleOptimizer(opt)
>>> opt.beta_1 # Equivalent to `opt.inner_optimizer.beta_1`
0.8
>>> opt.beta_1 = 0.7 # Equivalent to `opt.inner_optimizer.beta_1 = 0.7`
>>> opt.beta_1
0.7
>>> opt.inner_optimizer.beta_1
0.7
However, accessing or setting non-hyperparameters is not delegated to the
LossScaleOptimizer. In an Adam optimizer, `beta_1` is a hyperparameter but
`epsilon` is not, as the Adam optimizer only calls `Optimizer._set_hyper` on
`beta_1`.
>>> opt.inner_optimizer.epsilon
1e-5
>>> opt.epsilon
Traceback (most recent call last):
...
AttributeError: 'LossScaleOptimizer' object has no attribute 'epsilon'
>>> opt.epsilon = 1e-4 # This does NOT set epsilon on `opt.inner_optimizer`
>>> opt.inner_optimizer.epsilon
  1e-5
In the above example, despite epsilon being set on the LossScaleOptimizer, the
old epsilon value will still be used when training as epsilon was not set on
the inner optimizer.
"""
_HAS_AGGREGATE_GRAD = True
def __init__(self, inner_optimizer, dynamic=True, initial_scale=None,
dynamic_growth_steps=None):
if not isinstance(inner_optimizer, optimizer_v2.OptimizerV2):
msg = ('"inner_optimizer" must be an instance of '
'`tf.keras.optimizers.Optimizer`, but got: %s. ' % inner_optimizer)
if isinstance(inner_optimizer, legacy_optimizer.OptimizerV2):
msg += ('Please make sure "inner_optimizer" is not an instance of '
'`tensorflow.python.keras.optimizers`, which is '
'the legacy keras code and will be removed in future release. '
'Please use the tf.keras public API instead.')
raise TypeError(msg)
if not isinstance(dynamic, bool):
# Catch errors if a user incorrectly passes a string or float to the
      # second argument, as this is commonly done for
# LossScaleOptimizerV1.
raise TypeError('"dynamic" argument to LossScaleOptimizer.__init__ must '
'be a bool, but got: %r' % (dynamic,))
if isinstance(inner_optimizer, LossScaleOptimizer):
raise TypeError('LossScaleOptimizer cannot wrap another '
'LossScaleOptimizer, but got: %s' % (inner_optimizer,))
self._raise_if_strategy_unsupported()
if getattr(inner_optimizer, '_is_wrapped_by_loss_scale_optimizer', False):
# TODO(reedwm): Maybe support this. The difficulty is that LSO has the
# same checkpoint format as the inner optimizer, so multiple LSOs wrapping
      # the same optimizer cause the checkpointing logic to become confused.
raise ValueError('"inner_optimizer" is already wrapped by a '
'LossScaleOptimizer. An optimizer can only be wrapped '
'by a single LossScaleOptimizer')
self._optimizer = inner_optimizer
self._optimizer._is_wrapped_by_loss_scale_optimizer = True
# We don't call super().__init__, since we do not want to call OptimizerV2's
# constructor.
_DelegatingTrackableMixin.__init__(self, self._optimizer)
if dynamic:
if initial_scale is None:
initial_scale = _DEFAULT_INITIAL_SCALE
if dynamic_growth_steps is None:
dynamic_growth_steps = _DEFAULT_GROWTH_STEPS
self._loss_scale = _DynamicLossScaleState(
initial_scale, dynamic_growth_steps, multiplier=2)
self._track_trackable(self._loss_scale, 'loss_scale')
else:
if initial_scale is None:
raise ValueError('"initial_scale" must be specified if "dynamic" is '
'False')
self._loss_scale = float(initial_scale)
if dynamic_growth_steps is not None:
raise ValueError('"dynamic_growth_steps" must be None if "dynamic" '
'is False, but got: %s' % (dynamic_growth_steps,))
# To support restoring TensorFlow 2.2 checkpoints.
self._track_trackable(FakeOptimizerForRestoration(self._optimizer),
'base_optimizer')
@property
def dynamic(self):
"""Bool indicating whether dynamic loss scaling is used."""
return isinstance(self._loss_scale, _DynamicLossScaleState)
@property
def loss_scale(self):
"""The current loss scale as a float32 scalar tensor."""
if isinstance(self._loss_scale, _DynamicLossScaleState):
return tf.convert_to_tensor(
self._loss_scale.current_loss_scale)
else:
return tf.convert_to_tensor(self._loss_scale)
@property
def dynamic_counter(self):
"""The number of steps since the loss scale was last increased or decreased.
This is None if `LossScaleOptimizer.dynamic` is False.
The counter is incremented every step. Once it reaches
`LossScaleOptimizer.dynamic_growth_steps`, the loss scale will be doubled
and the counter will be reset back to zero. If nonfinite gradients are
encountered, the loss scale will be halved and the counter will be reset
back to zero.
"""
if isinstance(self._loss_scale, _DynamicLossScaleState):
return self._loss_scale.counter
else:
return None
@property
def initial_scale(self):
"""The initial loss scale.
If `LossScaleOptimizer.dynamic` is False, this is the same number as
`LossScaleOptimizer.loss_scale`, as the loss scale never changes.
"""
if isinstance(self._loss_scale, _DynamicLossScaleState):
return self._loss_scale.initial_loss_scale
else:
return self._loss_scale
@property
def dynamic_growth_steps(self):
"""The number of steps it takes to increase the loss scale.
This is None if `LossScaleOptimizer.dynamic` is False.
Every `dynamic_growth_steps` consecutive steps with finite gradients, the
loss scale is increased.
"""
if isinstance(self._loss_scale, _DynamicLossScaleState):
return self._loss_scale.growth_steps
else:
return None
@property
def inner_optimizer(self):
"""The optimizer that this LossScaleOptimizer is wrapping."""
return self._optimizer
def get_scaled_loss(self, loss):
"""Scales the loss by the loss scale.
This method is only needed if you compute gradients manually, e.g. with
`tf.GradientTape`. In that case, call this method to scale the loss before
passing the loss to `tf.GradientTape`. If you use
`LossScaleOptimizer.minimize` or `LossScaleOptimizer.get_gradients`, loss
scaling is automatically applied and this method is unneeded.
If this method is called, `get_unscaled_gradients` should also be called.
See the `tf.keras.mixed_precision.LossScaleOptimizer` doc for
an example.
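    Example (an illustrative sketch; the SGD optimizer and the fixed scale of
    512 below are arbitrary choices, not defaults):
    >>> opt = tf.keras.mixed_precision.LossScaleOptimizer(
    ...     tf.keras.optimizers.SGD(), dynamic=False, initial_scale=512.0)
    >>> opt.get_scaled_loss(tf.constant(2.0)).numpy()
    1024.0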
Args:
loss: The loss, which will be multiplied by the loss scale. Can either be
a tensor or a callable returning a tensor.
Returns:
`loss` multiplied by `LossScaleOptimizer.loss_scale`.
"""
if callable(loss):
def new_loss():
loss_val = loss()
return loss_val * tf.cast(self.loss_scale, loss_val.dtype)
return new_loss
else:
return loss * tf.cast(self.loss_scale, loss.dtype)
def get_unscaled_gradients(self, grads):
"""Unscales the gradients by the loss scale.
This method is only needed if you compute gradients manually, e.g. with
`tf.GradientTape`. In that case, call this method to unscale the gradients
after computing them with `tf.GradientTape`. If you use
`LossScaleOptimizer.minimize` or `LossScaleOptimizer.get_gradients`, loss
scaling is automatically applied and this method is unneeded.
If this method is called, `get_scaled_loss` should also be called. See
the `tf.keras.mixed_precision.LossScaleOptimizer` doc for an
example.
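    Example (an illustrative sketch; assumes the same arbitrary fixed loss
    scale of 512 as the `get_scaled_loss` example):
    >>> opt = tf.keras.mixed_precision.LossScaleOptimizer(
    ...     tf.keras.optimizers.SGD(), dynamic=False, initial_scale=512.0)
    >>> (grad,) = opt.get_unscaled_gradients([tf.constant(1024.0)])
    >>> grad.numpy()
    2.0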
Args:
grads: A list of tensors, each which will be divided by the loss scale.
Can have None values, which are ignored.
Returns:
A new list the same size as `grads`, where every non-None value in `grads`
is divided by `LossScaleOptimizer.loss_scale`.
"""
loss_scale_reciprocal = 1. / self.loss_scale
return [
_multiply_gradient(g, loss_scale_reciprocal) if g is not None else None
for g in grads
]
def _compute_gradients(self, loss, var_list, grad_loss=None, tape=None):
tape = tf.GradientTape() if tape is None else tape
with tape:
loss = self.get_scaled_loss(loss)
grads_and_vars = self._optimizer._compute_gradients( # pylint: disable=protected-access
loss,
var_list,
grad_loss,
tape=tape)
grads = [g for g, _ in grads_and_vars]
weights = [v for _, v in grads_and_vars]
unscaled_grads = self.get_unscaled_gradients(grads)
return list(zip(unscaled_grads, weights))
def get_gradients(self, loss, params):
loss = self.get_scaled_loss(loss)
grads = self._optimizer.get_gradients(loss, params)
return self.get_unscaled_gradients(grads)
def _create_all_weights(self, var_list):
self._optimizer._create_all_weights(var_list) # pylint: disable=protected-access
def apply_gradients(self,
grads_and_vars,
name=None,
experimental_aggregate_gradients=True):
if tf.distribute.in_cross_replica_context():
raise ValueError('apply_gradients() must be called in a replica context.')
# We check for the strategy here despite already checking in the constructor
    # because the optimizer is frequently created outside the strategy's scope.
self._raise_if_strategy_unsupported()
grads_and_vars = optimizer_utils.filter_empty_gradients(grads_and_vars)
if experimental_aggregate_gradients:
# We must aggregate the gradients here instead of in
# self.optimizer.apply_gradients, so that any NaN or Inf gradients are
# propagated to each replica. If any replica has a NaN or Inf gradient,
# they must all have a NaN or Inf gradient so that they all skip the step.
# pylint: disable=protected-access
grads_and_vars = self._optimizer._transform_unaggregated_gradients(
grads_and_vars)
grads_and_vars = self._optimizer._aggregate_gradients(grads_and_vars)
# pylint: enable=protected-access
grads_and_vars = tuple(grads_and_vars)
grads = [g for g, _ in grads_and_vars]
# We do not want DistributionStrategy to unwrap any MirroredVariables in
# grads_and_vars, because even in a replica context, the wrapped
# optimizer expects mirrored variables. So we wrap the variables with an
# _UnwrapPreventer, preventing DistributionStrategy from unwrapping the
# MirroredVariables.
wrapped_vars = _UnwrapPreventer([v for _, v in grads_and_vars])
def do_not_apply_fn():
# Normally self._optimizer.iterations is incremented in
# self._optimizer.apply_gradients(). Since that is not called in this
# branch, we increment it here instead.
return self._optimizer.iterations.assign_add(1, read_value=False)
def _if_should_apply_grads(grads):
if isinstance(self._loss_scale, _DynamicLossScaleState):
return self._loss_scale.update(grads)
else:
return (tf.no_op(), True)
if optimizer_utils.strategy_supports_no_merge_call():
loss_scale_update_op, should_apply_grads = _if_should_apply_grads(grads)
def apply_fn():
return self._apply_gradients(grads, wrapped_vars, name)
maybe_apply_op = tf.__internal__.smart_cond.smart_cond(should_apply_grads, apply_fn,
do_not_apply_fn)
return tf.group(maybe_apply_op, loss_scale_update_op)
else:
def _apply_gradients_cross_replica(distribution, grads, wrapped_vars,
name):
loss_scale_update_op, should_apply_grads = _if_should_apply_grads(grads)
def apply_fn():
return distribution.extended.call_for_each_replica(
self._apply_gradients,
args=(grads, wrapped_vars, name))
# Note: We must call this cond() in a cross-replica context.
# DistributionStrategy does not support having a cond in a replica
# context with a branch that calls `merge_call`, and
# self._optimizer.apply_gradients calls `merge_call`.
maybe_apply_op = tf.__internal__.smart_cond.smart_cond(should_apply_grads, apply_fn,
do_not_apply_fn)
return tf.group(maybe_apply_op, loss_scale_update_op)
return tf.distribute.get_replica_context().merge_call(
_apply_gradients_cross_replica,
args=(grads, wrapped_vars, name))
def _apply_gradients(self, grads, wrapped_vars, name):
# Pass experimental_aggregate_gradients=False since LossScaleOptimizer
# already aggregated the gradients.
# TODO(reedwm): This will raise a fairly cryptic error message if
# self._optimizer.apply_gradients does not take
# experimental_aggregate_gradients.
return self._optimizer.apply_gradients(
list(zip(grads, wrapped_vars.value)), name,
experimental_aggregate_gradients=False)
def get_config(self):
serialized_optimizer = optimizers.serialize(self._optimizer)
return {
'inner_optimizer': serialized_optimizer,
'dynamic': self.dynamic,
'initial_scale': self.initial_scale,
'dynamic_growth_steps': self.dynamic_growth_steps,
}
@classmethod
def from_config(cls, config, custom_objects=None):
config = config.copy() # Make a copy, since we mutate config
if 'loss_scale' in config:
# If loss_scale is in config, we assume we are deserializing a
# LossScaleOptimizer from TF 2.3 or below. We convert the config so it
# can be deserialized in the current LossScaleOptimizer.
loss_scale = keras_loss_scale_module.deserialize(
config.pop('loss_scale'))
if isinstance(loss_scale, tf.mixed_precision.experimental.FixedLossScale):
config['dynamic'] = False
config['initial_scale'] = loss_scale._loss_scale_value # pylint: disable=protected-access
elif isinstance(loss_scale, tf.mixed_precision.experimental.DynamicLossScale):
config['dynamic'] = True
config['initial_scale'] = loss_scale.initial_loss_scale
config['dynamic_growth_steps'] = loss_scale.increment_period
if loss_scale.multiplier != 2:
raise ValueError('Cannot deserialize LossScaleOptimizer with a '
'DynamicLossScale whose multiplier is not 2. Got '
'DynamicLossScale: %s' % (loss_scale,))
else:
raise ValueError(
'Serialized LossScaleOptimizers with a LossScale that is neither a '
'FixedLossScale nor a DynamicLossScale can no longer be '
'deserialized')
config['inner_optimizer'] = config.pop('optimizer')
config['inner_optimizer'] = optimizers.deserialize(
config['inner_optimizer'], custom_objects=custom_objects)
return cls(**config)
def _raise_if_strategy_unsupported(self):
if not strategy_supports_loss_scaling():
strategy = tf.distribute.get_strategy()
if isinstance(strategy,
(tf.distribute.experimental.TPUStrategy, tf.compat.v1.distribute.experimental.TPUStrategy,
tf.distribute.TPUStrategy)):
raise ValueError(
'Loss scaling is not supported with TPUStrategy. Loss scaling is '
'unnecessary with TPUs, since they support bfloat16 instead of '
'float16 and bfloat16 does not require loss scaling. You should '
'remove the use of the LossScaleOptimizer when TPUs are used.')
else:
raise ValueError('Loss scaling is not supported with the '
'tf.distribute.Strategy: %s. Try using a different '
'Strategy, e.g. a MirroredStrategy' %
strategy.__class__.__name__)
# Delegations: We delegate most OptimizerV2 methods to the wrapped optimizer
# below.
@property
def iterations(self):
return self._optimizer.iterations
@iterations.setter
def iterations(self, variable):
self._optimizer.iterations = variable
def get_slot_names(self):
return self._optimizer.get_slot_names()
def variables(self):
return self._optimizer.variables()
@property
def weights(self):
return self._optimizer.weights
def get_weights(self):
return self._optimizer.get_weights()
def set_weights(self, weights):
return self._optimizer.set_weights(weights)
@property
def clipnorm(self):
return self._optimizer.clipnorm
@clipnorm.setter
def clipnorm(self, val):
self._optimizer.clipnorm = val
@property
def global_clipnorm(self):
return self._optimizer.global_clipnorm
@global_clipnorm.setter
def global_clipnorm(self, val):
self._optimizer.global_clipnorm = val
@property
def clipvalue(self):
return self._optimizer.clipvalue
@clipvalue.setter
def clipvalue(self, val):
self._optimizer.clipvalue = val
def _aggregate_gradients(self, grads_and_vars):
return self._optimizer._aggregate_gradients(grads_and_vars) # pylint: disable=protected-access
def _restore_slot_variable(self, slot_name, variable, slot_variable):
return self._optimizer._restore_slot_variable(slot_name, variable, # pylint: disable=protected-access
slot_variable)
def _create_or_restore_slot_variable(self, slot_variable_position, slot_name,
variable):
return self._optimizer._create_or_restore_slot_variable( # pylint: disable=protected-access
slot_variable_position, slot_name, variable)
def get_slot(self, var, slot_name):
return self._optimizer.get_slot(var, slot_name)
def add_slot(self, var, slot_name, initializer='zeros'):
return self._optimizer.add_slot(var, slot_name, initializer)
def __getattribute__(self, name):
try:
return object.__getattribute__(self, name)
except AttributeError as e:
if name == '_optimizer' or name == '_hyper':
# Avoid infinite recursion
raise e
# Delegate hyperparameter accesses to inner optimizer.
if name == 'lr':
name = 'learning_rate'
if name in self._optimizer._hyper:
return self._optimizer._get_hyper(name)
raise e
def __dir__(self):
result = set(super(LossScaleOptimizer, self).__dir__())
if '_optimizer' in result:
result |= self._optimizer._hyper.keys()
if 'learning_rate' in self._optimizer._hyper.keys():
result.add('lr')
return list(result)
def __setattr__(self, name, value):
if name == 'lr':
name = 'learning_rate'
# Delegate setting hyperparameter to inner optimizer if the attribute does
# not exist on the LossScaleOptimizer
try:
# We cannot check for the 'iterations' attribute as it cannot be set after
# it is accessed.
if name != 'iterations':
object.__getattribute__(self, name)
has_attribute = True
except AttributeError:
has_attribute = False
if (name != '_optimizer' and name in self._optimizer._hyper
and not has_attribute):
self._optimizer._set_hyper(name, value)
else:
super(LossScaleOptimizer, self).__setattr__(name, value)
# Explicitly delegate learning_rate. Normally hyperparameters are delegated in
# __getattribute__, but if a hyperparameter is not in self._optimizer._hyper
# (e.g. because self._optimizer itself wraps another optimizer), then it won't
# be delegated. Since learning_rate is a very commonly accessed
# hyperparameter, we delegate it here.
@property
def learning_rate(self):
return self._optimizer.learning_rate
@learning_rate.setter
def learning_rate(self, value):
self._optimizer.learning_rate = value
@property
def lr(self):
return self._optimizer.learning_rate
@lr.setter
def lr(self, value):
self._optimizer.lr = value
# We do not override some OptimizerV2 methods. For each, we describe why we do
# not delegate them to self._optimizer:
# * get_updates: get_updates() calls get_gradients(). Since we override
# get_gradients(), we cannot delegate get_updates() to self._optimizer,
# otherwise the overridden get_gradients() method would not be called.
# Luckily, get_updates() does not access any OptimizerV2 fields, so
# inheriting the OptimizerV2 version works fine.
  # * minimize: We don't delegate for a similar reason as get_updates(): it
  #   calls both self._compute_gradients() and self.apply_gradients(), and
  #   both need to have the LossScaleOptimizer version called.
# TODO(reedwm): Maybe throw an error if mixed precision is used without this
# optimizer being used.
@keras_export('keras.mixed_precision.experimental.LossScaleOptimizer')
class LossScaleOptimizerV1(LossScaleOptimizer):
"""An deprecated optimizer that applies loss scaling.
Warning: This class is deprecated and will be removed in a future version of
TensorFlow. Please use the non-experimental class
`tf.keras.mixed_precision.LossScaleOptimizer` instead.
This class is identical to the non-experimental
`keras.mixed_precision.LossScaleOptimizer` except its constructor takes
different arguments. For this class (the experimental version), the
constructor takes a `loss_scale` argument. For the non-experimental class,
the constructor encodes the loss scaling information in multiple arguments.
Note that unlike this class, the non-experimental class does not accept a
`tf.compat.v1.mixed_precision.LossScale`, which is deprecated.
If you currently use this class, you should switch to the non-experimental
`tf.keras.mixed_precision.LossScaleOptimizer` instead. We show several
examples of converting the use of the experimental class to the equivalent
non-experimental class.
  >>> # In all of the examples below, `opt1` and `opt2` are identical
>>> opt1 = tf.keras.mixed_precision.experimental.LossScaleOptimizer(
... tf.keras.optimizers.SGD(), loss_scale='dynamic')
>>> opt2 = tf.keras.mixed_precision.LossScaleOptimizer(
... tf.keras.optimizers.SGD())
>>> assert opt1.get_config() == opt2.get_config()
>>> opt1 = tf.keras.mixed_precision.experimental.LossScaleOptimizer(
... tf.keras.optimizers.SGD(), loss_scale=123)
>>> # dynamic=False indicates to use fixed loss scaling. initial_scale=123
>>> # refers to the initial loss scale, which is the single fixed loss scale
>>> # when dynamic=False.
>>> opt2 = tf.keras.mixed_precision.LossScaleOptimizer(
... tf.keras.optimizers.SGD(), dynamic=False, initial_scale=123)
>>> assert opt1.get_config() == opt2.get_config()
>>> loss_scale = tf.compat.v1.mixed_precision.experimental.DynamicLossScale(
... initial_loss_scale=2048, increment_period=500)
>>> opt1 = tf.keras.mixed_precision.experimental.LossScaleOptimizer(
... tf.keras.optimizers.SGD(), loss_scale=loss_scale)
>>> opt2 = tf.keras.mixed_precision.LossScaleOptimizer(
... tf.keras.optimizers.SGD(), initial_scale=2048,
... dynamic_growth_steps=500)
>>> assert opt1.get_config() == opt2.get_config()
Make sure to also switch from this class to the non-experimental class in
isinstance checks, if you have any. If you do not do this, your model may run
into hard-to-debug issues, as the experimental `LossScaleOptimizer` subclasses
the non-experimental `LossScaleOptimizer`, but not vice versa. It is safe to
switch isinstance checks to the non-experimental `LossScaleOptimizer` even
before using the non-experimental `LossScaleOptimizer`.
>>> opt1 = tf.keras.mixed_precision.experimental.LossScaleOptimizer(
... tf.keras.optimizers.SGD(), loss_scale='dynamic')
>>> # The experimental class subclasses the non-experimental class
>>> isinstance(opt1, tf.keras.mixed_precision.LossScaleOptimizer)
True
>>> opt2 = tf.keras.mixed_precision.LossScaleOptimizer(
... tf.keras.optimizers.SGD())
>>> # The non-experimental class does NOT subclass the experimental class.
>>> isinstance(opt2, tf.keras.mixed_precision.experimental.LossScaleOptimizer)
False
Args:
optimizer: The Optimizer instance to wrap.
loss_scale: The loss scale to scale the loss and gradients. This can
either be an int/float to use a fixed loss scale, the string "dynamic"
to use dynamic loss scaling, or an instance of a LossScale. The string
"dynamic" equivalent to passing `DynamicLossScale()`, and passing an
int/float is equivalent to passing a FixedLossScale with the given loss
scale. If a DynamicLossScale is passed, DynamicLossScale.multiplier must
be 2 (the default).
"""
def __init__(self, optimizer, loss_scale):
warn_msg_prefix = (
'tf.keras.mixed_precision.experimental.LossScaleOptimizer is '
'deprecated. Please use tf.keras.mixed_precision.LossScaleOptimizer '
'instead. ')
if isinstance(loss_scale, dict):
loss_scale = keras_loss_scale_module.deserialize(loss_scale)
if isinstance(loss_scale, (int, float)):
tf_logging.warning(
warn_msg_prefix + 'For example:\n'
' opt = tf.keras.mixed_precision.LossScaleOptimizer('
'opt, dynamic=False, initial_scale={})'.format(loss_scale))
super(LossScaleOptimizerV1, self).__init__(optimizer, dynamic=False,
initial_scale=loss_scale)
elif isinstance(loss_scale, tf.mixed_precision.experimental.FixedLossScale):
ls_val = loss_scale._loss_scale_value # pylint: disable=protected-access
tf_logging.warning(
warn_msg_prefix + 'For example:\n'
' opt = tf.keras.mixed_precision.LossScaleOptimizer('
'opt, dynamic=False, initial_scale={})'.format(ls_val))
super(LossScaleOptimizerV1, self).__init__(optimizer, dynamic=False,
initial_scale=ls_val)
elif loss_scale == 'dynamic':
tf_logging.warning(
warn_msg_prefix + 'For example:\n'
' opt = tf.keras.mixed_precision.LossScaleOptimizer('
'opt)')
super(LossScaleOptimizerV1, self).__init__(optimizer)
elif isinstance(loss_scale, tf.mixed_precision.experimental.DynamicLossScale):
kwargs = {}
extra_arguments = ''
if loss_scale.initial_loss_scale != _DEFAULT_INITIAL_SCALE:
kwargs['initial_scale'] = loss_scale.initial_loss_scale
extra_arguments += (', initial_scale=%s' %
loss_scale.initial_loss_scale)
if loss_scale.increment_period != _DEFAULT_GROWTH_STEPS:
kwargs['dynamic_growth_steps'] = loss_scale.increment_period
extra_arguments += (', dynamic_growth_steps=%s' %
loss_scale.increment_period)
if loss_scale.multiplier != 2:
raise ValueError('When passing a DynamicLossScale to "loss_scale", '
'DynamicLossScale.multiplier must be 2. Got: %s'
% (loss_scale,))
tf_logging.warning(
warn_msg_prefix +
'Note that the non-experimental LossScaleOptimizer does not take a '
'DynamicLossScale but instead takes the dynamic configuration '
'directly in the constructor. For example:\n'
' opt = tf.keras.mixed_precision.LossScaleOptimizer('
'opt{})\n'.format(extra_arguments))
super(LossScaleOptimizerV1, self).__init__(optimizer, **kwargs)
elif isinstance(loss_scale, tf.mixed_precision.experimental.LossScale):
raise TypeError('Passing a LossScale that is not a FixedLossScale or a '
'DynamicLossScale is no longer supported. Got: {}'
.format(loss_scale))
else:
raise ValueError('Invalid value passed to loss_scale. loss_scale '
'must be the string "dynamic" (recommended), an int, '
'a float, a FixedLossScale, or a DynamicLossScale. Got '
'value: {}'.format(loss_scale))
@classmethod
def from_config(cls, config, custom_objects=None):
config = config.copy() # Make a copy, since we mutate config
# If loss_scale is in config, we assume we are deserializing a
# LossScaleOptimizer from TF 2.3 or below. Otherwise, we assume we are
# deserializing a LossScaleOptimizer from TF 2.4 or above.
if 'loss_scale' in config:
config['loss_scale'] = keras_loss_scale_module.deserialize(
config['loss_scale'])
if (isinstance(config['loss_scale'], tf.mixed_precision.experimental.DynamicLossScale)
and config['loss_scale'].multiplier != 2):
raise ValueError('Cannot deserialize LossScaleOptimizer with a '
'DynamicLossScale whose multiplier is not 2. Got '
'DynamicLossScale: %s' % (config['loss_scale'],))
config['optimizer'] = optimizers.deserialize(
config['optimizer'], custom_objects=custom_objects)
return cls(**config)
# We convert the config, as generated by LossScaleOptimizer.get_config, to a
# version that can be passed to LossScaleOptimizerV1.__init__
if config['dynamic']:
config['loss_scale'] = tf.mixed_precision.experimental.DynamicLossScale(
config['initial_scale'], config['dynamic_growth_steps'], multiplier=2)
else:
config['loss_scale'] = tf.mixed_precision.experimental.FixedLossScale(
config['initial_scale'])
del config['dynamic']
del config['initial_scale']
del config['dynamic_growth_steps']
config['optimizer'] = optimizers.deserialize(
config.pop('inner_optimizer'), custom_objects=custom_objects)
return cls(**config)
class FakeOptimizerForRestoration(tf.__internal__.tracking.Trackable):
"""A fake optimizer used to support restoring TensorFlow 2.2 checkpoints.
The checkpoint format for LossScaleOptimizers changed after TF 2.2. This class
  exists to support restoring TF 2.2 checkpoints in newer versions of TensorFlow.
In TF 2.2, LossScaleOptimizer would track the wrapped optimizer by calling the
following in LossScaleOptimizer.__init__
```
self._track_trackable(self._optimizer, 'base_optimizer')
```
This means a dependency from the LossScaleOptimizer to the wrapped optimizer
would be stored in the checkpoint. However now, the checkpoint format with a
LossScaleOptimizer is the same as the format without a LossScaleOptimizer,
except the loss scale is also stored. This means there is no dependency from
the LossScaleOptimizer to the wrapped optimizer. Instead, the
LossScaleOptimizer acts as if it is the wrapped optimizer, from a checkpoint's
perspective, by overriding all Trackable methods and delegating them to the
wrapped optimizer.
  To allow restoring TF 2.2 checkpoints, LossScaleOptimizer adds a dependency
on this class instead of the inner optimizer. When restored, this class will
instead restore the slot variables of the inner optimizer. Since this class
has no variables, it does not affect the checkpoint when saved.
"""
def __init__(self, optimizer):
self._optimizer = optimizer
def get_slot_names(self):
return self._optimizer.get_slot_names()
def _create_or_restore_slot_variable(self, slot_variable_position, slot_name,
variable):
return self._optimizer._create_or_restore_slot_variable( # pylint: disable=protected-access
slot_variable_position, slot_name, variable)
tf.__internal__.mixed_precision.register_loss_scale_wrapper(optimizer_v2.OptimizerV2,
LossScaleOptimizerV1)
def _multiply_gradient(gradient, scale):
"""Multiply a (possibly sparse) gradient by the given scale factor."""
scale = tf.cast(scale, gradient.dtype)
if isinstance(gradient, tf.IndexedSlices):
return tf.IndexedSlices(
gradient.values * scale,
gradient.indices,
dense_shape=gradient.dense_shape)
else:
return gradient * scale
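# Illustrative sketch (for exposition only): scaling an IndexedSlices gradient
# only rescales its values, keeping the indices intact.
#
#   g = tf.IndexedSlices(values=tf.ones([2, 3]), indices=tf.constant([0, 2]))
#   scaled = _multiply_gradient(g, 0.5)  # scaled.values == 0.5 * g.values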
def strategy_supports_loss_scaling():
"""Returns True if the current Strategy supports loss scaling."""
if not tf.distribute.has_strategy():
return True
strategy = tf.distribute.get_strategy()
# Strategies are supported if either there is only one replica or if variables
# are replicated per device. Otherwise, the current model.fit() implementation
# and most custom training loops incorrectly unscale the gradients. Currently,
# gradients are unscaled once per compute replica, but they should be unscaled
# once per variable replica. When there is one variable replica for each
# compute replica, this works fine, but otherwise issues will occur.
# TODO(reedwm): Support all strategies.
return isinstance(strategy, (
tf.distribute.MultiWorkerMirroredStrategy,
tf.compat.v1.distribute.experimental.MultiWorkerMirroredStrategy,
tf.distribute.OneDeviceStrategy,
tf.compat.v1.distribute.OneDeviceStrategy,
tf.distribute.MirroredStrategy,
tf.compat.v1.distribute.MirroredStrategy,
))
| 51,264 | 40.611201 | 110 | py |
keras | keras-master/keras/mixed_precision/device_compatibility_check_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the device compatibility check."""
import tensorflow.compat.v2 as tf
import re
from keras import combinations
from keras.mixed_precision import device_compatibility_check
from tensorflow.python.platform import tf_logging
def device_details(device_name, compute_capability=None):
details = {}
if device_name:
details['device_name'] = device_name
if compute_capability:
details['compute_capability'] = compute_capability
return details
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class DeviceCompatibilityCheckTest(tf.test.TestCase):
def _test_compat_check(self, device_attr_list, should_warn, expected_regex,
policy_name='mixed_float16'):
with tf.compat.v1.test.mock.patch.object(tf_logging, 'warning') as mock_warn, \
tf.compat.v1.test.mock.patch.object(tf_logging, 'info') as mock_info:
device_compatibility_check._log_device_compatibility_check(
policy_name, device_attr_list)
if should_warn:
self.assertRegex(mock_warn.call_args[0][0], expected_regex)
mock_info.assert_not_called()
else:
self.assertRegex(mock_info.call_args[0][0], expected_regex)
mock_warn.assert_not_called()
def test_supported(self):
details_list = [device_details('GPU 1', (7, 1))]
regex = re.compile(
r'.*compatibility check \(mixed_float16\): OK\n'
r'Your GPU will likely run quickly with dtype policy mixed_float16 as '
r'it has compute capability of at least 7.0. Your GPU: GPU 1, compute '
r'capability 7.1', flags=re.MULTILINE)
self._test_compat_check(details_list, False, regex)
details_list = [
device_details('GPU 1', (7, 0)),
device_details('GPU 2', (7, 1)),
device_details('GPU 3', (8, 0)),
]
regex = re.compile(
r'.*compatibility check \(mixed_float16\): OK\n'
r'Your GPUs will likely run quickly with dtype policy mixed_float16 as '
r'they all have compute capability of at least 7.0', flags=re.MULTILINE)
self._test_compat_check(details_list, False, regex)
def test_unsupported(self):
details_list = [
device_details('GPU 1', (6, 0))
]
regex = re.compile(
r'.*compatibility check \(mixed_float16\): WARNING\n'
r'Your GPU may run slowly with dtype policy mixed_float16.*\n'
r' GPU 1, compute capability 6.0\n'
r'See.*', flags=re.MULTILINE)
self._test_compat_check(details_list, True, regex)
details_list = [
device_details(None)
]
regex = re.compile(
r'.*compatibility check \(mixed_float16\): WARNING\n'
r'Your GPU may run slowly with dtype policy mixed_float16.*\n'
r' Unknown GPU, no compute capability \(probably not an Nvidia GPU\)\n'
r'See.*', flags=re.MULTILINE)
self._test_compat_check(details_list, True, regex)
details_list = [
device_details('GPU 1', (6, 0)),
device_details('GPU 2', (3, 10)),
]
regex = re.compile(
r'.*compatibility check \(mixed_float16\): WARNING\n'
r'Your GPUs may run slowly with dtype policy mixed_float16.*\n'
r' GPU 1, compute capability 6.0\n'
r' GPU 2, compute capability 3.10\n'
r'See.*', flags=re.MULTILINE)
self._test_compat_check(details_list, True, regex)
details_list = [
device_details('GPU 1', (6, 0)),
device_details('GPU 1', (6, 0)),
device_details('GPU 1', (6, 0)),
device_details('GPU 2', (3, 10)),
]
regex = re.compile(
r'.*compatibility check \(mixed_float16\): WARNING\n'
r'Your GPUs may run slowly with dtype policy mixed_float16.*\n'
r' GPU 1, compute capability 6.0 \(x3\)\n'
r' GPU 2, compute capability 3.10\n'
r'See.*', flags=re.MULTILINE)
self._test_compat_check(details_list, True, regex)
details_list = []
regex = re.compile(
r'.*compatibility check \(mixed_float16\): WARNING\n'
r'The dtype policy mixed_float16 may run slowly because this machine '
r'does not have a GPU', flags=re.MULTILINE)
self._test_compat_check(details_list, True, regex)
def test_mix_of_supported_and_unsupported(self):
details_list = [
device_details('GPU 1', (7, 0)),
device_details('GPU 1', (7, 0)),
device_details('GPU 2', (6, 0))
]
regex = re.compile(
r'.*compatibility check \(mixed_float16\): WARNING\n'
r'Some of your GPUs may run slowly with dtype policy mixed_float16.*\n'
r' GPU 1, compute capability 7.0 \(x2\)\n'
r' GPU 2, compute capability 6.0\n'
r'See.*', flags=re.MULTILINE)
self._test_compat_check(details_list, True, regex)
if __name__ == '__main__':
tf.test.main()
| 5,467 | 37.507042 | 83 | py |
keras | keras-master/keras/mixed_precision/model_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests keras.Model works properly with mixed precision."""
import tensorflow.compat.v2 as tf
import os
from absl import flags
from absl.testing import parameterized
import numpy as np
from keras import backend
from keras import combinations
from keras import keras_parameterized
from keras import layers
from keras import models
from keras import optimizer_v1
from keras import testing_utils
from keras.applications import densenet
from keras.applications import efficientnet
from keras.applications import inception_resnet_v2
from keras.applications import inception_v3
from keras.applications import mobilenet
from keras.applications import nasnet
from keras.applications import resnet
from keras.applications import vgg16
from keras.applications import xception
from keras.engine import base_layer_utils
from keras.engine import input_spec
from keras.engine import sequential
from keras.layers import core
from keras.mixed_precision import get_layer_policy
from keras.mixed_precision import loss_scale_optimizer
from keras.mixed_precision import policy
from keras.mixed_precision import test_util as mp_test_util
from keras.optimizer_v2 import gradient_descent
from keras.saving import save
from keras.utils import generic_utils
# If called outside any strategy.scope() calls, this will return the default
# strategy.
default_strategy_fn = tf.distribute.get_strategy
def create_mirrored_strategy():
"""Create a MirroredStrategy, using a GPU if it is available."""
if tf.config.list_logical_devices('GPU'):
return tf.distribute.MirroredStrategy(['cpu:0', 'gpu:0'])
else:
return tf.distribute.MirroredStrategy(['cpu:0'])
TESTCASES = ({
'testcase_name': 'base',
'strategy_fn': default_strategy_fn
}, {
'testcase_name': 'distribute',
'strategy_fn': create_mirrored_strategy
})
class KerasModelTest(keras_parameterized.TestCase):
"""Test mixed precision with Keras models."""
def _skip_if_strategy_unsupported(self, strategy_fn):
if (strategy_fn != default_strategy_fn and
testing_utils.get_model_type() == 'subclass'):
self.skipTest('Non-default strategies are unsupported with subclassed '
'models')
def _skip_if_save_format_unsupported(self, save_format):
model_type = testing_utils.get_model_type()
if save_format == 'h5' and model_type == 'subclass':
self.skipTest('Saving subclassed models with the HDF5 format is '
'unsupported')
if (save_format == 'tf' and model_type == 'subclass' and
not tf.executing_eagerly()):
self.skipTest('b/148820505: This combination of features is currently '
'broken.')
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(
{
'testcase_name': 'base',
'strategy_fn': default_strategy_fn
}, {
'testcase_name': 'distribute',
'strategy_fn': create_mirrored_strategy,
}, {
'testcase_name': 'operator',
'strategy_fn': create_mirrored_strategy,
'use_operator': True
}, {
'testcase_name': 'regularizer',
'strategy_fn': create_mirrored_strategy,
'use_regularizer': True
}, {
'testcase_name': 'get_config',
'strategy_fn': create_mirrored_strategy,
'get_config': True,
'use_regularizer': True,
}, {
'testcase_name': 'saved_model',
'strategy_fn': default_strategy_fn,
'save_format': 'tf',
'use_regularizer': True,
}, {
'testcase_name': 'saved_model_input_spec',
'strategy_fn': default_strategy_fn,
'save_format': 'tf',
'use_regularizer': True,
'use_input_spec': True,
}, {
'testcase_name': 'h5',
'strategy_fn': default_strategy_fn,
'save_format': 'h5',
'use_regularizer': True,
}, {
'testcase_name': 'saved_model_distribute',
'strategy_fn': create_mirrored_strategy,
'save_format': 'tf',
'use_regularizer': True,
}, {
'testcase_name': 'saved_model_input_spec_distribute',
'strategy_fn': create_mirrored_strategy,
'save_format': 'tf',
'use_regularizer': True,
'use_input_spec': True,
}, {
'testcase_name': 'h5_distribute',
'strategy_fn': create_mirrored_strategy,
'save_format': 'h5',
'use_regularizer': True,
}, {
'testcase_name': 'saved_model_v1_policy',
'strategy_fn': create_mirrored_strategy,
'use_v1_policy': True,
'save_format': 'tf',
})
def test_model(self,
strategy_fn,
use_operator=False,
use_regularizer=False,
policy_name='mixed_float16',
get_config=False,
save_format=None,
use_input_spec=False,
use_v1_policy=False):
self._skip_if_strategy_unsupported(strategy_fn)
self._skip_if_save_format_unsupported(save_format)
if use_regularizer:
weight_regularizer = mp_test_util.IdentityRegularizer()
activity_regularizer = mp_test_util.ReduceSumRegularizer()
else:
weight_regularizer = activity_regularizer = None
with strategy_fn().scope():
cls = policy.PolicyV1 if use_v1_policy else policy.Policy
with policy.policy_scope(cls(policy_name)):
layer = mp_test_util.MultiplyLayer(
assert_type=tf.float16,
use_operator=use_operator,
regularizer=weight_regularizer,
activity_regularizer=activity_regularizer,
input_shape=(1,))
if use_input_spec:
layer.input_spec = input_spec.InputSpec(shape=(None, 1))
model = testing_utils.get_model_from_layers([layer], input_shape=(1,),
input_dtype=tf.float16)
if get_config:
config = model.get_config()
model = model.__class__.from_config(
config,
custom_objects={'MultiplyLayer': mp_test_util.MultiplyLayer})
(layer,) = (layer for layer in model.layers
if isinstance(layer, mp_test_util.MultiplyLayer))
def loss_fn(y_true, y_pred):
del y_true
return tf.reduce_mean(y_pred)
# Learning rate is small enough that if applied to a float16 variable,
        # the variable will not change. So this tests that the learning rate is
        # applied to the float32 variable, not to a float16 copy of it.
opt = gradient_descent.SGD(2**-14)
# Use a fixed loss scale, as this test will fail if gradients are
# skipped for a step due to dynamic loss scaling.
opt = loss_scale_optimizer.LossScaleOptimizer(opt, dynamic=False,
initial_scale=8)
model.compile(
opt,
loss=loss_fn,
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((2, 1))
y = np.ones((2, 1))
dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(2)
model.fit(dataset)
# Variable starts at 1, and should have gradient of 2 ** -14 subtracted
# from it.
expected = 1 - 2**-14
if use_regularizer:
# Weight and activity regularizer each add another 2 ** -14 to the
# gradient.
expected -= 2 * 2**-14
self.assertEqual(backend.eval(layer.v), expected)
if save_format:
with generic_utils.CustomObjectScope(
{'MultiplyLayer': mp_test_util.MultiplyLayer, 'loss_fn': loss_fn}):
self._test_saving(model, dataset, save_format, use_regularizer)
def _test_saving(self, model, dataset, save_format, use_regularizer):
# Save and load model, asserting variable does not change
save_path = os.path.join(self.get_temp_dir(), 'model')
model.save(save_path, save_format=save_format)
model = save.load_model(save_path)
(layer,) = (layer for layer in model.layers
if 'MultiplyLayer' in layer.__class__.__name__)
expected = 1 - 2**-14
if use_regularizer:
expected -= 2 * 2**-14
self.assertEqual(backend.eval(layer.v), expected)
# Continue training, and assert variable is correct value
model.fit(dataset)
new_expected = expected - 2 ** -14
if use_regularizer:
new_expected -= 2 * 2 ** -14
self.assertEqual(backend.eval(layer.v), new_expected)
# Load saved model again, and assert variable is previous value
model = save.load_model(save_path)
(layer,) = (layer for layer in model.layers
if 'MultiplyLayer' in layer.__class__.__name__)
self.assertEqual(backend.eval(layer.v), expected)
# Ensure various dtype-related aspects of the layer are correct
self.assertEqual(layer.dtype, 'float32')
self.assertEqual(get_layer_policy.get_layer_policy(layer).name,
'mixed_float16')
self.assertEqual(layer.v.dtype, 'float32')
self.assertEqual(layer(np.ones((2, 1))).dtype, 'float16')
# Loading a model always loads with a v2 Policy, even if saved with a
# PolicyV1.
self.assertEqual(type(model.dtype_policy), policy.Policy)
self.assertEqual(layer.get_config()['dtype'],
{'class_name': 'Policy', 'config': {
'name': 'mixed_float16'}})
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(
{
'testcase_name': 'base',
'strategy_fn': default_strategy_fn
}, {
'testcase_name': 'distribute',
'strategy_fn': create_mirrored_strategy,
})
def test_fixed_loss_scaling(self,
strategy_fn):
# Note: We do not test mixed precision in this method, only loss scaling.
loss_scale = 8.
batch_size = 4
with strategy_fn().scope():
x = layers.Input(shape=(1,), batch_size=batch_size)
layer = mp_test_util.MultiplyLayer()
y = layer(x)
# The gradient of 'y' at this point is 1. With loss scaling, the gradient
# is 'loss_scale'. We divide by the batch size since the loss is averaged
# across batch elements.
expected_gradient = loss_scale / batch_size
identity_with_grad_check_fn = (
mp_test_util.create_identity_with_grad_check_fn([expected_gradient]))
y = core.Lambda(identity_with_grad_check_fn)(y)
model = models.Model(inputs=x, outputs=y)
def loss_fn(y_true, y_pred):
del y_true
return tf.reduce_mean(y_pred)
opt = gradient_descent.SGD(1.)
opt = loss_scale_optimizer.LossScaleOptimizer(opt, dynamic=False,
initial_scale=loss_scale)
model.compile(
opt,
loss=loss_fn,
run_eagerly=testing_utils.should_run_eagerly())
self.assertEqual(backend.eval(layer.v), 1)
x = np.ones((batch_size, 1))
y = np.ones((batch_size, 1))
dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(batch_size)
model.fit(dataset)
# Variable starts at 1, and should have gradient of 1 subtracted from it.
expected = 0
self.assertEqual(backend.eval(layer.v), expected)
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(
{
'testcase_name': 'base',
'strategy_fn': default_strategy_fn
}, {
'testcase_name': 'distribute',
'strategy_fn': create_mirrored_strategy,
}, {
'testcase_name': 'loss_scaling',
'strategy_fn': create_mirrored_strategy,
'use_loss_scaling': True
})
def test_advanced_model(self, strategy_fn, use_loss_scaling=False):
# The advanced model tests mixed-precision-related features that would occur
# in a resnet50 model. It tests a model that has:
# * Multiple layers, some which use auto-cast variables and some which do
# not
# * Regularization on some variables and not others.
# * A fixed loss scale (if use_loss_scaling is True)
strategy = strategy_fn()
if use_loss_scaling:
loss_scale = 8.
learning_rate = 2**-14
with strategy.scope():
with policy.policy_scope(policy.Policy('mixed_float16')):
x = layers.Input(shape=(1,), batch_size=2)
layer1 = mp_test_util.MultiplyLayer(
assert_type=tf.float16,
regularizer=mp_test_util.IdentityRegularizer(),
use_operator=True)
layer2 = mp_test_util.MultiplyLayerWithoutAutoCast(
assert_type=tf.float16, use_operator=True)
layer3 = mp_test_util.MultiplyLayer(assert_type=tf.float16,
use_operator=False)
layer4 = mp_test_util.MultiplyLayerWithoutAutoCast(
assert_type=tf.float16,
regularizer=mp_test_util.IdentityRegularizer(),
use_operator=False)
y = layer1(x)
y = layer2(y)
y = layer3(y)
y = layer4(y)
if use_loss_scaling:
# The gradient of 'y' at this point is 1. With loss scaling, the
# gradient is 'loss_scale'. We divide by the batch size of 2 since the
# loss is averaged across batch elements.
expected_gradient = loss_scale / 2
identity_with_grad_check_fn = (
mp_test_util.create_identity_with_grad_check_fn(
expected_dtype=tf.float16,
expected_gradient=[expected_gradient]))
y = core.Lambda(identity_with_grad_check_fn)(y)
model = models.Model(inputs=x, outputs=y)
def loss_fn(y_true, y_pred):
del y_true
return tf.reduce_mean(y_pred)
opt = gradient_descent.SGD(learning_rate)
if use_loss_scaling:
opt = loss_scale_optimizer.LossScaleOptimizer(
opt, dynamic=False, initial_scale=loss_scale)
model.compile(
opt,
loss=loss_fn,
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((2, 1))
y = np.ones((2, 1))
dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(2)
model.fit(dataset)
for layer in (layer1, layer2, layer3, layer4):
if layer.losses:
# Layer has weight regularizer
self.assertEqual(backend.eval(layer.v), 1 - 2 * learning_rate)
else:
# Layer does not have weight regularizer
self.assertEqual(backend.eval(layer.v), 1 - learning_rate)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
@parameterized.named_parameters(
{
'testcase_name': 'base',
'strategy_fn': default_strategy_fn
}, {
'testcase_name': 'distribute',
'strategy_fn': create_mirrored_strategy,
}, {
'testcase_name': 'pass_loss_scale_to_policy',
'strategy_fn': create_mirrored_strategy,
'pass_loss_scale_to_policy': True,
}, {
'testcase_name': 'get_config',
'strategy_fn': create_mirrored_strategy,
'get_config': True,
}, {
'testcase_name': 'get_config_v1_lso',
'strategy_fn': create_mirrored_strategy,
'get_config': True,
'use_v1_loss_scale_optimizer': True,
}, {
'testcase_name': 'get_config_and_pass_loss_scale_to_policy',
'strategy_fn': create_mirrored_strategy,
'get_config': True,
'pass_loss_scale_to_policy': True,
})
def test_dynamic_loss_scaling(self,
strategy_fn,
pass_loss_scale_to_policy=False,
get_config=False,
use_v1_loss_scale_optimizer=False):
strategy = strategy_fn()
initial_loss_scale = 2.
batch_size = 4
expected_gradient = backend.variable([initial_loss_scale / batch_size],
dtype=tf.float16)
# If this variable is set to True, the model below will have NaN gradients
have_nan_gradients = backend.variable(False, dtype=tf.bool)
with strategy.scope():
opt = gradient_descent.SGD(1.)
if pass_loss_scale_to_policy:
loss_scale = tf.mixed_precision.experimental.DynamicLossScale(
initial_loss_scale=initial_loss_scale, increment_period=2)
p = policy.PolicyV1('mixed_float16', loss_scale=loss_scale)
elif use_v1_loss_scale_optimizer:
loss_scale = tf.mixed_precision.experimental.DynamicLossScale(
initial_loss_scale=initial_loss_scale, increment_period=2)
p = policy.Policy('mixed_float16')
opt = loss_scale_optimizer.LossScaleOptimizerV1(
opt, loss_scale)
else:
p = policy.Policy('mixed_float16')
opt = loss_scale_optimizer.LossScaleOptimizer(
opt, initial_scale=initial_loss_scale, dynamic_growth_steps=2)
with policy.policy_scope(p):
x = layers.Input(
shape=(1,), batch_size=batch_size, dtype=tf.float16)
layer = mp_test_util.MultiplyLayer(assert_type=tf.float16)
y = layer(x)
identity_with_nan_grads = (
mp_test_util.create_identity_with_nan_gradients_fn(
have_nan_gradients))
y = core.Lambda(identity_with_nan_grads)(y)
identity_with_grad_check_fn = (
mp_test_util.create_identity_with_grad_check_fn(
expected_dtype=tf.float16,
expected_gradient=expected_gradient))
y = core.Lambda(identity_with_grad_check_fn)(y)
model = models.Model(inputs=x, outputs=y)
if get_config:
config = model.get_config()
model = model.__class__.from_config(
config,
custom_objects={'MultiplyLayer': mp_test_util.MultiplyLayer})
(layer,) = (layer for layer in model.layers
if isinstance(layer, mp_test_util.MultiplyLayer))
def loss_fn(y_true, y_pred):
del y_true
return tf.reduce_mean(y_pred)
model.compile(
opt,
loss=loss_fn,
run_eagerly=testing_utils.should_run_eagerly())
self.assertEqual(backend.eval(layer.v), 1)
x = np.ones((batch_size, 1))
y = np.ones((batch_size, 1))
dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(batch_size)
model.fit(dataset)
      # The variable starts at 1 and has a gradient of 1, so it will go down by
      # 1 each step.
self.assertEqual(backend.eval(layer.v), 0)
model.fit(dataset)
self.assertEqual(backend.eval(layer.v), -1)
# There have been two steps without NaNs, so the loss scale will double
backend.set_value(expected_gradient,
backend.get_value(expected_gradient * 2))
model.fit(dataset)
self.assertEqual(backend.eval(layer.v), -2)
# Next test with NaN gradients.
backend.set_value(have_nan_gradients, True)
model.fit(dataset)
# Variable should not be updated
self.assertEqual(backend.eval(layer.v), -2)
# Test with finite gradients again
backend.set_value(have_nan_gradients, False)
# The loss scale will be halved due to the NaNs, so the gradient will also
# be halved
backend.set_value(expected_gradient,
backend.get_value(expected_gradient / 2))
model.fit(dataset)
self.assertEqual(backend.eval(layer.v), -3)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_loss_scale_optimizer_overrides_policy_v1_loss_scale(self):
with policy.policy_scope(policy.PolicyV1('float32', loss_scale=10.)):
opt = gradient_descent.SGD(1.)
opt = loss_scale_optimizer.LossScaleOptimizer(opt, dynamic=False,
initial_scale=5.)
x = layers.Input(shape=(1,))
y = mp_test_util.MultiplyLayer()(x)
model = models.Model(x, y)
model.compile(opt, loss='mse')
self.assertEqual(self.evaluate(model.optimizer.loss_scale), 5.)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_policy_v1_without_loss_scale(self):
with policy.policy_scope(policy.PolicyV1('mixed_float16',
loss_scale=None)):
opt = gradient_descent.SGD(1.)
x = layers.Input(shape=(1,))
y = mp_test_util.MultiplyLayer()(x)
model = models.Model(x, y)
model.compile(opt, loss='mse')
self.assertNotIsInstance(model.optimizer,
loss_scale_optimizer.LossScaleOptimizer)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_pass_invalid_optimizer_with_loss_scaling(self):
with policy.policy_scope(policy.PolicyV1('float32', loss_scale=10.)):
x = layers.Input(shape=(1,))
y = mp_test_util.MultiplyLayer()(x)
model = models.Model(x, y)
if tf.executing_eagerly():
error_msg = 'Use a `tf.keras` Optimizer instead'
else:
error_msg = 'optimizer" must be an instance of '
with self.assertRaisesRegex(ValueError, error_msg):
model.compile(optimizer_v1.SGD(1.), 'mse')
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_functional_model_loss_dtype(self):
with policy.policy_scope('float16'):
x = layers.Input(shape=(1,))
y = mp_test_util.MultiplyLayer()(x)
model = models.Model(x, y)
model.add_loss(tf.cast(y, 'float32'))
      # The loss should not be cast to the policy's dtype.
self.assertEqual(model.losses[0].dtype, 'float32')
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(
{
'testcase_name': 'base',
'strategy_fn': default_strategy_fn,
}, {
'testcase_name': 'distribute',
'strategy_fn': create_mirrored_strategy,
}, {
'testcase_name': 'base_h5',
'strategy_fn': default_strategy_fn,
'h5': True,
}, {
'testcase_name': 'distribute_h5',
'strategy_fn': create_mirrored_strategy,
'h5': True,
})
def test_save_weights_with_autocast_vars(self, strategy_fn, h5=False):
with strategy_fn().scope():
with policy.policy_scope('mixed_float16'):
x = layers.Input(shape=(1,), batch_size=2)
layer = mp_test_util.MultiplyLayer(assert_type=tf.float16)
y = layer(x)
model = models.Model(inputs=x, outputs=y)
model.set_weights([np.array(100.)])
x = np.ones((2, 1))
self.assertAllClose(backend.get_value(model(x)), x * 100.)
suffix = '.h5' if h5 else ''
weights_file = os.path.join(self.get_temp_dir(), 'weights' + suffix)
model.save_weights(weights_file)
model.set_weights([np.array(200.)])
self.assertAllClose(backend.get_value(model(x)), x * 200.)
model.load_weights(weights_file)
self.assertAllClose(backend.get_value(model(x)), x * 100.)
self.assertEqual(model.get_weights(), [np.array(100.)])
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(
{
'testcase_name': 'base',
'strategy_fn': default_strategy_fn,
}, {
'testcase_name': 'distribute',
'strategy_fn': create_mirrored_strategy,
}, {
'testcase_name': 'different_var_name',
'strategy_fn': default_strategy_fn,
'var_name': 'w'
}, {
'testcase_name': 'different_var_name_distribute',
'strategy_fn': create_mirrored_strategy,
'var_name': 'w'
})
def test_save_slot_variables_with_autocast_vars(self,
strategy_fn,
var_name='v'):
p = policy.Policy('mixed_float16')
with strategy_fn().scope(), policy.policy_scope(p):
x = layers.Input(shape=(2,), batch_size=2)
# Having a var_name other than 'v' tests that a fixed bug (b/134713714)
# does not reoccur. The bug was that a crash would occur when saving a
# checkpoint where an AutoCastVariable with a slot variable would have a
# different name than the layer attribute's name (layer.v in this case).
layer = mp_test_util.MultiplyLayer(assert_type=tf.float16,
var_name=var_name)
y = layer(x)
model = models.Model(inputs=x, outputs=y)
opt = gradient_descent.SGD(1., 1.)
opt = loss_scale_optimizer.LossScaleOptimizer(opt, dynamic=False,
initial_scale=1)
model.compile(
optimizer=opt,
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.fit(np.ones((2, 2)), np.zeros((2, 2)), batch_size=2)
weights_file = os.path.join(self.get_temp_dir(), 'weights')
model.save_weights(weights_file)
saved_slot = backend.get_value(opt.get_slot(layer.v, 'momentum'))
model.fit(np.ones((2, 2)), np.zeros((2, 2)), batch_size=2)
new_slot = backend.get_value(opt.get_slot(layer.v, 'momentum'))
self.assertNotEqual(new_slot, saved_slot)
model.load_weights(weights_file)
restored_slot = backend.get_value(opt.get_slot(layer.v, 'momentum'))
self.assertEqual(restored_slot, saved_slot)
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(*TESTCASES)
def test_save_weights_with_dynamic_loss_scaling(self, strategy_fn):
strategy = strategy_fn()
if (isinstance(strategy, tf.distribute.MirroredStrategy) and
not tf.executing_eagerly()):
# TODO(b/121381184): Enable running the test in this case.
return
# Create and run model.
with strategy.scope():
x = layers.Input(shape=(2,), batch_size=2, dtype=tf.float32)
y = mp_test_util.MultiplyLayer(assert_type=tf.float32)(x)
model = models.Model(inputs=x, outputs=y)
opt = gradient_descent.SGD(1.)
opt = loss_scale_optimizer.LossScaleOptimizer(
opt, initial_scale=1., dynamic_growth_steps=2.)
model.compile(
optimizer=opt,
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
# Run for 3 steps (6 examples with a batch size of 2)
model.fit(np.zeros((6, 2)), np.zeros((6, 2)), batch_size=2)
self.assertEqual(backend.get_value(opt.loss_scale), 2)
self.assertEqual(backend.get_value(opt.dynamic_counter), 1)
# Save model weights.
save_prefix = os.path.join(self.get_temp_dir(), 'ckpt')
model.save_weights(save_prefix)
# Run model again for 1 step (2 examples with a batch size of 2)
model.fit(np.zeros((2, 2)), np.zeros((2, 2)), batch_size=2)
self.assertEqual(backend.get_value(opt.loss_scale), 4)
self.assertEqual(backend.get_value(opt.dynamic_counter), 0)
# Load model weights and ensure loss scale weights are restored.
model.load_weights(save_prefix)
self.assertEqual(backend.get_value(opt.loss_scale), 2)
self.assertEqual(backend.get_value(opt.dynamic_counter), 1)
@keras_parameterized.run_all_keras_modes
def test_restore_old_loss_scale_checkpoint(self):
# Ensure a checkpoint from TF 2.2 can be loaded. The checkpoint format
# of LossScaleOptimizer changed, but old checkpoints can still be loaded
opt = gradient_descent.SGD(0.1, momentum=0.1)
opt = loss_scale_optimizer.LossScaleOptimizer(opt)
model = sequential.Sequential([core.Dense(2,)])
# The checkpoint and expected values were obtained from the program in
# testdata/BUILD.
ckpt_dir = os.path.join(
flags.FLAGS['test_srcdir'].value,
'org_keras/keras',
'mixed_precision/testdata/lso_ckpt_tf2.2')
# ckpt_dir = test.test_src_dir_path(
# 'python/keras/mixed_precision/testdata/lso_ckpt_tf2.2')
model.load_weights(os.path.join(ckpt_dir, 'ckpt'))
model.compile(opt, 'mse', run_eagerly=testing_utils.should_run_eagerly())
model(np.zeros((2, 2))) # Create model weights
opt._create_all_weights(model.weights)
expected_kernel = np.array([[9.229685, 10.901115], [10.370763, 9.757362]])
expected_slot = np.array([[10.049943, 9.917691], [10.049943, 9.917691]])
self.assertAllClose(self.evaluate(model.weights[0]), expected_kernel)
self.assertAllClose(
self.evaluate(opt.get_slot(model.weights[0], 'momentum')),
expected_slot)
self.assertEqual(self.evaluate(opt.loss_scale), 32768)
self.assertEqual(self.evaluate(opt.dynamic_counter), 1)
# Check restoring works even after the model is compiled and the weights
# have been created.
model.fit(np.random.normal(size=(2, 2)), np.random.normal(size=(2, 2)))
self.assertNotAllClose(self.evaluate(model.weights[0]), expected_kernel)
self.assertNotAllClose(
self.evaluate(opt.get_slot(model.weights[0], 'momentum')),
expected_slot)
model.load_weights(os.path.join(ckpt_dir, 'ckpt'))
self.assertAllClose(self.evaluate(model.weights[0]), expected_kernel)
self.assertAllClose(
self.evaluate(opt.get_slot(model.weights[0], 'momentum')),
expected_slot)
self.assertEqual(self.evaluate(opt.loss_scale), 32768)
self.assertEqual(self.evaluate(opt.dynamic_counter), 1)
def test_restore_old_saved_model(self):
saved_model_dir = os.path.join(
flags.FLAGS['test_srcdir'].value,
'org_keras/keras',
'mixed_precision/testdata/lso_savedmodel_tf2.2')
# saved_model_dir = test.test_src_dir_path(
# 'python/keras/mixed_precision/testdata/'
# 'lso_savedmodel_tf2.2')
model = save.load_model(saved_model_dir)
expected_kernel = np.array([[9.229685, 10.901115], [10.370763, 9.757362]])
self.assertAllClose(backend.eval(model.weights[0]), expected_kernel)
self.assertEqual(type(model.optimizer),
loss_scale_optimizer.LossScaleOptimizer)
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(
{
'testcase_name': 'base',
'strategy_fn': default_strategy_fn,
}, {
'testcase_name': 'distribute',
'strategy_fn': create_mirrored_strategy,
}, {
'testcase_name': 'use_v1_lso',
'strategy_fn': create_mirrored_strategy,
'use_v1_loss_scale_optimizer': True
}, {
'testcase_name': 'base_h5',
'strategy_fn': default_strategy_fn,
'h5': True,
}, {
'testcase_name': 'distribute_h5',
'strategy_fn': create_mirrored_strategy,
'h5': True,
})
def test_save_model_with_dynamic_loss_scaling(
self, strategy_fn, h5=False, use_v1_loss_scale_optimizer=False):
# TODO(reedwm): Support and test saving model with a mixed_[b]float16 policy
# as well.
strategy = strategy_fn()
if (isinstance(strategy, tf.distribute.MirroredStrategy) and
not tf.executing_eagerly()):
# TODO(b/121381184): Enable running the test in this case.
return
# Create and run model.
with strategy.scope():
x = layers.Input(shape=(2,), batch_size=2, dtype=tf.float32)
y = mp_test_util.MultiplyLayer()(x)
model = models.Model(inputs=x, outputs=y)
opt = gradient_descent.SGD(1.)
if use_v1_loss_scale_optimizer:
loss_scale = tf.mixed_precision.experimental.DynamicLossScale(
initial_loss_scale=1., increment_period=2.)
opt = loss_scale_optimizer.LossScaleOptimizerV1(opt, loss_scale)
else:
opt = loss_scale_optimizer.LossScaleOptimizer(opt, initial_scale=1.,
dynamic_growth_steps=2.)
model.compile(
optimizer=opt,
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
# Run for 3 steps (6 examples with a batch size of 2)
model.fit(np.ones((6, 2)), np.zeros((6, 2)), batch_size=2)
self.assertEqual(backend.get_value(opt.loss_scale), 2)
self.assertEqual(backend.get_value(opt.dynamic_counter), 1)
(weight,) = model.trainable_weights
orig_weight = backend.get_value(weight)
# Save model weights.
save_path = os.path.join(self.get_temp_dir(), 'model')
model.save(save_path, save_format='h5' if h5 else 'tf')
# Run model again for 1 step (2 examples with a batch size of 2)
model.fit(np.ones((2, 2)), np.zeros((2, 2)), batch_size=2)
new_weight = backend.get_value(weight)
self.assertNotEqual(new_weight, orig_weight)
self.assertEqual(backend.get_value(opt.loss_scale), 4)
self.assertEqual(backend.get_value(opt.dynamic_counter), 0)
# Load model weights and ensure loss scale weights are restored.
model = save.load_model(
save_path, custom_objects={'MultiplyLayer': mp_test_util.MultiplyLayer})
(weight,) = model.trainable_weights
loaded_weight = backend.get_value(weight)
self.assertEqual(loaded_weight, orig_weight)
# Currently the loss scale isn't always saved when the model is saved with
# Model.save(). So we assert the loss scale either has the value when it was
# saved, or the value it was initialized with.
# TODO(reedwm): Always save/restore the loss scale with Model.save().
self.assertIn(backend.get_value(model.optimizer.loss_scale), (1, 2))
self.assertIn(backend.get_value(model.optimizer.dynamic_counter), (0, 1))
# Test optimizer attributes and type
self.assertEqual(model.optimizer.initial_scale, 1.)
self.assertEqual(model.optimizer.dynamic_growth_steps, 2.)
self.assertEqual(type(model.optimizer),
loss_scale_optimizer.LossScaleOptimizer)
class ApplicationModelTest(keras_parameterized.TestCase):
"""Tests that application models can be built with mixed precision.
This does not test that such models can be trained in mixed precision, as
doing so takes too much time for a unit test.
"""
@parameterized.named_parameters(
('densenet', densenet.DenseNet121),
('efficientnet', efficientnet.EfficientNetB0),
('inception_resnet_v2', inception_resnet_v2.InceptionResNetV2),
('inception_v3', inception_v3.InceptionV3),
('mobilenet', mobilenet.MobileNet),
('nasnet', nasnet.NASNetMobile),
('vgg16', vgg16.VGG16),
('xception', xception.Xception),
('resnet50', resnet.ResNet50),
)
def test_application_model(self, app):
# Run on CPU since model weights may exhaust GPU memory
with policy.policy_scope('mixed_float16'), tf.device('/CPU:0'):
app(weights=None)
if __name__ == '__main__':
base_layer_utils.enable_v2_dtype_behavior()
tf.test.main()
| 35,294 | 39.756351 | 80 | py |
keras | keras-master/keras/mixed_precision/layer_correctness_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests various Layer subclasses have correct outputs with mixed precision."""
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import numpy as np
from keras import keras_parameterized
from keras import layers
from keras import models
from keras.layers import advanced_activations
from keras.layers import convolutional
from keras.layers import convolutional_recurrent
from keras.layers import core
from keras.layers import dense_attention
from keras.layers import embeddings
from keras.layers import local
from keras.layers import merge
from keras.layers import noise
from keras.layers import pooling
from keras.layers import recurrent
from keras.layers import recurrent_v2
from keras.layers import wrappers
from keras.layers.normalization import batch_normalization
from keras.layers.normalization import layer_normalization
from keras.layers.preprocessing import image_preprocessing
from keras.layers.preprocessing import normalization
from keras.mixed_precision import policy
def create_mirrored_strategy():
# The test creates two virtual CPUs, and we use both of them to test with
# multiple devices.
return tf.distribute.MirroredStrategy(['cpu:0', 'cpu:1'])
def _create_normalization_layer_with_adapt():
layer = normalization.Normalization()
layer.adapt(np.random.normal(size=(10, 4)))
return layer
def _create_normalization_layer_without_adapt():
return normalization.Normalization(
mean=np.random.normal(size=(4,)),
variance=np.random.uniform(0.5, 2., size=(4,))
)
class LayerCorrectnessTest(keras_parameterized.TestCase):
def setUp(self):
super(LayerCorrectnessTest, self).setUp()
# Set two virtual CPUs to test MirroredStrategy with multiple devices
cpus = tf.config.list_physical_devices('CPU')
tf.config.set_logical_device_configuration(cpus[0], [
tf.config.LogicalDeviceConfiguration(),
tf.config.LogicalDeviceConfiguration(),
])
def _create_model_from_layer(self, layer, input_shapes):
inputs = [layers.Input(batch_input_shape=s) for s in input_shapes]
if len(inputs) == 1:
inputs = inputs[0]
y = layer(inputs)
model = models.Model(inputs, y)
model.compile('sgd', 'mse')
return model
@parameterized.named_parameters(
('LeakyReLU', advanced_activations.LeakyReLU, (2, 2)),
('PReLU', advanced_activations.PReLU, (2, 2)),
('ELU', advanced_activations.ELU, (2, 2)),
('ThresholdedReLU', advanced_activations.ThresholdedReLU, (2, 2)),
('Softmax', advanced_activations.Softmax, (2, 2)),
('ReLU', advanced_activations.ReLU, (2, 2)),
('Conv1D', lambda: convolutional.Conv1D(2, 2), (2, 2, 1)),
('Conv2D', lambda: convolutional.Conv2D(2, 2), (2, 2, 2, 1)),
('Conv3D', lambda: convolutional.Conv3D(2, 2), (2, 2, 2, 2, 1)),
('Conv2DTranspose', lambda: convolutional.Conv2DTranspose(2, 2),
(2, 2, 2, 2)),
('SeparableConv2D', lambda: convolutional.SeparableConv2D(2, 2),
(2, 2, 2, 1)),
('DepthwiseConv2D', lambda: convolutional.DepthwiseConv2D(2, 2),
(2, 2, 2, 1)),
('UpSampling2D', convolutional.UpSampling2D, (2, 2, 2, 1)),
('ZeroPadding2D', convolutional.ZeroPadding2D, (2, 2, 2, 1)),
('Cropping2D', convolutional.Cropping2D, (2, 3, 3, 1)),
('ConvLSTM2D',
lambda: convolutional_recurrent.ConvLSTM2D(4, kernel_size=(2, 2)),
(4, 4, 4, 4, 4)),
('Dense', lambda: core.Dense(2), (2, 2)),
('Dropout', lambda: core.Dropout(0.5), (2, 2)),
('SpatialDropout2D', lambda: core.SpatialDropout2D(0.5), (2, 2, 2, 2)),
('Activation', lambda: core.Activation('sigmoid'), (2, 2)),
('Reshape', lambda: core.Reshape((1, 4, 1)), (2, 2, 2)),
('Permute', lambda: core.Permute((2, 1)), (2, 2, 2)),
('Attention', dense_attention.Attention, [(2, 2, 3), (2, 3, 3),
(2, 3, 3)]),
('AdditiveAttention', dense_attention.AdditiveAttention, [(2, 2, 3),
(2, 3, 3),
(2, 3, 3)]),
('Embedding', lambda: embeddings.Embedding(4, 4),
(2, 4), 2e-3, 2e-3, np.random.randint(4, size=(2, 4))),
('LocallyConnected1D', lambda: local.LocallyConnected1D(2, 2), (2, 2, 1)),
('LocallyConnected2D', lambda: local.LocallyConnected2D(2, 2),
(2, 2, 2, 1)),
('Add', merge.Add, [(2, 2), (2, 2)]),
('Subtract', merge.Subtract, [(2, 2), (2, 2)]),
('Multiply', merge.Multiply, [(2, 2), (2, 2)]),
('Average', merge.Average, [(2, 2), (2, 2)]),
('Maximum', merge.Maximum, [(2, 2), (2, 2)]),
('Minimum', merge.Minimum, [(2, 2), (2, 2)]),
('Concatenate', merge.Concatenate, [(2, 2), (2, 2)]),
('Dot', lambda: merge.Dot(1), [(2, 2), (2, 2)]),
('GaussianNoise', lambda: noise.GaussianNoise(0.5), (2, 2)),
('GaussianDropout', lambda: noise.GaussianDropout(0.5), (2, 2)),
('AlphaDropout', lambda: noise.AlphaDropout(0.5), (2, 2)),
('BatchNormalization', batch_normalization.BatchNormalization,
(2, 2), 1e-2, 1e-2),
('LayerNormalization', layer_normalization.LayerNormalization, (2, 2)),
('LayerNormalizationUnfused',
lambda: layer_normalization.LayerNormalization(axis=1), (2, 2, 2)),
('MaxPooling2D', pooling.MaxPooling2D, (2, 2, 2, 1)),
('AveragePooling2D', pooling.AveragePooling2D, (2, 2, 2, 1)),
('GlobalMaxPooling2D', pooling.GlobalMaxPooling2D, (2, 2, 2, 1)),
('GlobalAveragePooling2D', pooling.GlobalAveragePooling2D, (2, 2, 2, 1)),
('SimpleRNN', lambda: recurrent.SimpleRNN(units=4),
(4, 4, 4), 1e-2, 1e-2),
('GRU', lambda: recurrent.GRU(units=4), (4, 4, 4)),
('LSTM', lambda: recurrent.LSTM(units=4), (4, 4, 4)),
('GRUV2', lambda: recurrent_v2.GRU(units=4), (4, 4, 4)),
('LSTMV2', lambda: recurrent_v2.LSTM(units=4), (4, 4, 4)),
('TimeDistributed', lambda: wrappers.TimeDistributed(core.Dense(2)),
(2, 2, 2)),
('Bidirectional',
lambda: wrappers.Bidirectional(recurrent.SimpleRNN(units=4)), (2, 2, 2)),
('AttentionLayerCausal', lambda: dense_attention.Attention(causal=True), [
(2, 2, 3), (2, 3, 3), (2, 3, 3)
]),
('AdditiveAttentionLayerCausal',
lambda: dense_attention.AdditiveAttention(causal=True), [(2, 3, 4),
(2, 3, 4),
(2, 3, 4)]),
('NormalizationAdapt', _create_normalization_layer_with_adapt, (4, 4)),
('NormalizationNoAdapt', _create_normalization_layer_without_adapt,
(4, 4)),
('Resizing', lambda: image_preprocessing.Resizing(3, 3), (2, 5, 5, 1)),
('Rescaling', lambda: image_preprocessing.Rescaling(2., 1.), (6, 6)),
('CenterCrop', lambda: image_preprocessing.CenterCrop(3, 3),
(2, 5, 5, 1))
)
def test_layer(self, f32_layer_fn, input_shape, rtol=2e-3, atol=2e-3,
input_data=None):
"""Tests a layer by comparing the float32 and mixed precision weights.
A float32 layer, a mixed precision layer, and a distributed mixed precision
layer are run. The three layers are identical other than their dtypes and
distribution strategies. The outputs after predict() and weights after fit()
are asserted to be close.
Args:
f32_layer_fn: A function returning a float32 layer. The other two layers
will automatically be created from this
input_shape: The shape of the input to the layer, including the batch
dimension. Or a list of shapes if the layer takes multiple inputs.
rtol: The relative tolerance to be asserted.
atol: The absolute tolerance to be asserted.
input_data: A Numpy array with the data of the input. If None, input data
will be randomly generated
"""
if f32_layer_fn == convolutional.ZeroPadding2D and \
tf.test.is_built_with_rocm():
return
if isinstance(input_shape[0], int):
input_shapes = [input_shape]
else:
input_shapes = input_shape
strategy = create_mirrored_strategy()
f32_layer = f32_layer_fn()
# Create the layers
assert f32_layer.dtype == f32_layer._compute_dtype == 'float32'
config = f32_layer.get_config()
config['dtype'] = policy.Policy('mixed_float16')
mp_layer = f32_layer.__class__.from_config(config)
distributed_mp_layer = f32_layer.__class__.from_config(config)
# Compute per_replica_input_shapes for the distributed model
global_batch_size = input_shapes[0][0]
assert global_batch_size % strategy.num_replicas_in_sync == 0, (
'The number of replicas, %d, does not divide the global batch size of '
'%d' % (strategy.num_replicas_in_sync, global_batch_size))
per_replica_batch_size = (
global_batch_size // strategy.num_replicas_in_sync)
per_replica_input_shapes = [(per_replica_batch_size,) + s[1:]
for s in input_shapes]
# Create the models
f32_model = self._create_model_from_layer(f32_layer, input_shapes)
mp_model = self._create_model_from_layer(mp_layer, input_shapes)
with strategy.scope():
distributed_mp_model = self._create_model_from_layer(
distributed_mp_layer, per_replica_input_shapes)
# Set all model weights to the same values
f32_weights = f32_model.get_weights()
mp_model.set_weights(f32_weights)
distributed_mp_model.set_weights(f32_weights)
# Generate input data
if input_data is None:
# Cast inputs to float16 to avoid measuring error from having f16 layers
# cast to float16.
input_data = [np.random.normal(size=s).astype('float16')
for s in input_shapes]
if len(input_data) == 1:
input_data = input_data[0]
# Assert all models have close outputs.
f32_output = f32_model.predict(input_data)
mp_output = mp_model.predict(input_data)
self.assertAllClose(
mp_output, f32_output, rtol=rtol, atol=atol)
self.assertAllClose(
distributed_mp_model.predict(input_data), f32_output, rtol=rtol,
atol=atol)
# Run fit() on models
output = np.random.normal(size=f32_model.outputs[0].shape).astype('float16')
for model in f32_model, mp_model, distributed_mp_model:
model.fit(input_data, output, batch_size=global_batch_size)
# Assert all models have close weights
f32_weights = f32_model.get_weights()
self.assertAllClose(
mp_model.get_weights(), f32_weights, rtol=rtol, atol=atol)
self.assertAllClose(
distributed_mp_model.get_weights(), f32_weights, rtol=rtol, atol=atol)
if __name__ == '__main__':
tf.compat.v1.enable_v2_behavior()
tf.test.main()
| 11,512 | 43.451737 | 80 | py |
keras | keras-master/keras/applications/mobilenet_v2.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""MobileNet v2 models for Keras.
MobileNetV2 is a general architecture and can be used for multiple use cases.
Depending on the use case, it can use different input layer sizes and
different width factors. This allows different width models to reduce
the number of multiply-adds and thereby
reduce inference cost on mobile devices.
MobileNetV2 is very similar to the original MobileNet,
except that it uses inverted residual blocks with
bottlenecking features. It has a drastically lower
parameter count than the original MobileNet.
MobileNets support any input size greater
than 32 x 32, with larger image sizes
offering better performance.
The number of parameters and number of multiply-adds
can be modified by using the `alpha` parameter,
which increases/decreases the number of filters in each layer.
By altering the image size and `alpha` parameter,
all 22 models from the paper can be built, with ImageNet weights provided.
The paper demonstrates the performance of MobileNets using `alpha` values of
0.35, 0.5, 0.75, 1.0 (also called 100% MobileNet), 1.3, and 1.4.
For each of these `alpha` values, weights for 5 different input image sizes
are provided (224, 192, 160, 128, and 96).
The following table describes the performance of
MobileNet on various input sizes:
------------------------------------------------------------------------
MACs stands for Multiply Adds
| Classification Checkpoint | MACs (M) | Parameters (M) | Top 1 Accuracy | Top 5 Accuracy |
|---------------------------|----------|----------------|----------------|----------------|
| [mobilenet_v2_1.4_224] | 582 | 6.06 | 75.0 | 92.5 |
| [mobilenet_v2_1.3_224] | 509 | 5.34 | 74.4 | 92.1 |
| [mobilenet_v2_1.0_224] | 300 | 3.47 | 71.8 | 91.0 |
| [mobilenet_v2_1.0_192] | 221 | 3.47 | 70.7 | 90.1 |
| [mobilenet_v2_1.0_160] | 154 | 3.47 | 68.8 | 89.0 |
| [mobilenet_v2_1.0_128] | 99 | 3.47 | 65.3 | 86.9 |
| [mobilenet_v2_1.0_96] | 56 | 3.47 | 60.3 | 83.2 |
| [mobilenet_v2_0.75_224] | 209 | 2.61 | 69.8 | 89.6 |
| [mobilenet_v2_0.75_192] | 153 | 2.61 | 68.7 | 88.9 |
| [mobilenet_v2_0.75_160] | 107 | 2.61 | 66.4 | 87.3 |
| [mobilenet_v2_0.75_128] | 69 | 2.61 | 63.2 | 85.3 |
| [mobilenet_v2_0.75_96] | 39 | 2.61 | 58.8 | 81.6 |
| [mobilenet_v2_0.5_224] | 97 | 1.95 | 65.4 | 86.4 |
| [mobilenet_v2_0.5_192] | 71 | 1.95 | 63.9 | 85.4 |
| [mobilenet_v2_0.5_160] | 50 | 1.95 | 61.0 | 83.2 |
| [mobilenet_v2_0.5_128] | 32 | 1.95 | 57.7 | 80.8 |
| [mobilenet_v2_0.5_96] | 18 | 1.95 | 51.2 | 75.8 |
| [mobilenet_v2_0.35_224] | 59 | 1.66 | 60.3 | 82.9 |
| [mobilenet_v2_0.35_192] | 43 | 1.66 | 58.2 | 81.2 |
| [mobilenet_v2_0.35_160] | 30 | 1.66 | 55.7 | 79.1 |
| [mobilenet_v2_0.35_128] | 20 | 1.66 | 50.8 | 75.0 |
| [mobilenet_v2_0.35_96] | 11 | 1.66 | 45.5 | 70.4 |
Reference:
- [MobileNetV2: Inverted Residuals and Linear Bottlenecks](
https://arxiv.org/abs/1801.04381) (CVPR 2018)
"""
from keras import backend
from keras.applications import imagenet_utils
from keras.engine import training
from keras.layers import VersionAwareLayers
from keras.utils import data_utils
from keras.utils import layer_utils
import tensorflow.compat.v2 as tf
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
BASE_WEIGHT_PATH = ('https://storage.googleapis.com/tensorflow/'
'keras-applications/mobilenet_v2/')
layers = None
@keras_export('keras.applications.mobilenet_v2.MobileNetV2',
'keras.applications.MobileNetV2')
def MobileNetV2(input_shape=None,
alpha=1.0,
include_top=True,
weights='imagenet',
input_tensor=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
**kwargs):
"""Instantiates the MobileNetV2 architecture.
MobileNetV2 is very similar to the original MobileNet,
except that it uses inverted residual blocks with
bottlenecking features. It has a drastically lower
parameter count than the original MobileNet.
MobileNets support any input size greater
than 32 x 32, with larger image sizes
offering better performance.
Reference:
- [MobileNetV2: Inverted Residuals and Linear Bottlenecks](
https://arxiv.org/abs/1801.04381) (CVPR 2018)
This function returns a Keras image classification model,
optionally loaded with weights pre-trained on ImageNet.
For image classification use cases, see
[this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
Note: each Keras Application expects a specific kind of input preprocessing.
For MobileNetV2, call `tf.keras.applications.mobilenet_v2.preprocess_input`
on your inputs before passing them to the model.
`mobilenet_v2.preprocess_input` will scale input pixels between -1 and 1.
Args:
input_shape: Optional shape tuple, to be specified if you would
like to use a model with an input image resolution that is not
(224, 224, 3).
      It should have exactly 3 input channels (224, 224, 3).
You can also omit this option if you would like
to infer input_shape from an input_tensor.
If you choose to include both input_tensor and input_shape then
      input_shape will be used if they match; if the shapes
      do not match, an error will be raised.
E.g. `(160, 160, 3)` would be one valid value.
alpha: Float, larger than zero, controls the width of the network. This is
known as the width multiplier in the MobileNetV2 paper, but the name is
      kept for consistency with the `applications.MobileNetV1` model in Keras.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1.0, default number of filters from the paper
are used at each layer.
include_top: Boolean, whether to include the fully-connected layer at the
top of the network. Defaults to `True`.
weights: String, one of `None` (random initialization), 'imagenet'
(pre-training on ImageNet), or the path to the weights file to be loaded.
input_tensor: Optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
pooling: String, optional pooling mode for feature extraction when
`include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
classes: Optional integer number of classes to classify images into, only to
be specified if `include_top` is True, and if no `weights` argument is
specified.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
**kwargs: For backwards compatibility only.
Returns:
A `keras.Model` instance.
"""
global layers
if 'layers' in kwargs:
layers = kwargs.pop('layers')
else:
layers = VersionAwareLayers()
if kwargs:
raise ValueError(f'Unknown argument(s): {kwargs}')
if not (weights in {'imagenet', None} or tf.io.gfile.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded. '
f'Received `weights={weights}`')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError(
'If using `weights` as `"imagenet"` with `include_top` '
f'as true, `classes` should be 1000. Received `classes={classes}`')
# Determine proper input shape and default size.
# If both input_shape and input_tensor are used, they should match
if input_shape is not None and input_tensor is not None:
try:
is_input_t_tensor = backend.is_keras_tensor(input_tensor)
except ValueError:
try:
is_input_t_tensor = backend.is_keras_tensor(
layer_utils.get_source_inputs(input_tensor))
except ValueError:
raise ValueError(
            f'input_tensor: {input_tensor} '
            'is not a valid input tensor. '
f'Received `type(input_tensor)={type(input_tensor)}`'
)
if is_input_t_tensor:
if backend.image_data_format() == 'channels_first':
if backend.int_shape(input_tensor)[1] != input_shape[1]:
raise ValueError('input_shape[1] must equal shape(input_tensor)[1] '
'when `image_data_format` is `channels_first`; '
'Received `input_tensor.shape='
f'{input_tensor.shape}`'
f', `input_shape={input_shape}`')
else:
if backend.int_shape(input_tensor)[2] != input_shape[1]:
raise ValueError(
'input_tensor.shape[2] must equal input_shape[1]; '
'Received `input_tensor.shape='
f'{input_tensor.shape}`, '
f'`input_shape={input_shape}`')
else:
raise ValueError('input_tensor is not a Keras tensor; '
f'Received `input_tensor={input_tensor}`')
# If input_shape is None, infer shape from input_tensor.
if input_shape is None and input_tensor is not None:
try:
backend.is_keras_tensor(input_tensor)
except ValueError:
raise ValueError('input_tensor must be a valid Keras tensor type; '
f'Received {input_tensor} of type {type(input_tensor)}')
if input_shape is None and not backend.is_keras_tensor(input_tensor):
default_size = 224
elif input_shape is None and backend.is_keras_tensor(input_tensor):
if backend.image_data_format() == 'channels_first':
rows = backend.int_shape(input_tensor)[2]
cols = backend.int_shape(input_tensor)[3]
else:
rows = backend.int_shape(input_tensor)[1]
cols = backend.int_shape(input_tensor)[2]
if rows == cols and rows in [96, 128, 160, 192, 224]:
default_size = rows
else:
default_size = 224
# If input_shape is None and no input_tensor
elif input_shape is None:
default_size = 224
# If input_shape is not None, assume default size.
else:
if backend.image_data_format() == 'channels_first':
rows = input_shape[1]
cols = input_shape[2]
else:
rows = input_shape[0]
cols = input_shape[1]
if rows == cols and rows in [96, 128, 160, 192, 224]:
default_size = rows
else:
default_size = 224
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=default_size,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if backend.image_data_format() == 'channels_last':
row_axis, col_axis = (0, 1)
else:
row_axis, col_axis = (1, 2)
rows = input_shape[row_axis]
cols = input_shape[col_axis]
if weights == 'imagenet':
if alpha not in [0.35, 0.50, 0.75, 1.0, 1.3, 1.4]:
raise ValueError('If imagenet weights are being loaded, '
'alpha must be one of `0.35`, `0.50`, `0.75`, '
'`1.0`, `1.3` or `1.4` only;'
f' Received `alpha={alpha}`')
if rows != cols or rows not in [96, 128, 160, 192, 224]:
rows = 224
logging.warning('`input_shape` is undefined or non-square, '
'or `rows` is not in [96, 128, 160, 192, 224]. '
'Weights for input shape (224, 224) will be '
'loaded as the default.')
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
first_block_filters = _make_divisible(32 * alpha, 8)
x = layers.Conv2D(
first_block_filters,
kernel_size=3,
strides=(2, 2),
padding='same',
use_bias=False,
name='Conv1')(img_input)
x = layers.BatchNormalization(
axis=channel_axis, epsilon=1e-3, momentum=0.999, name='bn_Conv1')(
x)
x = layers.ReLU(6., name='Conv1_relu')(x)
x = _inverted_res_block(
x, filters=16, alpha=alpha, stride=1, expansion=1, block_id=0)
x = _inverted_res_block(
x, filters=24, alpha=alpha, stride=2, expansion=6, block_id=1)
x = _inverted_res_block(
x, filters=24, alpha=alpha, stride=1, expansion=6, block_id=2)
x = _inverted_res_block(
x, filters=32, alpha=alpha, stride=2, expansion=6, block_id=3)
x = _inverted_res_block(
x, filters=32, alpha=alpha, stride=1, expansion=6, block_id=4)
x = _inverted_res_block(
x, filters=32, alpha=alpha, stride=1, expansion=6, block_id=5)
x = _inverted_res_block(
x, filters=64, alpha=alpha, stride=2, expansion=6, block_id=6)
x = _inverted_res_block(
x, filters=64, alpha=alpha, stride=1, expansion=6, block_id=7)
x = _inverted_res_block(
x, filters=64, alpha=alpha, stride=1, expansion=6, block_id=8)
x = _inverted_res_block(
x, filters=64, alpha=alpha, stride=1, expansion=6, block_id=9)
x = _inverted_res_block(
x, filters=96, alpha=alpha, stride=1, expansion=6, block_id=10)
x = _inverted_res_block(
x, filters=96, alpha=alpha, stride=1, expansion=6, block_id=11)
x = _inverted_res_block(
x, filters=96, alpha=alpha, stride=1, expansion=6, block_id=12)
x = _inverted_res_block(
x, filters=160, alpha=alpha, stride=2, expansion=6, block_id=13)
x = _inverted_res_block(
x, filters=160, alpha=alpha, stride=1, expansion=6, block_id=14)
x = _inverted_res_block(
x, filters=160, alpha=alpha, stride=1, expansion=6, block_id=15)
x = _inverted_res_block(
x, filters=320, alpha=alpha, stride=1, expansion=6, block_id=16)
  # As stated in the paper, no width multiplier (alpha) is applied to the last
  # convolution when alpha <= 1.0; only if the width multiplier is greater
  # than 1 is the number of output channels increased.
if alpha > 1.0:
last_block_filters = _make_divisible(1280 * alpha, 8)
else:
last_block_filters = 1280
x = layers.Conv2D(
last_block_filters, kernel_size=1, use_bias=False, name='Conv_1')(
x)
x = layers.BatchNormalization(
axis=channel_axis, epsilon=1e-3, momentum=0.999, name='Conv_1_bn')(
x)
x = layers.ReLU(6., name='out_relu')(x)
if include_top:
x = layers.GlobalAveragePooling2D()(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Dense(classes, activation=classifier_activation,
name='predictions')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D()(x)
# Ensure that the model takes into account any potential predecessors of
# `input_tensor`.
if input_tensor is not None:
inputs = layer_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = training.Model(inputs, x, name='mobilenetv2_%0.2f_%s' % (alpha, rows))
# Load weights.
if weights == 'imagenet':
if include_top:
model_name = ('mobilenet_v2_weights_tf_dim_ordering_tf_kernels_' +
str(float(alpha)) + '_' + str(rows) + '.h5')
weight_path = BASE_WEIGHT_PATH + model_name
weights_path = data_utils.get_file(
model_name, weight_path, cache_subdir='models')
else:
model_name = ('mobilenet_v2_weights_tf_dim_ordering_tf_kernels_' +
str(float(alpha)) + '_' + str(rows) + '_no_top' + '.h5')
weight_path = BASE_WEIGHT_PATH + model_name
weights_path = data_utils.get_file(
model_name, weight_path, cache_subdir='models')
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id):
"""Inverted ResNet block."""
channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
in_channels = backend.int_shape(inputs)[channel_axis]
pointwise_conv_filters = int(filters * alpha)
# Ensure the number of filters on the last 1x1 convolution is divisible by 8.
pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
x = inputs
prefix = 'block_{}_'.format(block_id)
if block_id:
# Expand with a pointwise 1x1 convolution.
x = layers.Conv2D(
expansion * in_channels,
kernel_size=1,
padding='same',
use_bias=False,
activation=None,
name=prefix + 'expand')(
x)
x = layers.BatchNormalization(
axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
name=prefix + 'expand_BN')(
x)
x = layers.ReLU(6., name=prefix + 'expand_relu')(x)
else:
prefix = 'expanded_conv_'
# Depthwise 3x3 convolution.
if stride == 2:
x = layers.ZeroPadding2D(
padding=imagenet_utils.correct_pad(x, 3),
name=prefix + 'pad')(x)
x = layers.DepthwiseConv2D(
kernel_size=3,
strides=stride,
activation=None,
use_bias=False,
padding='same' if stride == 1 else 'valid',
name=prefix + 'depthwise')(
x)
x = layers.BatchNormalization(
axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
name=prefix + 'depthwise_BN')(
x)
x = layers.ReLU(6., name=prefix + 'depthwise_relu')(x)
  # Project with a pointwise 1x1 convolution.
x = layers.Conv2D(
pointwise_filters,
kernel_size=1,
padding='same',
use_bias=False,
activation=None,
name=prefix + 'project')(
x)
x = layers.BatchNormalization(
axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
name=prefix + 'project_BN')(
x)
if in_channels == pointwise_filters and stride == 1:
return layers.Add(name=prefix + 'add')([inputs, x])
return x
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
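# Illustrative values for `_make_divisible` (a sketch added for clarity, not
# part of the original source): channel counts are rounded to multiples of the
# divisor without dropping below 90% of the requested value, e.g.
#   _make_divisible(32 * 1.0, 8)   -> 32
#   _make_divisible(32 * 0.35, 8)  -> 16   (11.2 first rounds down to 8, which
#                                           is below 0.9 * 11.2, so the result
#                                           is bumped up by one divisor)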
@keras_export('keras.applications.mobilenet_v2.preprocess_input')
def preprocess_input(x, data_format=None):
return imagenet_utils.preprocess_input(x, data_format=data_format, mode='tf')
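# A minimal usage sketch (illustrative only; `imgs` is an assumed float32 RGB
# batch with values in [0, 255]): `mode='tf'` rescales pixels to [-1, 1].
#   x = preprocess_input(imgs)
#   preds = MobileNetV2(weights='imagenet')(x)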
@keras_export('keras.applications.mobilenet_v2.decode_predictions')
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
mode='',
ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF,
error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
| 20,697 | 38.05283 | 87 | py |
keras | keras-master/keras/applications/efficientnet.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
# pylint: disable=missing-docstring
"""EfficientNet models for Keras.
Reference:
- [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](
https://arxiv.org/abs/1905.11946) (ICML 2019)
"""
import tensorflow.compat.v2 as tf
import copy
import math
from keras import backend
from keras.applications import imagenet_utils
from keras.engine import training
from keras.layers import VersionAwareLayers
from keras.utils import data_utils
from keras.utils import layer_utils
from tensorflow.python.util.tf_export import keras_export
BASE_WEIGHTS_PATH = 'https://storage.googleapis.com/keras-applications/'
WEIGHTS_HASHES = {
'b0': ('902e53a9f72be733fc0bcb005b3ebbac',
'50bc09e76180e00e4465e1a485ddc09d'),
'b1': ('1d254153d4ab51201f1646940f018540',
'74c4e6b3e1f6a1eea24c589628592432'),
'b2': ('b15cce36ff4dcbd00b6dd88e7857a6ad',
'111f8e2ac8aa800a7a99e3239f7bfb39'),
'b3': ('ffd1fdc53d0ce67064dc6a9c7960ede0',
'af6d107764bb5b1abb91932881670226'),
'b4': ('18c95ad55216b8f92d7e70b3a046e2fc',
'ebc24e6d6c33eaebbd558eafbeedf1ba'),
'b5': ('ace28f2a6363774853a83a0b21b9421a',
'38879255a25d3c92d5e44e04ae6cec6f'),
'b6': ('165f6e37dce68623721b423839de8be5',
'9ecce42647a20130c1f39a5d4cb75743'),
'b7': ('8c03f828fec3ef71311cd463b6759d99',
'cbcfe4450ddf6f3ad90b1b398090fe4a'),
}
DEFAULT_BLOCKS_ARGS = [{
'kernel_size': 3,
'repeats': 1,
'filters_in': 32,
'filters_out': 16,
'expand_ratio': 1,
'id_skip': True,
'strides': 1,
'se_ratio': 0.25
}, {
'kernel_size': 3,
'repeats': 2,
'filters_in': 16,
'filters_out': 24,
'expand_ratio': 6,
'id_skip': True,
'strides': 2,
'se_ratio': 0.25
}, {
'kernel_size': 5,
'repeats': 2,
'filters_in': 24,
'filters_out': 40,
'expand_ratio': 6,
'id_skip': True,
'strides': 2,
'se_ratio': 0.25
}, {
'kernel_size': 3,
'repeats': 3,
'filters_in': 40,
'filters_out': 80,
'expand_ratio': 6,
'id_skip': True,
'strides': 2,
'se_ratio': 0.25
}, {
'kernel_size': 5,
'repeats': 3,
'filters_in': 80,
'filters_out': 112,
'expand_ratio': 6,
'id_skip': True,
'strides': 1,
'se_ratio': 0.25
}, {
'kernel_size': 5,
'repeats': 4,
'filters_in': 112,
'filters_out': 192,
'expand_ratio': 6,
'id_skip': True,
'strides': 2,
'se_ratio': 0.25
}, {
'kernel_size': 3,
'repeats': 1,
'filters_in': 192,
'filters_out': 320,
'expand_ratio': 6,
'id_skip': True,
'strides': 1,
'se_ratio': 0.25
}]
CONV_KERNEL_INITIALIZER = {
'class_name': 'VarianceScaling',
'config': {
'scale': 2.0,
'mode': 'fan_out',
'distribution': 'truncated_normal'
}
}
DENSE_KERNEL_INITIALIZER = {
'class_name': 'VarianceScaling',
'config': {
'scale': 1. / 3.,
'mode': 'fan_out',
'distribution': 'uniform'
}
}
layers = VersionAwareLayers()
BASE_DOCSTRING = """Instantiates the {name} architecture.
Reference:
- [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](
https://arxiv.org/abs/1905.11946) (ICML 2019)
This function returns a Keras image classification model,
optionally loaded with weights pre-trained on ImageNet.
For image classification use cases, see
[this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
Note: each Keras Application expects a specific kind of input preprocessing.
For EfficientNet, input preprocessing is included as part of the model
(as a `Rescaling` layer), and thus
`tf.keras.applications.efficientnet.preprocess_input` is actually a
pass-through function. EfficientNet models expect their inputs to be float
tensors of pixels with values in the [0-255] range.
Args:
include_top: Whether to include the fully-connected
layer at the top of the network. Defaults to True.
weights: One of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded. Defaults to 'imagenet'.
input_tensor: Optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: Optional shape tuple, only to be specified
if `include_top` is False.
      It should have exactly 3 input channels.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`. Defaults to None.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: Optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified. Defaults to 1000 (number of
ImageNet classes).
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
Defaults to 'softmax'.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
Returns:
A `keras.Model` instance.
"""
def EfficientNet(
width_coefficient,
depth_coefficient,
default_size,
dropout_rate=0.2,
drop_connect_rate=0.2,
depth_divisor=8,
activation='swish',
blocks_args='default',
model_name='efficientnet',
include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax'):
"""Instantiates the EfficientNet architecture using given scaling coefficients.
Args:
width_coefficient: float, scaling coefficient for network width.
depth_coefficient: float, scaling coefficient for network depth.
default_size: integer, default input image size.
dropout_rate: float, dropout rate before final classifier layer.
drop_connect_rate: float, dropout rate at skip connections.
depth_divisor: integer, a unit of network width.
activation: activation function.
blocks_args: list of dicts, parameters to construct block modules.
model_name: string, model name.
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False.
      It should have exactly 3 input channels.
pooling: optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
Returns:
A `keras.Model` instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
ValueError: if `classifier_activation` is not `softmax` or `None` when
using a pretrained top layer.
"""
if blocks_args == 'default':
blocks_args = DEFAULT_BLOCKS_ARGS
if not (weights in {'imagenet', None} or tf.io.gfile.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=default_size,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
def round_filters(filters, divisor=depth_divisor):
"""Round number of filters based on depth multiplier."""
filters *= width_coefficient
new_filters = max(divisor, int(filters + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_filters < 0.9 * filters:
new_filters += divisor
return int(new_filters)
def round_repeats(repeats):
"""Round number of repeats based on depth multiplier."""
return int(math.ceil(depth_coefficient * repeats))
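  # Illustrative arithmetic (not asserted anywhere in this file): with the B7
  # coefficients width_coefficient=2.0 and depth_coefficient=3.1 used below,
  # the stem's round_filters(32) becomes 64, and a stage with 4 repeats is
  # expanded to round_repeats(4) = ceil(3.1 * 4) = 13 repeats.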
# Build stem
x = img_input
x = layers.Rescaling(1. / 255.)(x)
x = layers.Normalization(axis=bn_axis)(x)
x = layers.ZeroPadding2D(
padding=imagenet_utils.correct_pad(x, 3),
name='stem_conv_pad')(x)
x = layers.Conv2D(
round_filters(32),
3,
strides=2,
padding='valid',
use_bias=False,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name='stem_conv')(x)
x = layers.BatchNormalization(axis=bn_axis, name='stem_bn')(x)
x = layers.Activation(activation, name='stem_activation')(x)
# Build blocks
blocks_args = copy.deepcopy(blocks_args)
b = 0
blocks = float(sum(round_repeats(args['repeats']) for args in blocks_args))
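  # `b / blocks` runs from 0 towards 1 across the network, so the stochastic
  # depth (drop connect) rate passed to each block below ramps up linearly
  # from 0 at the first block to roughly `drop_connect_rate` at the last one.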
for (i, args) in enumerate(blocks_args):
assert args['repeats'] > 0
# Update block input and output filters based on depth multiplier.
args['filters_in'] = round_filters(args['filters_in'])
args['filters_out'] = round_filters(args['filters_out'])
for j in range(round_repeats(args.pop('repeats'))):
# The first block needs to take care of stride and filter size increase.
if j > 0:
args['strides'] = 1
args['filters_in'] = args['filters_out']
x = block(
x,
activation,
drop_connect_rate * b / blocks,
name='block{}{}_'.format(i + 1, chr(j + 97)),
**args)
b += 1
# Build top
x = layers.Conv2D(
round_filters(1280),
1,
padding='same',
use_bias=False,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name='top_conv')(x)
x = layers.BatchNormalization(axis=bn_axis, name='top_bn')(x)
x = layers.Activation(activation, name='top_activation')(x)
if include_top:
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
if dropout_rate > 0:
x = layers.Dropout(dropout_rate, name='top_dropout')(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Dense(
classes,
activation=classifier_activation,
kernel_initializer=DENSE_KERNEL_INITIALIZER,
name='predictions')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D(name='max_pool')(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = layer_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = training.Model(inputs, x, name=model_name)
# Load weights.
if weights == 'imagenet':
if include_top:
file_suffix = '.h5'
file_hash = WEIGHTS_HASHES[model_name[-2:]][0]
else:
file_suffix = '_notop.h5'
file_hash = WEIGHTS_HASHES[model_name[-2:]][1]
file_name = model_name + file_suffix
weights_path = data_utils.get_file(
file_name,
BASE_WEIGHTS_PATH + file_name,
cache_subdir='models',
file_hash=file_hash)
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
def block(inputs,
activation='swish',
drop_rate=0.,
name='',
filters_in=32,
filters_out=16,
kernel_size=3,
strides=1,
expand_ratio=1,
se_ratio=0.,
id_skip=True):
"""An inverted residual block.
Args:
inputs: input tensor.
activation: activation function.
drop_rate: float between 0 and 1, fraction of the input units to drop.
name: string, block label.
filters_in: integer, the number of input filters.
filters_out: integer, the number of output filters.
kernel_size: integer, the dimension of the convolution window.
strides: integer, the stride of the convolution.
expand_ratio: integer, scaling coefficient for the input filters.
se_ratio: float between 0 and 1, fraction to squeeze the input filters.
id_skip: boolean.
Returns:
output tensor for the block.
"""
bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
# Expansion phase
filters = filters_in * expand_ratio
if expand_ratio != 1:
x = layers.Conv2D(
filters,
1,
padding='same',
use_bias=False,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=name + 'expand_conv')(
inputs)
x = layers.BatchNormalization(axis=bn_axis, name=name + 'expand_bn')(x)
x = layers.Activation(activation, name=name + 'expand_activation')(x)
else:
x = inputs
# Depthwise Convolution
if strides == 2:
x = layers.ZeroPadding2D(
padding=imagenet_utils.correct_pad(x, kernel_size),
name=name + 'dwconv_pad')(x)
conv_pad = 'valid'
else:
conv_pad = 'same'
x = layers.DepthwiseConv2D(
kernel_size,
strides=strides,
padding=conv_pad,
use_bias=False,
depthwise_initializer=CONV_KERNEL_INITIALIZER,
name=name + 'dwconv')(x)
x = layers.BatchNormalization(axis=bn_axis, name=name + 'bn')(x)
x = layers.Activation(activation, name=name + 'activation')(x)
# Squeeze and Excitation phase
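  # The SE branch global-average-pools the feature map, reduces it to
  # `filters_in * se_ratio` channels, expands it back with a sigmoid, and
  # multiplies the result channel-wise into `x`.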
if 0 < se_ratio <= 1:
filters_se = max(1, int(filters_in * se_ratio))
se = layers.GlobalAveragePooling2D(name=name + 'se_squeeze')(x)
if bn_axis == 1:
se_shape = (filters, 1, 1)
else:
se_shape = (1, 1, filters)
se = layers.Reshape(se_shape, name=name + 'se_reshape')(se)
se = layers.Conv2D(
filters_se,
1,
padding='same',
activation=activation,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=name + 'se_reduce')(
se)
se = layers.Conv2D(
filters,
1,
padding='same',
activation='sigmoid',
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=name + 'se_expand')(se)
x = layers.multiply([x, se], name=name + 'se_excite')
# Output phase
x = layers.Conv2D(
filters_out,
1,
padding='same',
use_bias=False,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=name + 'project_conv')(x)
x = layers.BatchNormalization(axis=bn_axis, name=name + 'project_bn')(x)
if id_skip and strides == 1 and filters_in == filters_out:
if drop_rate > 0:
x = layers.Dropout(
drop_rate, noise_shape=(None, 1, 1, 1), name=name + 'drop')(x)
x = layers.add([x, inputs], name=name + 'add')
return x
@keras_export('keras.applications.efficientnet.EfficientNetB0',
'keras.applications.EfficientNetB0')
def EfficientNetB0(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
**kwargs):
return EfficientNet(
1.0,
1.0,
224,
0.2,
model_name='efficientnetb0',
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
**kwargs)
@keras_export('keras.applications.efficientnet.EfficientNetB1',
'keras.applications.EfficientNetB1')
def EfficientNetB1(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
**kwargs):
return EfficientNet(
1.0,
1.1,
240,
0.2,
model_name='efficientnetb1',
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
**kwargs)
@keras_export('keras.applications.efficientnet.EfficientNetB2',
'keras.applications.EfficientNetB2')
def EfficientNetB2(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
**kwargs):
return EfficientNet(
1.1,
1.2,
260,
0.3,
model_name='efficientnetb2',
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
**kwargs)
@keras_export('keras.applications.efficientnet.EfficientNetB3',
'keras.applications.EfficientNetB3')
def EfficientNetB3(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
**kwargs):
return EfficientNet(
1.2,
1.4,
300,
0.3,
model_name='efficientnetb3',
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
**kwargs)
@keras_export('keras.applications.efficientnet.EfficientNetB4',
'keras.applications.EfficientNetB4')
def EfficientNetB4(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
**kwargs):
return EfficientNet(
1.4,
1.8,
380,
0.4,
model_name='efficientnetb4',
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
**kwargs)
@keras_export('keras.applications.efficientnet.EfficientNetB5',
'keras.applications.EfficientNetB5')
def EfficientNetB5(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
**kwargs):
return EfficientNet(
1.6,
2.2,
456,
0.4,
model_name='efficientnetb5',
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
**kwargs)
@keras_export('keras.applications.efficientnet.EfficientNetB6',
'keras.applications.EfficientNetB6')
def EfficientNetB6(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
**kwargs):
return EfficientNet(
1.8,
2.6,
528,
0.5,
model_name='efficientnetb6',
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
**kwargs)
@keras_export('keras.applications.efficientnet.EfficientNetB7',
'keras.applications.EfficientNetB7')
def EfficientNetB7(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
**kwargs):
return EfficientNet(
2.0,
3.1,
600,
0.5,
model_name='efficientnetb7',
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
**kwargs)
EfficientNetB0.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB0')
EfficientNetB1.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB1')
EfficientNetB2.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB2')
EfficientNetB3.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB3')
EfficientNetB4.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB4')
EfficientNetB5.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB5')
EfficientNetB6.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB6')
EfficientNetB7.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB7')
@keras_export('keras.applications.efficientnet.preprocess_input')
def preprocess_input(x, data_format=None): # pylint: disable=unused-argument
"""A placeholder method for backward compatibility.
The preprocessing logic has been included in the efficientnet model
implementation. Users are no longer required to call this method to normalize
  the input data. This method does nothing and is only kept as a placeholder
  to align the API surface between the old and new versions of the model.
Args:
x: A floating point `numpy.array` or a `tf.Tensor`.
data_format: Optional data format of the image tensor/array. Defaults to
None, in which case the global setting
`tf.keras.backend.image_data_format()` is used (unless you changed it,
it defaults to "channels_last").{mode}
Returns:
Unchanged `numpy.array` or `tf.Tensor`.
"""
return x
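# Unlike most other applications, EfficientNet performs rescaling and
# normalization inside the model itself, so raw images in [0, 255] can be fed
# directly. A minimal sketch (illustrative only; `images` is an assumed
# float32 batch of shape (N, 224, 224, 3)):
#   model = EfficientNetB0(weights='imagenet')
#   preds = model(images)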
@keras_export('keras.applications.efficientnet.decode_predictions')
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
| 25,089 | 31.5 | 87 | py |
keras | keras-master/keras/applications/resnet.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""ResNet models for Keras.
Reference:
- [Deep Residual Learning for Image Recognition](
https://arxiv.org/abs/1512.03385) (CVPR 2015)
"""
import tensorflow.compat.v2 as tf
from keras import backend
from keras.applications import imagenet_utils
from keras.engine import training
from keras.layers import VersionAwareLayers
from keras.utils import data_utils
from keras.utils import layer_utils
from tensorflow.python.util.tf_export import keras_export
BASE_WEIGHTS_PATH = (
'https://storage.googleapis.com/tensorflow/keras-applications/resnet/')
WEIGHTS_HASHES = {
'resnet50': ('2cb95161c43110f7111970584f804107',
'4d473c1dd8becc155b73f8504c6f6626'),
'resnet101': ('f1aeb4b969a6efcfb50fad2f0c20cfc5',
'88cf7a10940856eca736dc7b7e228a21'),
'resnet152': ('100835be76be38e30d865e96f2aaae62',
'ee4c566cf9a93f14d82f913c2dc6dd0c'),
'resnet50v2': ('3ef43a0b657b3be2300d5770ece849e0',
'fac2f116257151a9d068a22e544a4917'),
'resnet101v2': ('6343647c601c52e1368623803854d971',
'c0ed64b8031c3730f411d2eb4eea35b5'),
'resnet152v2': ('a49b44d1979771252814e80f8ec446f9',
'ed17cf2e0169df9d443503ef94b23b33'),
'resnext50': ('67a5b30d522ed92f75a1f16eef299d1a',
'62527c363bdd9ec598bed41947b379fc'),
'resnext101':
('34fb605428fcc7aa4d62f44404c11509', '0f678c91647380debd923963594981b3')
}
layers = None
def ResNet(stack_fn,
preact,
use_bias,
model_name='resnet',
include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
**kwargs):
"""Instantiates the ResNet, ResNetV2, and ResNeXt architecture.
Args:
stack_fn: a function that returns output tensor for the
stacked residual blocks.
preact: whether to use pre-activation or not
(True for ResNetV2, False for ResNet and ResNeXt).
use_bias: whether to use biases for convolutional layers or not
(True for ResNet and ResNetV2, False for ResNeXt).
model_name: string, model name.
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `channels_last` data format)
or `(3, 224, 224)` (with `channels_first` data format).
      It should have exactly 3 input channels.
pooling: optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
**kwargs: For backwards compatibility only.
Returns:
A `keras.Model` instance.
"""
global layers
if 'layers' in kwargs:
layers = kwargs.pop('layers')
else:
layers = VersionAwareLayers()
if kwargs:
raise ValueError('Unknown argument(s): %s' % (kwargs,))
if not (weights in {'imagenet', None} or tf.io.gfile.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=224,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
x = layers.ZeroPadding2D(
padding=((3, 3), (3, 3)), name='conv1_pad')(img_input)
x = layers.Conv2D(64, 7, strides=2, use_bias=use_bias, name='conv1_conv')(x)
if not preact:
x = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name='conv1_bn')(x)
x = layers.Activation('relu', name='conv1_relu')(x)
x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name='pool1_pad')(x)
x = layers.MaxPooling2D(3, strides=2, name='pool1_pool')(x)
x = stack_fn(x)
if preact:
x = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name='post_bn')(x)
x = layers.Activation('relu', name='post_relu')(x)
if include_top:
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Dense(classes, activation=classifier_activation,
name='predictions')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D(name='max_pool')(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = layer_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = training.Model(inputs, x, name=model_name)
# Load weights.
if (weights == 'imagenet') and (model_name in WEIGHTS_HASHES):
if include_top:
file_name = model_name + '_weights_tf_dim_ordering_tf_kernels.h5'
file_hash = WEIGHTS_HASHES[model_name][0]
else:
file_name = model_name + '_weights_tf_dim_ordering_tf_kernels_notop.h5'
file_hash = WEIGHTS_HASHES[model_name][1]
weights_path = data_utils.get_file(
file_name,
BASE_WEIGHTS_PATH + file_name,
cache_subdir='models',
file_hash=file_hash)
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
def block1(x, filters, kernel_size=3, stride=1, conv_shortcut=True, name=None):
"""A residual block.
Args:
x: input tensor.
filters: integer, filters of the bottleneck layer.
kernel_size: default 3, kernel size of the bottleneck layer.
stride: default 1, stride of the first layer.
conv_shortcut: default True, use convolution shortcut if True,
otherwise identity shortcut.
name: string, block label.
Returns:
Output tensor for the residual block.
"""
bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
if conv_shortcut:
shortcut = layers.Conv2D(
4 * filters, 1, strides=stride, name=name + '_0_conv')(x)
shortcut = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=name + '_0_bn')(shortcut)
else:
shortcut = x
x = layers.Conv2D(filters, 1, strides=stride, name=name + '_1_conv')(x)
x = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=name + '_1_bn')(x)
x = layers.Activation('relu', name=name + '_1_relu')(x)
x = layers.Conv2D(
      filters, kernel_size, padding='same', name=name + '_2_conv')(x)
x = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=name + '_2_bn')(x)
x = layers.Activation('relu', name=name + '_2_relu')(x)
x = layers.Conv2D(4 * filters, 1, name=name + '_3_conv')(x)
x = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=name + '_3_bn')(x)
x = layers.Add(name=name + '_add')([shortcut, x])
x = layers.Activation('relu', name=name + '_out')(x)
return x
def stack1(x, filters, blocks, stride1=2, name=None):
"""A set of stacked residual blocks.
Args:
x: input tensor.
filters: integer, filters of the bottleneck layer in a block.
blocks: integer, blocks in the stacked blocks.
stride1: default 2, stride of the first layer in the first block.
name: string, stack label.
Returns:
Output tensor for the stacked blocks.
"""
x = block1(x, filters, stride=stride1, name=name + '_block1')
for i in range(2, blocks + 1):
x = block1(x, filters, conv_shortcut=False, name=name + '_block' + str(i))
return x
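# Illustrative note (added for clarity): stack1(x, 64, 3, stride1=1,
# name='conv2'), as used by ResNet50 below, builds three bottleneck blocks
# whose outputs all have 4 * 64 = 256 channels; only the first block of a
# stack uses a convolutional shortcut and can change the spatial resolution.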
def block2(x, filters, kernel_size=3, stride=1, conv_shortcut=False, name=None):
"""A residual block.
Args:
x: input tensor.
filters: integer, filters of the bottleneck layer.
kernel_size: default 3, kernel size of the bottleneck layer.
stride: default 1, stride of the first layer.
conv_shortcut: default False, use convolution shortcut if True,
otherwise identity shortcut.
name: string, block label.
Returns:
Output tensor for the residual block.
"""
bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
preact = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=name + '_preact_bn')(x)
preact = layers.Activation('relu', name=name + '_preact_relu')(preact)
if conv_shortcut:
shortcut = layers.Conv2D(
4 * filters, 1, strides=stride, name=name + '_0_conv')(preact)
else:
shortcut = layers.MaxPooling2D(1, strides=stride)(x) if stride > 1 else x
x = layers.Conv2D(
filters, 1, strides=1, use_bias=False, name=name + '_1_conv')(preact)
x = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=name + '_1_bn')(x)
x = layers.Activation('relu', name=name + '_1_relu')(x)
x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + '_2_pad')(x)
x = layers.Conv2D(
filters,
kernel_size,
strides=stride,
use_bias=False,
name=name + '_2_conv')(x)
x = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=name + '_2_bn')(x)
x = layers.Activation('relu', name=name + '_2_relu')(x)
x = layers.Conv2D(4 * filters, 1, name=name + '_3_conv')(x)
x = layers.Add(name=name + '_out')([shortcut, x])
return x
def stack2(x, filters, blocks, stride1=2, name=None):
"""A set of stacked residual blocks.
Args:
x: input tensor.
filters: integer, filters of the bottleneck layer in a block.
blocks: integer, blocks in the stacked blocks.
stride1: default 2, stride of the first layer in the first block.
name: string, stack label.
Returns:
Output tensor for the stacked blocks.
"""
x = block2(x, filters, conv_shortcut=True, name=name + '_block1')
for i in range(2, blocks):
x = block2(x, filters, name=name + '_block' + str(i))
x = block2(x, filters, stride=stride1, name=name + '_block' + str(blocks))
return x
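# Note: unlike stack1, stack2 (used by the pre-activation v2 models) applies
# the stride in the *last* block of each stack rather than the first.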
def block3(x,
filters,
kernel_size=3,
stride=1,
groups=32,
conv_shortcut=True,
name=None):
"""A residual block.
Args:
x: input tensor.
filters: integer, filters of the bottleneck layer.
kernel_size: default 3, kernel size of the bottleneck layer.
stride: default 1, stride of the first layer.
groups: default 32, group size for grouped convolution.
conv_shortcut: default True, use convolution shortcut if True,
otherwise identity shortcut.
name: string, block label.
Returns:
Output tensor for the residual block.
"""
bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
if conv_shortcut:
shortcut = layers.Conv2D(
(64 // groups) * filters,
1,
strides=stride,
use_bias=False,
name=name + '_0_conv')(x)
shortcut = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=name + '_0_bn')(shortcut)
else:
shortcut = x
x = layers.Conv2D(filters, 1, use_bias=False, name=name + '_1_conv')(x)
x = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=name + '_1_bn')(x)
x = layers.Activation('relu', name=name + '_1_relu')(x)
c = filters // groups
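  # The ops below emulate a grouped convolution: DepthwiseConv2D with
  # depth_multiplier=c yields c outputs per input channel (groups * c * c in
  # total); reshaping to (..., groups, c, c) and summing over one c axis then
  # aggregates them into the `filters` outputs of the grouped convolution.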
x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + '_2_pad')(x)
x = layers.DepthwiseConv2D(
kernel_size,
strides=stride,
depth_multiplier=c,
use_bias=False,
name=name + '_2_conv')(x)
x_shape = backend.shape(x)[:-1]
x = backend.reshape(x, backend.concatenate([x_shape, (groups, c, c)]))
x = layers.Lambda(
lambda x: sum(x[:, :, :, :, i] for i in range(c)),
name=name + '_2_reduce')(x)
x = backend.reshape(x, backend.concatenate([x_shape, (filters,)]))
x = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=name + '_2_bn')(x)
x = layers.Activation('relu', name=name + '_2_relu')(x)
x = layers.Conv2D(
(64 // groups) * filters, 1, use_bias=False, name=name + '_3_conv')(x)
x = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=name + '_3_bn')(x)
x = layers.Add(name=name + '_add')([shortcut, x])
x = layers.Activation('relu', name=name + '_out')(x)
return x
def stack3(x, filters, blocks, stride1=2, groups=32, name=None):
"""A set of stacked residual blocks.
Args:
x: input tensor.
filters: integer, filters of the bottleneck layer in a block.
blocks: integer, blocks in the stacked blocks.
stride1: default 2, stride of the first layer in the first block.
groups: default 32, group size for grouped convolution.
name: string, stack label.
Returns:
Output tensor for the stacked blocks.
"""
x = block3(x, filters, stride=stride1, groups=groups, name=name + '_block1')
for i in range(2, blocks + 1):
x = block3(
x,
filters,
groups=groups,
conv_shortcut=False,
name=name + '_block' + str(i))
return x
@keras_export('keras.applications.resnet50.ResNet50',
'keras.applications.resnet.ResNet50',
'keras.applications.ResNet50')
def ResNet50(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
"""Instantiates the ResNet50 architecture."""
def stack_fn(x):
x = stack1(x, 64, 3, stride1=1, name='conv2')
x = stack1(x, 128, 4, name='conv3')
x = stack1(x, 256, 6, name='conv4')
return stack1(x, 512, 3, name='conv5')
return ResNet(stack_fn, False, True, 'resnet50', include_top, weights,
input_tensor, input_shape, pooling, classes, **kwargs)
@keras_export('keras.applications.resnet.ResNet101',
'keras.applications.ResNet101')
def ResNet101(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
"""Instantiates the ResNet101 architecture."""
def stack_fn(x):
x = stack1(x, 64, 3, stride1=1, name='conv2')
x = stack1(x, 128, 4, name='conv3')
x = stack1(x, 256, 23, name='conv4')
return stack1(x, 512, 3, name='conv5')
return ResNet(stack_fn, False, True, 'resnet101', include_top, weights,
input_tensor, input_shape, pooling, classes, **kwargs)
@keras_export('keras.applications.resnet.ResNet152',
'keras.applications.ResNet152')
def ResNet152(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
"""Instantiates the ResNet152 architecture."""
def stack_fn(x):
x = stack1(x, 64, 3, stride1=1, name='conv2')
x = stack1(x, 128, 8, name='conv3')
x = stack1(x, 256, 36, name='conv4')
return stack1(x, 512, 3, name='conv5')
return ResNet(stack_fn, False, True, 'resnet152', include_top, weights,
input_tensor, input_shape, pooling, classes, **kwargs)
@keras_export('keras.applications.resnet50.preprocess_input',
'keras.applications.resnet.preprocess_input')
def preprocess_input(x, data_format=None):
return imagenet_utils.preprocess_input(
x, data_format=data_format, mode='caffe')
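# A minimal usage sketch (illustrative only; `imgs` is an assumed float32 RGB
# batch with values in [0, 255]): `mode='caffe'` converts the images from RGB
# to BGR and zero-centers each channel with the ImageNet means, without
# scaling.
#   x = preprocess_input(imgs)
#   preds = ResNet50(weights='imagenet')(x)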
@keras_export('keras.applications.resnet50.decode_predictions',
'keras.applications.resnet.decode_predictions')
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
mode='',
ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_CAFFE,
error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
DOC = """
Reference:
- [Deep Residual Learning for Image Recognition](
https://arxiv.org/abs/1512.03385) (CVPR 2015)
For image classification use cases, see
[this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
Note: each Keras Application expects a specific kind of input preprocessing.
For ResNet, call `tf.keras.applications.resnet.preprocess_input` on your
inputs before passing them to the model.
`resnet.preprocess_input` will convert the input images from RGB to BGR,
then will zero-center each color channel with respect to the ImageNet dataset,
without scaling.
Args:
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `'channels_last'` data format)
or `(3, 224, 224)` (with `'channels_first'` data format).
      It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
Returns:
A Keras model instance.
"""
setattr(ResNet50, '__doc__', ResNet50.__doc__ + DOC)
setattr(ResNet101, '__doc__', ResNet101.__doc__ + DOC)
setattr(ResNet152, '__doc__', ResNet152.__doc__ + DOC)
| 21,207 | 35.191126 | 87 | py |
keras | keras-master/keras/applications/vgg16.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""VGG16 model for Keras.
Reference:
- [Very Deep Convolutional Networks for Large-Scale Image Recognition]
(https://arxiv.org/abs/1409.1556) (ICLR 2015)
"""
import tensorflow.compat.v2 as tf
from keras import backend
from keras.applications import imagenet_utils
from keras.engine import training
from keras.layers import VersionAwareLayers
from keras.utils import data_utils
from keras.utils import layer_utils
from tensorflow.python.util.tf_export import keras_export
WEIGHTS_PATH = ('https://storage.googleapis.com/tensorflow/keras-applications/'
'vgg16/vgg16_weights_tf_dim_ordering_tf_kernels.h5')
WEIGHTS_PATH_NO_TOP = ('https://storage.googleapis.com/tensorflow/'
'keras-applications/vgg16/'
'vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5')
layers = VersionAwareLayers()
@keras_export('keras.applications.vgg16.VGG16', 'keras.applications.VGG16')
def VGG16(
include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax'):
"""Instantiates the VGG16 model.
Reference:
- [Very Deep Convolutional Networks for Large-Scale Image Recognition](
https://arxiv.org/abs/1409.1556) (ICLR 2015)
For image classification use cases, see
[this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
The default input size for this model is 224x224.
Note: each Keras Application expects a specific kind of input preprocessing.
For VGG16, call `tf.keras.applications.vgg16.preprocess_input` on your
inputs before passing them to the model.
`vgg16.preprocess_input` will convert the input images from RGB to BGR,
then will zero-center each color channel with respect to the ImageNet dataset,
without scaling.
Args:
include_top: whether to include the 3 fully-connected
layers at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)`
(with `channels_last` data format)
or `(3, 224, 224)` (with `channels_first` data format).
It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
Returns:
A `keras.Model` instance.
"""
if not (weights in {'imagenet', None} or tf.io.gfile.exists(weights)):
raise ValueError(
'The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded. Received: '
f'weights={weights}')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top` '
'as true, `classes` should be 1000. '
f'Received `classes={classes}`')
# Determine proper input shape
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=224,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
# Block 1
x = layers.Conv2D(
64, (3, 3), activation='relu', padding='same', name='block1_conv1')(
img_input)
x = layers.Conv2D(
64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
# Block 2
x = layers.Conv2D(
128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
x = layers.Conv2D(
128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
# Block 3
x = layers.Conv2D(
256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
x = layers.Conv2D(
256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
x = layers.Conv2D(
256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
# Block 4
x = layers.Conv2D(
512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
x = layers.Conv2D(
512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
x = layers.Conv2D(
512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
# Block 5
x = layers.Conv2D(
512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
x = layers.Conv2D(
512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
x = layers.Conv2D(
512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
if include_top:
# Classification block
x = layers.Flatten(name='flatten')(x)
x = layers.Dense(4096, activation='relu', name='fc1')(x)
x = layers.Dense(4096, activation='relu', name='fc2')(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Dense(classes, activation=classifier_activation,
name='predictions')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = layer_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = training.Model(inputs, x, name='vgg16')
# Load weights.
if weights == 'imagenet':
if include_top:
weights_path = data_utils.get_file(
'vgg16_weights_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH,
cache_subdir='models',
file_hash='64373286793e3c8b2b4e3219cbf3544b')
else:
weights_path = data_utils.get_file(
'vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
file_hash='6d6bbae143d832006294945121d1f1fc')
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
@keras_export('keras.applications.vgg16.preprocess_input')
def preprocess_input(x, data_format=None):
return imagenet_utils.preprocess_input(
x, data_format=data_format, mode='caffe')
@keras_export('keras.applications.vgg16.decode_predictions')
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
mode='',
ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_CAFFE,
error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
| 9,586 | 37.971545 | 87 | py |
keras | keras-master/keras/applications/densenet.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""DenseNet models for Keras.
Reference:
- [Densely Connected Convolutional Networks](
https://arxiv.org/abs/1608.06993) (CVPR 2017)
"""
import tensorflow.compat.v2 as tf
from keras import backend
from keras.applications import imagenet_utils
from keras.engine import training
from keras.layers import VersionAwareLayers
from keras.utils import data_utils
from keras.utils import layer_utils
from tensorflow.python.util.tf_export import keras_export
BASE_WEIGHTS_PATH = ('https://storage.googleapis.com/tensorflow/'
'keras-applications/densenet/')
DENSENET121_WEIGHT_PATH = (
BASE_WEIGHTS_PATH + 'densenet121_weights_tf_dim_ordering_tf_kernels.h5')
DENSENET121_WEIGHT_PATH_NO_TOP = (
BASE_WEIGHTS_PATH +
'densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5')
DENSENET169_WEIGHT_PATH = (
BASE_WEIGHTS_PATH + 'densenet169_weights_tf_dim_ordering_tf_kernels.h5')
DENSENET169_WEIGHT_PATH_NO_TOP = (
BASE_WEIGHTS_PATH +
'densenet169_weights_tf_dim_ordering_tf_kernels_notop.h5')
DENSENET201_WEIGHT_PATH = (
BASE_WEIGHTS_PATH + 'densenet201_weights_tf_dim_ordering_tf_kernels.h5')
DENSENET201_WEIGHT_PATH_NO_TOP = (
BASE_WEIGHTS_PATH +
'densenet201_weights_tf_dim_ordering_tf_kernels_notop.h5')
layers = VersionAwareLayers()
def dense_block(x, blocks, name):
"""A dense block.
Args:
x: input tensor.
blocks: integer, the number of building blocks.
name: string, block label.
Returns:
Output tensor for the block.
"""
for i in range(blocks):
x = conv_block(x, 32, name=name + '_block' + str(i + 1))
return x
def transition_block(x, reduction, name):
"""A transition block.
Args:
x: input tensor.
reduction: float, compression rate at transition layers.
name: string, block label.
Returns:
output tensor for the block.
"""
bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
x = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=name + '_bn')(
x)
x = layers.Activation('relu', name=name + '_relu')(x)
x = layers.Conv2D(
int(backend.int_shape(x)[bn_axis] * reduction),
1,
use_bias=False,
name=name + '_conv')(
x)
x = layers.AveragePooling2D(2, strides=2, name=name + '_pool')(x)
return x
def conv_block(x, growth_rate, name):
"""A building block for a dense block.
Args:
x: input tensor.
growth_rate: float, growth rate at dense layers.
name: string, block label.
Returns:
Output tensor for the block.
"""
bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
x1 = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=name + '_0_bn')(
x)
x1 = layers.Activation('relu', name=name + '_0_relu')(x1)
x1 = layers.Conv2D(
4 * growth_rate, 1, use_bias=False, name=name + '_1_conv')(
x1)
x1 = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=name + '_1_bn')(
x1)
x1 = layers.Activation('relu', name=name + '_1_relu')(x1)
x1 = layers.Conv2D(
growth_rate, 3, padding='same', use_bias=False, name=name + '_2_conv')(
x1)
x = layers.Concatenate(axis=bn_axis, name=name + '_concat')([x, x1])
return x
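# Added illustration (not part of the original source): each conv_block above
# concatenates `growth_rate` new feature maps onto its input, and each
# transition block then halves the channel count (reduction=0.5). This helper
# just reproduces that bookkeeping for a given `blocks` configuration.
def _expected_feature_channels(blocks, stem_channels=64, growth_rate=32,
                               reduction=0.5):
  channels = stem_channels
  for i, num_blocks in enumerate(blocks):
    channels += num_blocks * growth_rate
    if i < len(blocks) - 1:  # no transition block after the last dense block
      channels = int(channels * reduction)
  return channels
# For DenseNet121: _expected_feature_channels([6, 12, 24, 16]) == 1024.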
def DenseNet(
blocks,
include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax'):
"""Instantiates the DenseNet architecture.
Reference:
- [Densely Connected Convolutional Networks](
https://arxiv.org/abs/1608.06993) (CVPR 2017)
This function returns a Keras image classification model,
optionally loaded with weights pre-trained on ImageNet.
For image classification use cases, see
[this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
Note: each Keras Application expects a specific kind of input preprocessing.
For DenseNet, call `tf.keras.applications.densenet.preprocess_input` on your
inputs before passing them to the model.
`densenet.preprocess_input` will scale pixels between 0 and 1 and then
will normalize each channel with respect to the ImageNet dataset statistics.
Args:
blocks: numbers of building blocks for the four dense layers.
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `'channels_last'` data format)
      or `(3, 224, 224)` (with `'channels_first'` data format)).
      It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
pooling: optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
Returns:
A `keras.Model` instance.
"""
if not (weights in {'imagenet', None} or tf.io.gfile.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=224,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)))(img_input)
x = layers.Conv2D(64, 7, strides=2, use_bias=False, name='conv1/conv')(x)
x = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name='conv1/bn')(
x)
x = layers.Activation('relu', name='conv1/relu')(x)
x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
x = layers.MaxPooling2D(3, strides=2, name='pool1')(x)
x = dense_block(x, blocks[0], name='conv2')
x = transition_block(x, 0.5, name='pool2')
x = dense_block(x, blocks[1], name='conv3')
x = transition_block(x, 0.5, name='pool3')
x = dense_block(x, blocks[2], name='conv4')
x = transition_block(x, 0.5, name='pool4')
x = dense_block(x, blocks[3], name='conv5')
x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name='bn')(x)
x = layers.Activation('relu', name='relu')(x)
if include_top:
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Dense(classes, activation=classifier_activation,
name='predictions')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D(name='max_pool')(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = layer_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
if blocks == [6, 12, 24, 16]:
model = training.Model(inputs, x, name='densenet121')
elif blocks == [6, 12, 32, 32]:
model = training.Model(inputs, x, name='densenet169')
elif blocks == [6, 12, 48, 32]:
model = training.Model(inputs, x, name='densenet201')
else:
model = training.Model(inputs, x, name='densenet')
# Load weights.
if weights == 'imagenet':
if include_top:
if blocks == [6, 12, 24, 16]:
weights_path = data_utils.get_file(
'densenet121_weights_tf_dim_ordering_tf_kernels.h5',
DENSENET121_WEIGHT_PATH,
cache_subdir='models',
file_hash='9d60b8095a5708f2dcce2bca79d332c7')
elif blocks == [6, 12, 32, 32]:
weights_path = data_utils.get_file(
'densenet169_weights_tf_dim_ordering_tf_kernels.h5',
DENSENET169_WEIGHT_PATH,
cache_subdir='models',
file_hash='d699b8f76981ab1b30698df4c175e90b')
elif blocks == [6, 12, 48, 32]:
weights_path = data_utils.get_file(
'densenet201_weights_tf_dim_ordering_tf_kernels.h5',
DENSENET201_WEIGHT_PATH,
cache_subdir='models',
file_hash='1ceb130c1ea1b78c3bf6114dbdfd8807')
else:
if blocks == [6, 12, 24, 16]:
weights_path = data_utils.get_file(
'densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5',
DENSENET121_WEIGHT_PATH_NO_TOP,
cache_subdir='models',
file_hash='30ee3e1110167f948a6b9946edeeb738')
elif blocks == [6, 12, 32, 32]:
weights_path = data_utils.get_file(
'densenet169_weights_tf_dim_ordering_tf_kernels_notop.h5',
DENSENET169_WEIGHT_PATH_NO_TOP,
cache_subdir='models',
file_hash='b8c4d4c20dd625c148057b9ff1c1176b')
elif blocks == [6, 12, 48, 32]:
weights_path = data_utils.get_file(
'densenet201_weights_tf_dim_ordering_tf_kernels_notop.h5',
DENSENET201_WEIGHT_PATH_NO_TOP,
cache_subdir='models',
file_hash='c13680b51ded0fb44dff2d8f86ac8bb1')
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
@keras_export('keras.applications.densenet.DenseNet121',
'keras.applications.DenseNet121')
def DenseNet121(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000):
"""Instantiates the Densenet121 architecture."""
return DenseNet([6, 12, 24, 16], include_top, weights, input_tensor,
input_shape, pooling, classes)
@keras_export('keras.applications.densenet.DenseNet169',
'keras.applications.DenseNet169')
def DenseNet169(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000):
"""Instantiates the Densenet169 architecture."""
return DenseNet([6, 12, 32, 32], include_top, weights, input_tensor,
input_shape, pooling, classes)
@keras_export('keras.applications.densenet.DenseNet201',
'keras.applications.DenseNet201')
def DenseNet201(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000):
"""Instantiates the Densenet201 architecture."""
return DenseNet([6, 12, 48, 32], include_top, weights, input_tensor,
input_shape, pooling, classes)
@keras_export('keras.applications.densenet.preprocess_input')
def preprocess_input(x, data_format=None):
return imagenet_utils.preprocess_input(
x, data_format=data_format, mode='torch')
@keras_export('keras.applications.densenet.decode_predictions')
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
mode='',
ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TORCH,
error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
DOC = """
Reference:
- [Densely Connected Convolutional Networks](
https://arxiv.org/abs/1608.06993) (CVPR 2017)
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
Note: each Keras Application expects a specific kind of input preprocessing.
For DenseNet, call `tf.keras.applications.densenet.preprocess_input` on your
inputs before passing them to the model.
Args:
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `'channels_last'` data format)
    or `(3, 224, 224)` (with `'channels_first'` data format)).
    It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
Returns:
A Keras model instance.
"""
setattr(DenseNet121, '__doc__', DenseNet121.__doc__ + DOC)
setattr(DenseNet169, '__doc__', DenseNet169.__doc__ + DOC)
setattr(DenseNet201, '__doc__', DenseNet201.__doc__ + DOC)
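# Added usage sketch (illustrative only, not part of the original source):
# DenseNet variants share the 'torch'-mode preprocessing above (scale to
# [0, 1], then normalize with ImageNet channel statistics).
if __name__ == '__main__':
  import numpy as np
  import tensorflow as tf
  demo_model = tf.keras.applications.DenseNet121(weights=None)  # weights=None avoids a download
  demo_images = np.random.uniform(0, 255, size=(2, 224, 224, 3)).astype('float32')
  demo_images = tf.keras.applications.densenet.preprocess_input(demo_images)
  print(demo_model.predict(demo_images).shape)  # (2, 1000)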
| 16,084 | 36.320186 | 87 | py |
keras | keras-master/keras/applications/imagenet_utils.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for ImageNet data preprocessing & prediction decoding."""
import json
import warnings
import numpy as np
from keras import activations
from keras import backend
from keras.utils import data_utils
from tensorflow.python.util.tf_export import keras_export
CLASS_INDEX = None
CLASS_INDEX_PATH = ('https://storage.googleapis.com/download.tensorflow.org/'
'data/imagenet_class_index.json')
PREPROCESS_INPUT_DOC = """
Preprocesses a tensor or Numpy array encoding a batch of images.
Usage example with `applications.MobileNet`:
```python
i = tf.keras.layers.Input([None, None, 3], dtype = tf.uint8)
x = tf.cast(i, tf.float32)
x = tf.keras.applications.mobilenet.preprocess_input(x)
core = tf.keras.applications.MobileNet()
x = core(x)
model = tf.keras.Model(inputs=[i], outputs=[x])
image = tf.image.decode_png(tf.io.read_file('file.png'))
result = model(image)
```
Args:
x: A floating point `numpy.array` or a `tf.Tensor`, 3D or 4D with 3 color
channels, with values in the range [0, 255].
The preprocessed data are written over the input data
if the data types are compatible. To avoid this
behaviour, `numpy.copy(x)` can be used.
data_format: Optional data format of the image tensor/array. Defaults to
None, in which case the global setting
`tf.keras.backend.image_data_format()` is used (unless you changed it,
it defaults to "channels_last").{mode}
Returns:
Preprocessed `numpy.array` or a `tf.Tensor` with type `float32`.
{ret}
Raises:
{error}
"""
PREPROCESS_INPUT_MODE_DOC = """
mode: One of "caffe", "tf" or "torch". Defaults to "caffe".
- caffe: will convert the images from RGB to BGR,
then will zero-center each color channel with
respect to the ImageNet dataset,
without scaling.
- tf: will scale pixels between -1 and 1,
sample-wise.
- torch: will scale pixels between 0 and 1 and then
will normalize each channel with respect to the
ImageNet dataset.
"""
PREPROCESS_INPUT_DEFAULT_ERROR_DOC = """
ValueError: In case of unknown `mode` or `data_format` argument."""
PREPROCESS_INPUT_ERROR_DOC = """
ValueError: In case of unknown `data_format` argument."""
PREPROCESS_INPUT_RET_DOC_TF = """
  The input pixel values are scaled between -1 and 1, sample-wise."""
PREPROCESS_INPUT_RET_DOC_TORCH = """
  The input pixel values are scaled between 0 and 1 and each channel is
normalized with respect to the ImageNet dataset."""
PREPROCESS_INPUT_RET_DOC_CAFFE = """
The images are converted from RGB to BGR, then each color channel is
zero-centered with respect to the ImageNet dataset, without scaling."""
@keras_export('keras.applications.imagenet_utils.preprocess_input')
def preprocess_input(x, data_format=None, mode='caffe'):
"""Preprocesses a tensor or Numpy array encoding a batch of images."""
if mode not in {'caffe', 'tf', 'torch'}:
raise ValueError('Expected mode to be one of `caffe`, `tf` or `torch`. '
f'Received: mode={mode}')
if data_format is None:
data_format = backend.image_data_format()
elif data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Expected data_format to be one of `channels_first` or '
f'`channels_last`. Received: data_format={data_format}')
if isinstance(x, np.ndarray):
return _preprocess_numpy_input(
x, data_format=data_format, mode=mode)
else:
return _preprocess_symbolic_input(
x, data_format=data_format, mode=mode)
preprocess_input.__doc__ = PREPROCESS_INPUT_DOC.format(
mode=PREPROCESS_INPUT_MODE_DOC,
ret='',
error=PREPROCESS_INPUT_DEFAULT_ERROR_DOC)
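# Added illustration (not part of the original source): a minimal sketch of
# what each `mode` does to a single all-white pixel, assuming 'channels_last'
# data. The helper is never called by the library itself.
def _preprocess_input_modes_demo():
  x = np.full((1, 1, 1, 3), 255.0, dtype='float32')
  tf_out = preprocess_input(x.copy(), data_format='channels_last', mode='tf')
  torch_out = preprocess_input(x.copy(), data_format='channels_last', mode='torch')
  caffe_out = preprocess_input(x.copy(), data_format='channels_last', mode='caffe')
  # tf:    x / 127.5 - 1                  -> [1., 1., 1.]
  # torch: (x / 255 - mean) / std         -> approx. [2.25, 2.43, 2.64]
  # caffe: RGB->BGR, minus the mean pixel -> approx. [151.06, 138.22, 131.32]
  return tf_out, torch_out, caffe_out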
@keras_export('keras.applications.imagenet_utils.decode_predictions')
def decode_predictions(preds, top=5):
"""Decodes the prediction of an ImageNet model.
Args:
preds: Numpy array encoding a batch of predictions.
top: Integer, how many top-guesses to return. Defaults to 5.
Returns:
A list of lists of top class prediction tuples
`(class_name, class_description, score)`.
One list of tuples per sample in batch input.
Raises:
ValueError: In case of invalid shape of the `pred` array
(must be 2D).
"""
global CLASS_INDEX
if len(preds.shape) != 2 or preds.shape[1] != 1000:
raise ValueError('`decode_predictions` expects '
'a batch of predictions '
'(i.e. a 2D array of shape (samples, 1000)). '
'Found array with shape: ' + str(preds.shape))
if CLASS_INDEX is None:
fpath = data_utils.get_file(
'imagenet_class_index.json',
CLASS_INDEX_PATH,
cache_subdir='models',
file_hash='c2c37ea517e94d9795004a39431a14cb')
with open(fpath) as f:
CLASS_INDEX = json.load(f)
results = []
for pred in preds:
top_indices = pred.argsort()[-top:][::-1]
result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]
result.sort(key=lambda x: x[2], reverse=True)
results.append(result)
return results
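# Added illustration (not part of the original source): `decode_predictions`
# expects a (samples, 1000) array of class probabilities and anything else
# raises. Note that it downloads the small ImageNet class-index JSON on first
# use, so this helper needs network access the first time it is called.
def _decode_predictions_demo():
  probs = np.full((1, 1000), 1.0 / 1000, dtype='float32')
  return decode_predictions(probs, top=3)  # one list of 3 (id, name, score) tuples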
def _preprocess_numpy_input(x, data_format, mode):
"""Preprocesses a Numpy array encoding a batch of images.
Args:
x: Input array, 3D or 4D.
data_format: Data format of the image array.
mode: One of "caffe", "tf" or "torch".
- caffe: will convert the images from RGB to BGR,
then will zero-center each color channel with
respect to the ImageNet dataset,
without scaling.
- tf: will scale pixels between -1 and 1,
sample-wise.
- torch: will scale pixels between 0 and 1 and then
will normalize each channel with respect to the
ImageNet dataset.
Returns:
Preprocessed Numpy array.
"""
if not issubclass(x.dtype.type, np.floating):
x = x.astype(backend.floatx(), copy=False)
if mode == 'tf':
x /= 127.5
x -= 1.
return x
elif mode == 'torch':
x /= 255.
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
else:
if data_format == 'channels_first':
# 'RGB'->'BGR'
if x.ndim == 3:
x = x[::-1, ...]
else:
x = x[:, ::-1, ...]
else:
# 'RGB'->'BGR'
x = x[..., ::-1]
mean = [103.939, 116.779, 123.68]
std = None
# Zero-center by mean pixel
if data_format == 'channels_first':
if x.ndim == 3:
x[0, :, :] -= mean[0]
x[1, :, :] -= mean[1]
x[2, :, :] -= mean[2]
if std is not None:
x[0, :, :] /= std[0]
x[1, :, :] /= std[1]
x[2, :, :] /= std[2]
else:
x[:, 0, :, :] -= mean[0]
x[:, 1, :, :] -= mean[1]
x[:, 2, :, :] -= mean[2]
if std is not None:
x[:, 0, :, :] /= std[0]
x[:, 1, :, :] /= std[1]
x[:, 2, :, :] /= std[2]
else:
x[..., 0] -= mean[0]
x[..., 1] -= mean[1]
x[..., 2] -= mean[2]
if std is not None:
x[..., 0] /= std[0]
x[..., 1] /= std[1]
x[..., 2] /= std[2]
return x
def _preprocess_symbolic_input(x, data_format, mode):
"""Preprocesses a tensor encoding a batch of images.
Args:
x: Input tensor, 3D or 4D.
data_format: Data format of the image tensor.
mode: One of "caffe", "tf" or "torch".
- caffe: will convert the images from RGB to BGR,
then will zero-center each color channel with
respect to the ImageNet dataset,
without scaling.
- tf: will scale pixels between -1 and 1,
sample-wise.
- torch: will scale pixels between 0 and 1 and then
will normalize each channel with respect to the
ImageNet dataset.
Returns:
Preprocessed tensor.
"""
if mode == 'tf':
x /= 127.5
x -= 1.
return x
elif mode == 'torch':
x /= 255.
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
else:
if data_format == 'channels_first':
# 'RGB'->'BGR'
if backend.ndim(x) == 3:
x = x[::-1, ...]
else:
x = x[:, ::-1, ...]
else:
# 'RGB'->'BGR'
x = x[..., ::-1]
mean = [103.939, 116.779, 123.68]
std = None
mean_tensor = backend.constant(-np.array(mean))
# Zero-center by mean pixel
if backend.dtype(x) != backend.dtype(mean_tensor):
x = backend.bias_add(
x, backend.cast(mean_tensor, backend.dtype(x)), data_format=data_format)
else:
x = backend.bias_add(x, mean_tensor, data_format)
if std is not None:
std_tensor = backend.constant(np.array(std), dtype=backend.dtype(x))
if data_format == 'channels_first':
std_tensor = backend.reshape(std_tensor, (-1, 1, 1))
x /= std_tensor
return x
def obtain_input_shape(input_shape,
default_size,
min_size,
data_format,
require_flatten,
weights=None):
"""Internal utility to compute/validate a model's input shape.
Args:
input_shape: Either None (will return the default network input shape),
or a user-provided shape to be validated.
default_size: Default input width/height for the model.
min_size: Minimum input width/height accepted by the model.
data_format: Image data format to use.
require_flatten: Whether the model is expected to
be linked to a classifier via a Flatten layer.
weights: One of `None` (random initialization)
or 'imagenet' (pre-training on ImageNet).
If weights='imagenet' input channels must be equal to 3.
Returns:
An integer shape tuple (may include None entries).
Raises:
ValueError: In case of invalid argument values.
"""
if weights != 'imagenet' and input_shape and len(input_shape) == 3:
if data_format == 'channels_first':
if input_shape[0] not in {1, 3}:
warnings.warn('This model usually expects 1 or 3 input channels. '
'However, it was passed an input_shape with ' +
str(input_shape[0]) + ' input channels.')
default_shape = (input_shape[0], default_size, default_size)
else:
if input_shape[-1] not in {1, 3}:
warnings.warn('This model usually expects 1 or 3 input channels. '
'However, it was passed an input_shape with ' +
str(input_shape[-1]) + ' input channels.')
default_shape = (default_size, default_size, input_shape[-1])
else:
if data_format == 'channels_first':
default_shape = (3, default_size, default_size)
else:
default_shape = (default_size, default_size, 3)
if weights == 'imagenet' and require_flatten:
if input_shape is not None:
if input_shape != default_shape:
raise ValueError('When setting `include_top=True` '
'and loading `imagenet` weights, '
f'`input_shape` should be {default_shape}. '
f'Received: input_shape={input_shape}')
return default_shape
if input_shape:
if data_format == 'channels_first':
if input_shape is not None:
if len(input_shape) != 3:
raise ValueError('`input_shape` must be a tuple of three integers.')
if input_shape[0] != 3 and weights == 'imagenet':
raise ValueError('The input must have 3 channels; Received '
f'`input_shape={input_shape}`')
if ((input_shape[1] is not None and input_shape[1] < min_size) or
(input_shape[2] is not None and input_shape[2] < min_size)):
raise ValueError(f'Input size must be at least {min_size}'
f'x{min_size}; Received: '
f'input_shape={input_shape}')
else:
if input_shape is not None:
if len(input_shape) != 3:
raise ValueError('`input_shape` must be a tuple of three integers.')
if input_shape[-1] != 3 and weights == 'imagenet':
raise ValueError('The input must have 3 channels; Received '
f'`input_shape={input_shape}`')
if ((input_shape[0] is not None and input_shape[0] < min_size) or
(input_shape[1] is not None and input_shape[1] < min_size)):
raise ValueError('Input size must be at least '
f'{min_size}x{min_size}; Received: '
f'input_shape={input_shape}')
else:
if require_flatten:
input_shape = default_shape
else:
if data_format == 'channels_first':
input_shape = (3, None, None)
else:
input_shape = (None, None, 3)
if require_flatten:
if None in input_shape:
raise ValueError('If `include_top` is True, '
'you should specify a static `input_shape`. '
f'Received: input_shape={input_shape}')
return input_shape
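# Added illustration (not part of the original source): how `obtain_input_shape`
# resolves an unspecified `input_shape` for a typical 224x224 application.
def _obtain_input_shape_demo():
  # With a classification head the static default shape is required:
  with_top = obtain_input_shape(None, default_size=224, min_size=32,
                                data_format='channels_last',
                                require_flatten=True)   # -> (224, 224, 3)
  # Without the head, the spatial dimensions may stay unspecified:
  without_top = obtain_input_shape(None, default_size=224, min_size=32,
                                   data_format='channels_last',
                                   require_flatten=False)  # -> (None, None, 3)
  return with_top, without_top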
def correct_pad(inputs, kernel_size):
"""Returns a tuple for zero-padding for 2D convolution with downsampling.
Args:
inputs: Input tensor.
kernel_size: An integer or tuple/list of 2 integers.
Returns:
A tuple.
"""
img_dim = 2 if backend.image_data_format() == 'channels_first' else 1
input_size = backend.int_shape(inputs)[img_dim:(img_dim + 2)]
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
if input_size[0] is None:
adjust = (1, 1)
else:
adjust = (1 - input_size[0] % 2, 1 - input_size[1] % 2)
correct = (kernel_size[0] // 2, kernel_size[1] // 2)
return ((correct[0] - adjust[0], correct[0]),
(correct[1] - adjust[1], correct[1]))
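# Added illustration (not part of the original source): `correct_pad` pads
# asymmetrically for even input sizes so a strided convolution lines up with
# the 'same'-padding convention; a 'channels_last' image data format is
# assumed here.
def _correct_pad_demo():
  import tensorflow as tf
  even_input = tf.keras.Input(shape=(224, 224, 3))
  odd_input = tf.keras.Input(shape=(225, 225, 3))
  # Even sizes get ((0, 1), (0, 1)); odd sizes get the symmetric ((1, 1), (1, 1)).
  return correct_pad(even_input, 3), correct_pad(odd_input, 3)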
def validate_activation(classifier_activation, weights):
"""validates that the classifer_activation is compatible with the weights.
Args:
classifier_activation: str or callable activation function
weights: The pretrained weights to load.
Raises:
    ValueError: if an activation other than `None` or `softmax` is used with
pretrained weights.
"""
if weights is None:
return
classifier_activation = activations.get(classifier_activation)
if classifier_activation not in {
activations.get('softmax'),
activations.get(None)
}:
raise ValueError('Only `None` and `softmax` activations are allowed '
'for the `classifier_activation` argument when using '
'pretrained weights, with `include_top=True`; Received: '
f'classifier_activation={classifier_activation}')
| 15,197 | 33.778032 | 80 | py |
keras | keras-master/keras/applications/resnet_v2.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""ResNet v2 models for Keras.
Reference:
- [Identity Mappings in Deep Residual Networks]
(https://arxiv.org/abs/1603.05027) (CVPR 2016)
"""
from keras.applications import imagenet_utils
from keras.applications import resnet
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.applications.resnet_v2.ResNet50V2',
'keras.applications.ResNet50V2')
def ResNet50V2(
include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax'):
"""Instantiates the ResNet50V2 architecture."""
def stack_fn(x):
x = resnet.stack2(x, 64, 3, name='conv2')
x = resnet.stack2(x, 128, 4, name='conv3')
x = resnet.stack2(x, 256, 6, name='conv4')
return resnet.stack2(x, 512, 3, stride1=1, name='conv5')
return resnet.ResNet(
stack_fn,
True,
True,
'resnet50v2',
include_top,
weights,
input_tensor,
input_shape,
pooling,
classes,
classifier_activation=classifier_activation)
@keras_export('keras.applications.resnet_v2.ResNet101V2',
'keras.applications.ResNet101V2')
def ResNet101V2(
include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax'):
"""Instantiates the ResNet101V2 architecture."""
def stack_fn(x):
x = resnet.stack2(x, 64, 3, name='conv2')
x = resnet.stack2(x, 128, 4, name='conv3')
x = resnet.stack2(x, 256, 23, name='conv4')
return resnet.stack2(x, 512, 3, stride1=1, name='conv5')
return resnet.ResNet(
stack_fn,
True,
True,
'resnet101v2',
include_top,
weights,
input_tensor,
input_shape,
pooling,
classes,
classifier_activation=classifier_activation)
@keras_export('keras.applications.resnet_v2.ResNet152V2',
'keras.applications.ResNet152V2')
def ResNet152V2(
include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax'):
"""Instantiates the ResNet152V2 architecture."""
def stack_fn(x):
x = resnet.stack2(x, 64, 3, name='conv2')
x = resnet.stack2(x, 128, 8, name='conv3')
x = resnet.stack2(x, 256, 36, name='conv4')
return resnet.stack2(x, 512, 3, stride1=1, name='conv5')
return resnet.ResNet(
stack_fn,
True,
True,
'resnet152v2',
include_top,
weights,
input_tensor,
input_shape,
pooling,
classes,
classifier_activation=classifier_activation)
@keras_export('keras.applications.resnet_v2.preprocess_input')
def preprocess_input(x, data_format=None):
return imagenet_utils.preprocess_input(
x, data_format=data_format, mode='tf')
@keras_export('keras.applications.resnet_v2.decode_predictions')
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
mode='',
ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF,
error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
DOC = """
Reference:
- [Identity Mappings in Deep Residual Networks]
(https://arxiv.org/abs/1603.05027) (CVPR 2016)
For image classification use cases, see
[this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
Note: each Keras Application expects a specific kind of input preprocessing.
For ResNetV2, call `tf.keras.applications.resnet_v2.preprocess_input` on your
inputs before passing them to the model.
`resnet_v2.preprocess_input` will scale input pixels between -1 and 1.
Args:
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `'channels_last'` data format)
    or `(3, 224, 224)` (with `'channels_first'` data format)).
    It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
Returns:
A `keras.Model` instance.
"""
setattr(ResNet50V2, '__doc__', ResNet50V2.__doc__ + DOC)
setattr(ResNet101V2, '__doc__', ResNet101V2.__doc__ + DOC)
setattr(ResNet152V2, '__doc__', ResNet152V2.__doc__ + DOC)
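# Added usage sketch (illustrative only, not part of the original source): the
# 'tf'-mode preprocessing above scales pixels to [-1, 1] before inference.
if __name__ == '__main__':
  import numpy as np
  import tensorflow as tf
  demo_model = tf.keras.applications.ResNet50V2(weights=None)  # weights=None avoids a download
  demo_images = np.random.uniform(0, 255, size=(1, 224, 224, 3)).astype('float32')
  demo_images = tf.keras.applications.resnet_v2.preprocess_input(demo_images)
  print(demo_model.predict(demo_images).shape)  # (1, 1000)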
| 6,741 | 32.879397 | 87 | py |
keras | keras-master/keras/applications/vgg19.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""VGG19 model for Keras.
Reference:
- [Very Deep Convolutional Networks for Large-Scale Image Recognition](
https://arxiv.org/abs/1409.1556) (ICLR 2015)
"""
import tensorflow.compat.v2 as tf
from keras import backend
from keras.applications import imagenet_utils
from keras.engine import training
from keras.layers import VersionAwareLayers
from keras.utils import data_utils
from keras.utils import layer_utils
from tensorflow.python.util.tf_export import keras_export
WEIGHTS_PATH = ('https://storage.googleapis.com/tensorflow/keras-applications/'
'vgg19/vgg19_weights_tf_dim_ordering_tf_kernels.h5')
WEIGHTS_PATH_NO_TOP = ('https://storage.googleapis.com/tensorflow/'
'keras-applications/vgg19/'
'vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5')
layers = VersionAwareLayers()
@keras_export('keras.applications.vgg19.VGG19', 'keras.applications.VGG19')
def VGG19(
include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax'):
"""Instantiates the VGG19 architecture.
Reference:
- [Very Deep Convolutional Networks for Large-Scale Image Recognition](
https://arxiv.org/abs/1409.1556) (ICLR 2015)
For image classification use cases, see
[this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
The default input size for this model is 224x224.
Note: each Keras Application expects a specific kind of input preprocessing.
For VGG19, call `tf.keras.applications.vgg19.preprocess_input` on your
inputs before passing them to the model.
`vgg19.preprocess_input` will convert the input images from RGB to BGR,
then will zero-center each color channel with respect to the ImageNet dataset,
without scaling.
Args:
include_top: whether to include the 3 fully-connected
layers at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)`
(with `channels_last` data format)
      or `(3, 224, 224)` (with `channels_first` data format)).
      It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
Returns:
A `keras.Model` instance.
"""
if not (weights in {'imagenet', None} or tf.io.gfile.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded. '
f'Received: `weights={weights}.`')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top` '
'as true, `classes` should be 1000. '
f'Received: `classes={classes}.`')
# Determine proper input shape
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=224,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
# Block 1
x = layers.Conv2D(
64, (3, 3), activation='relu', padding='same', name='block1_conv1')(
img_input)
x = layers.Conv2D(
64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
# Block 2
x = layers.Conv2D(
128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
x = layers.Conv2D(
128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
# Block 3
x = layers.Conv2D(
256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
x = layers.Conv2D(
256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
x = layers.Conv2D(
256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
x = layers.Conv2D(
256, (3, 3), activation='relu', padding='same', name='block3_conv4')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
# Block 4
x = layers.Conv2D(
512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
x = layers.Conv2D(
512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
x = layers.Conv2D(
512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
x = layers.Conv2D(
512, (3, 3), activation='relu', padding='same', name='block4_conv4')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
# Block 5
x = layers.Conv2D(
512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
x = layers.Conv2D(
512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
x = layers.Conv2D(
512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
x = layers.Conv2D(
512, (3, 3), activation='relu', padding='same', name='block5_conv4')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
if include_top:
# Classification block
x = layers.Flatten(name='flatten')(x)
x = layers.Dense(4096, activation='relu', name='fc1')(x)
x = layers.Dense(4096, activation='relu', name='fc2')(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Dense(classes, activation=classifier_activation,
name='predictions')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = layer_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = training.Model(inputs, x, name='vgg19')
# Load weights.
if weights == 'imagenet':
if include_top:
weights_path = data_utils.get_file(
'vgg19_weights_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH,
cache_subdir='models',
file_hash='cbe5617147190e668d6c5d5026f83318')
else:
weights_path = data_utils.get_file(
'vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5',
WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
file_hash='253f8cb515780f3b799900260a226db6')
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
@keras_export('keras.applications.vgg19.preprocess_input')
def preprocess_input(x, data_format=None):
return imagenet_utils.preprocess_input(
x, data_format=data_format, mode='caffe')
@keras_export('keras.applications.vgg19.decode_predictions')
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
mode='',
ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_CAFFE,
error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
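# Added usage sketch (illustrative only, not part of the original source):
# using VGG19 as a feature extractor with `include_top=False`.
if __name__ == '__main__':
  import numpy as np
  import tensorflow as tf
  extractor = tf.keras.applications.VGG19(weights=None, include_top=False,
                                          pooling='avg')  # weights=None avoids a download
  batch = np.random.uniform(0, 255, size=(1, 224, 224, 3)).astype('float32')
  batch = tf.keras.applications.vgg19.preprocess_input(batch)
  print(extractor.predict(batch).shape)  # (1, 512) global-average-pooled features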
| 9,818 | 38.276 | 87 | py |
keras | keras-master/keras/applications/inception_v3.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Inception V3 model for Keras.
Reference:
- [Rethinking the Inception Architecture for Computer Vision](
http://arxiv.org/abs/1512.00567) (CVPR 2016)
"""
import tensorflow.compat.v2 as tf
from keras import backend
from keras.applications import imagenet_utils
from keras.engine import training
from keras.layers import VersionAwareLayers
from keras.utils import data_utils
from keras.utils import layer_utils
from tensorflow.python.util.tf_export import keras_export
WEIGHTS_PATH = (
'https://storage.googleapis.com/tensorflow/keras-applications/'
'inception_v3/inception_v3_weights_tf_dim_ordering_tf_kernels.h5')
WEIGHTS_PATH_NO_TOP = (
'https://storage.googleapis.com/tensorflow/keras-applications/'
'inception_v3/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5')
layers = VersionAwareLayers()
@keras_export('keras.applications.inception_v3.InceptionV3',
'keras.applications.InceptionV3')
def InceptionV3(
include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax'):
"""Instantiates the Inception v3 architecture.
Reference:
- [Rethinking the Inception Architecture for Computer Vision](
http://arxiv.org/abs/1512.00567) (CVPR 2016)
This function returns a Keras image classification model,
optionally loaded with weights pre-trained on ImageNet.
For image classification use cases, see
[this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
Note: each Keras Application expects a specific kind of input preprocessing.
For `InceptionV3`, call `tf.keras.applications.inception_v3.preprocess_input`
on your inputs before passing them to the model.
`inception_v3.preprocess_input` will scale input pixels between -1 and 1.
Args:
include_top: Boolean, whether to include the fully-connected
      layer at the top, as the last layer of the network. Defaults to `True`.
    weights: One of `None` (random initialization),
      `imagenet` (pre-training on ImageNet),
      or the path to the weights file to be loaded. Defaults to `imagenet`.
    input_tensor: Optional Keras tensor (i.e. output of `layers.Input()`)
      to use as image input for the model. `input_tensor` is useful for sharing
      inputs between multiple different networks. Defaults to None.
input_shape: Optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(299, 299, 3)` (with `channels_last` data format)
      or `(3, 299, 299)` (with `channels_first` data format)).
      It should have exactly 3 input channels,
and width and height should be no smaller than 75.
E.g. `(150, 150, 3)` would be one valid value.
`input_shape` will be ignored if the `input_tensor` is provided.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` (default) means that the output of the model will be
the 4D tensor output of the last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
      if no `weights` argument is specified. Defaults to 1000.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
Returns:
A `keras.Model` instance.
"""
if not (weights in {'imagenet', None} or tf.io.gfile.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded; '
f'Received: weights={weights}')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top` '
'as true, `classes` should be 1000; '
f'Received classes={classes}')
# Determine proper input shape
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=299,
min_size=75,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
if backend.image_data_format() == 'channels_first':
channel_axis = 1
else:
channel_axis = 3
x = conv2d_bn(img_input, 32, 3, 3, strides=(2, 2), padding='valid')
x = conv2d_bn(x, 32, 3, 3, padding='valid')
x = conv2d_bn(x, 64, 3, 3)
x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
x = conv2d_bn(x, 80, 1, 1, padding='valid')
x = conv2d_bn(x, 192, 3, 3, padding='valid')
x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
# mixed 0: 35 x 35 x 256
branch1x1 = conv2d_bn(x, 64, 1, 1)
branch5x5 = conv2d_bn(x, 48, 1, 1)
branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch_pool = layers.AveragePooling2D(
(3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 32, 1, 1)
x = layers.concatenate([branch1x1, branch5x5, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed0')
# mixed 1: 35 x 35 x 288
branch1x1 = conv2d_bn(x, 64, 1, 1)
branch5x5 = conv2d_bn(x, 48, 1, 1)
branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch_pool = layers.AveragePooling2D(
(3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
x = layers.concatenate([branch1x1, branch5x5, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed1')
# mixed 2: 35 x 35 x 288
branch1x1 = conv2d_bn(x, 64, 1, 1)
branch5x5 = conv2d_bn(x, 48, 1, 1)
branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch_pool = layers.AveragePooling2D(
(3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
x = layers.concatenate([branch1x1, branch5x5, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed2')
# mixed 3: 17 x 17 x 768
branch3x3 = conv2d_bn(x, 384, 3, 3, strides=(2, 2), padding='valid')
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(
branch3x3dbl, 96, 3, 3, strides=(2, 2), padding='valid')
branch_pool = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
x = layers.concatenate([branch3x3, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed3')
# mixed 4: 17 x 17 x 768
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 128, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 128, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 128, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = layers.AveragePooling2D(
(3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate([branch1x1, branch7x7, branch7x7dbl, branch_pool],
axis=channel_axis,
name='mixed4')
# mixed 5, 6: 17 x 17 x 768
for i in range(2):
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 160, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 160, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 160, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = layers.AveragePooling2D((3, 3),
strides=(1, 1),
padding='same')(
x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate([branch1x1, branch7x7, branch7x7dbl, branch_pool],
axis=channel_axis,
name='mixed' + str(5 + i))
# mixed 7: 17 x 17 x 768
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 192, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 192, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = layers.AveragePooling2D(
(3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate([branch1x1, branch7x7, branch7x7dbl, branch_pool],
axis=channel_axis,
name='mixed7')
# mixed 8: 8 x 8 x 1280
branch3x3 = conv2d_bn(x, 192, 1, 1)
branch3x3 = conv2d_bn(branch3x3, 320, 3, 3, strides=(2, 2), padding='valid')
branch7x7x3 = conv2d_bn(x, 192, 1, 1)
branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7)
branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1)
branch7x7x3 = conv2d_bn(
branch7x7x3, 192, 3, 3, strides=(2, 2), padding='valid')
branch_pool = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
x = layers.concatenate([branch3x3, branch7x7x3, branch_pool],
axis=channel_axis,
name='mixed8')
# mixed 9: 8 x 8 x 2048
for i in range(2):
branch1x1 = conv2d_bn(x, 320, 1, 1)
branch3x3 = conv2d_bn(x, 384, 1, 1)
branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3)
branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1)
branch3x3 = layers.concatenate([branch3x3_1, branch3x3_2],
axis=channel_axis,
name='mixed9_' + str(i))
branch3x3dbl = conv2d_bn(x, 448, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3)
branch3x3dbl_1 = conv2d_bn(branch3x3dbl, 384, 1, 3)
branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1)
branch3x3dbl = layers.concatenate([branch3x3dbl_1, branch3x3dbl_2],
axis=channel_axis)
branch_pool = layers.AveragePooling2D((3, 3),
strides=(1, 1),
padding='same')(
x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate([branch1x1, branch3x3, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed' + str(9 + i))
if include_top:
# Classification block
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Dense(classes, activation=classifier_activation,
name='predictions')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = layer_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = training.Model(inputs, x, name='inception_v3')
# Load weights.
if weights == 'imagenet':
if include_top:
weights_path = data_utils.get_file(
'inception_v3_weights_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH,
cache_subdir='models',
file_hash='9a0d58056eeedaa3f26cb7ebd46da564')
else:
weights_path = data_utils.get_file(
'inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5',
WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
file_hash='bcbd6486424b2319ff4ef7d526e38f63')
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
def conv2d_bn(x,
filters,
num_row,
num_col,
padding='same',
strides=(1, 1),
name=None):
"""Utility function to apply conv + BN.
Args:
x: input tensor.
filters: filters in `Conv2D`.
num_row: height of the convolution kernel.
num_col: width of the convolution kernel.
padding: padding mode in `Conv2D`.
strides: strides in `Conv2D`.
name: name of the ops; will become `name + '_conv'`
for the convolution and `name + '_bn'` for the
batch norm layer.
Returns:
Output tensor after applying `Conv2D` and `BatchNormalization`.
"""
if name is not None:
bn_name = name + '_bn'
conv_name = name + '_conv'
else:
bn_name = None
conv_name = None
if backend.image_data_format() == 'channels_first':
bn_axis = 1
else:
bn_axis = 3
x = layers.Conv2D(
filters, (num_row, num_col),
strides=strides,
padding=padding,
use_bias=False,
name=conv_name)(
x)
x = layers.BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x)
x = layers.Activation('relu', name=name)(x)
return x
@keras_export('keras.applications.inception_v3.preprocess_input')
def preprocess_input(x, data_format=None):
return imagenet_utils.preprocess_input(x, data_format=data_format, mode='tf')
@keras_export('keras.applications.inception_v3.decode_predictions')
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
mode='',
ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF,
error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
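# Added usage sketch (illustrative only, not part of the original source):
# InceptionV3 defaults to 299x299 inputs and 'tf'-mode ([-1, 1]) preprocessing.
if __name__ == '__main__':
  import numpy as np
  import tensorflow as tf
  demo_model = tf.keras.applications.InceptionV3(weights=None)  # weights=None avoids a download
  demo_images = np.random.uniform(0, 255, size=(1, 299, 299, 3)).astype('float32')
  demo_images = tf.keras.applications.inception_v3.preprocess_input(demo_images)
  print(demo_model.predict(demo_images).shape)  # (1, 1000)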
| 16,038 | 36.562061 | 87 | py |
keras | keras-master/keras/applications/mobilenet_v3.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
# pylint: disable=missing-function-docstring
"""MobileNet v3 models for Keras."""
import tensorflow.compat.v2 as tf
from keras import backend
from keras import models
from keras.applications import imagenet_utils
from keras.layers import VersionAwareLayers
from keras.utils import data_utils
from keras.utils import layer_utils
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
# TODO(scottzhu): Change this to the GCS path.
BASE_WEIGHT_PATH = ('https://storage.googleapis.com/tensorflow/'
'keras-applications/mobilenet_v3/')
WEIGHTS_HASHES = {
'large_224_0.75_float': ('765b44a33ad4005b3ac83185abf1d0eb',
'e7b4d1071996dd51a2c2ca2424570e20'),
'large_224_1.0_float': ('59e551e166be033d707958cf9e29a6a7',
'037116398e07f018c0005ffcb0406831'),
'large_minimalistic_224_1.0_float': ('675e7b876c45c57e9e63e6d90a36599c',
'a2c33aed672524d1d0b4431808177695'),
'small_224_0.75_float': ('cb65d4e5be93758266aa0a7f2c6708b7',
'4d2fe46f1c1f38057392514b0df1d673'),
'small_224_1.0_float': ('8768d4c2e7dee89b9d02b2d03d65d862',
'be7100780f875c06bcab93d76641aa26'),
'small_minimalistic_224_1.0_float': ('99cd97fb2fcdad2bf028eb838de69e37',
'20d4e357df3f7a6361f3a288857b1051'),
}
layers = VersionAwareLayers()
BASE_DOCSTRING = """Instantiates the {name} architecture.
Reference:
- [Searching for MobileNetV3](
https://arxiv.org/pdf/1905.02244.pdf) (ICCV 2019)
The following table describes the performance of MobileNets v3:
------------------------------------------------------------------------
MACs stands for Multiply Adds
|Classification Checkpoint|MACs(M)|Parameters(M)|Top1 Accuracy|Pixel1 CPU(ms)|
|---|---|---|---|---|
| mobilenet_v3_large_1.0_224 | 217 | 5.4 | 75.6 | 51.2 |
| mobilenet_v3_large_0.75_224 | 155 | 4.0 | 73.3 | 39.8 |
| mobilenet_v3_large_minimalistic_1.0_224 | 209 | 3.9 | 72.3 | 44.1 |
| mobilenet_v3_small_1.0_224 | 66 | 2.9 | 68.1 | 15.8 |
| mobilenet_v3_small_0.75_224 | 44 | 2.4 | 65.4 | 12.8 |
| mobilenet_v3_small_minimalistic_1.0_224 | 65 | 2.0 | 61.9 | 12.2 |
For image classification use cases, see
[this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
Note: each Keras Application expects a specific kind of input preprocessing.
  For MobileNetV3, by default input preprocessing is included as a part of the
  model (as a `Rescaling` layer), and thus
  `tf.keras.applications.mobilenet_v3.preprocess_input` is actually a
  pass-through function. In this use case, MobileNetV3 models expect their inputs
  to be float tensors of pixels with values in the [0-255] range.
  At the same time, preprocessing as a part of the model (i.e. `Rescaling`
  layer) can be disabled by setting the `include_preprocessing` argument to False.
  With preprocessing disabled, MobileNetV3 models expect their inputs to be float
tensors of pixels with values in the [-1, 1] range.
Args:
input_shape: Optional shape tuple, to be specified if you would
like to use a model with an input image resolution that is not
(224, 224, 3).
    It should have exactly 3 input channels (e.g. `(224, 224, 3)`).
    You can also omit this option if you would like
    to infer input_shape from an input_tensor.
    If you choose to include both input_tensor and input_shape then
    input_shape will be used if they match; if the shapes
    do not match then an error will be raised.
E.g. `(160, 160, 3)` would be one valid value.
alpha: controls the width of the network. This is known as the
depth multiplier in the MobileNetV3 paper, but the name is kept for
consistency with MobileNetV1 in Keras.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
  minimalistic: In addition to large and small models, this module also
    contains so-called minimalistic models; these models have the same
    per-layer dimensions as MobileNetV3, but they don't utilize any of the
    advanced blocks (squeeze-and-excite units, hard-swish, and 5x5
    convolutions). While these models are less efficient on CPU, they are
    much more performant on GPU/DSP.
include_top: Boolean, whether to include the fully-connected
layer at the top of the network. Defaults to `True`.
weights: String, one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: Optional Keras tensor (i.e. output of
`layers.Input()`)
to use as image input for the model.
pooling: String, optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
classes: Integer, optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
dropout_rate: fraction of the input units to drop on the last layer.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
include_preprocessing: Boolean, whether to include the preprocessing
layer (`Rescaling`) at the bottom of the network. Defaults to `True`.
Call arguments:
inputs: A floating point `numpy.array` or a `tf.Tensor`, 4D with 3 color
channels, with values in the range [0, 255] if `include_preprocessing`
is True and in the range [-1, 1] otherwise.
Returns:
A `keras.Model` instance.
"""
def MobileNetV3(stack_fn,
last_point_ch,
input_shape=None,
alpha=1.0,
model_type='large',
minimalistic=False,
include_top=True,
weights='imagenet',
input_tensor=None,
classes=1000,
pooling=None,
dropout_rate=0.2,
classifier_activation='softmax',
include_preprocessing=True):
if not (weights in {'imagenet', None} or tf.io.gfile.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded. '
f'Received weights={weights}')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top` '
'as true, `classes` should be 1000. '
f'Received classes={classes}')
# Determine proper input shape and default size.
# If both input_shape and input_tensor are used, they should match
if input_shape is not None and input_tensor is not None:
try:
is_input_t_tensor = backend.is_keras_tensor(input_tensor)
except ValueError:
try:
is_input_t_tensor = backend.is_keras_tensor(
layer_utils.get_source_inputs(input_tensor))
except ValueError:
        raise ValueError(
            f'input_tensor: {input_tensor} is not a Keras tensor. '
            f'Received type(input_tensor)={type(input_tensor)}')
if is_input_t_tensor:
if backend.image_data_format() == 'channels_first':
if backend.int_shape(input_tensor)[1] != input_shape[1]:
raise ValueError('When backend.image_data_format()=channels_first, '
'input_shape[1] must equal '
'backend.int_shape(input_tensor)[1]. Received '
f'input_shape={input_shape}, '
'backend.int_shape(input_tensor)='
f'{backend.int_shape(input_tensor)}')
else:
if backend.int_shape(input_tensor)[2] != input_shape[1]:
raise ValueError('input_shape[1] must equal '
'backend.int_shape(input_tensor)[2]. Received '
f'input_shape={input_shape}, '
'backend.int_shape(input_tensor)='
f'{backend.int_shape(input_tensor)}')
else:
      raise ValueError(f'input_tensor specified: {input_tensor} '
                       'is not a Keras tensor.')
# If input_shape is None, infer shape from input_tensor
if input_shape is None and input_tensor is not None:
try:
backend.is_keras_tensor(input_tensor)
except ValueError:
      raise ValueError(f'input_tensor: {input_tensor} is of type: '
                       f'{type(input_tensor)}, which is not a valid type.')
if backend.is_keras_tensor(input_tensor):
if backend.image_data_format() == 'channels_first':
rows = backend.int_shape(input_tensor)[2]
cols = backend.int_shape(input_tensor)[3]
input_shape = (3, cols, rows)
else:
rows = backend.int_shape(input_tensor)[1]
cols = backend.int_shape(input_tensor)[2]
input_shape = (cols, rows, 3)
# If input_shape is None and input_tensor is None using standard shape
if input_shape is None and input_tensor is None:
input_shape = (None, None, 3)
if backend.image_data_format() == 'channels_last':
row_axis, col_axis = (0, 1)
else:
row_axis, col_axis = (1, 2)
rows = input_shape[row_axis]
cols = input_shape[col_axis]
if rows and cols and (rows < 32 or cols < 32):
raise ValueError('Input size must be at least 32x32; Received `input_shape='
f'{input_shape}`')
if weights == 'imagenet':
if (not minimalistic and alpha not in [0.75, 1.0]
or minimalistic and alpha != 1.0):
raise ValueError('If imagenet weights are being loaded, '
'alpha can be one of `0.75`, `1.0` for non minimalistic '
'or `1.0` for minimalistic only.')
if rows != cols or rows != 224:
logging.warning('`input_shape` is undefined or non-square, '
'or `rows` is not 224. '
'Weights for input shape (224, 224) will be '
'loaded as the default.')
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
if minimalistic:
kernel = 3
activation = relu
se_ratio = None
else:
kernel = 5
activation = hard_swish
se_ratio = 0.25
x = img_input
if include_preprocessing:
x = layers.Rescaling(scale=1. / 127.5, offset=-1.)(x)
x = layers.Conv2D(
16,
kernel_size=3,
strides=(2, 2),
padding='same',
use_bias=False,
name='Conv')(x)
x = layers.BatchNormalization(
axis=channel_axis, epsilon=1e-3,
momentum=0.999, name='Conv/BatchNorm')(x)
x = activation(x)
x = stack_fn(x, kernel, activation, se_ratio)
last_conv_ch = _depth(backend.int_shape(x)[channel_axis] * 6)
# if the width multiplier is greater than 1 we
# increase the number of output channels
if alpha > 1.0:
last_point_ch = _depth(last_point_ch * alpha)
x = layers.Conv2D(
last_conv_ch,
kernel_size=1,
padding='same',
use_bias=False,
name='Conv_1')(x)
x = layers.BatchNormalization(
axis=channel_axis, epsilon=1e-3,
momentum=0.999, name='Conv_1/BatchNorm')(x)
x = activation(x)
x = layers.GlobalAveragePooling2D(keepdims=True)(x)
x = layers.Conv2D(
last_point_ch,
kernel_size=1,
padding='same',
use_bias=True,
name='Conv_2')(x)
x = activation(x)
if include_top:
if dropout_rate > 0:
x = layers.Dropout(dropout_rate)(x)
x = layers.Conv2D(classes, kernel_size=1, padding='same', name='Logits')(x)
x = layers.Flatten()(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Activation(activation=classifier_activation,
name='Predictions')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D(name='max_pool')(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = layer_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = models.Model(inputs, x, name='MobilenetV3' + model_type)
# Load weights.
if weights == 'imagenet':
model_name = '{}{}_224_{}_float'.format(
model_type, '_minimalistic' if minimalistic else '', str(alpha))
if include_top:
file_name = 'weights_mobilenet_v3_' + model_name + '.h5'
file_hash = WEIGHTS_HASHES[model_name][0]
else:
file_name = 'weights_mobilenet_v3_' + model_name + '_no_top.h5'
file_hash = WEIGHTS_HASHES[model_name][1]
weights_path = data_utils.get_file(
file_name,
BASE_WEIGHT_PATH + file_name,
cache_subdir='models',
file_hash=file_hash)
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
@keras_export('keras.applications.MobileNetV3Small')
def MobileNetV3Small(input_shape=None,
alpha=1.0,
minimalistic=False,
include_top=True,
weights='imagenet',
input_tensor=None,
classes=1000,
pooling=None,
dropout_rate=0.2,
classifier_activation='softmax',
include_preprocessing=True):
def stack_fn(x, kernel, activation, se_ratio):
def depth(d):
return _depth(d * alpha)
x = _inverted_res_block(x, 1, depth(16), 3, 2, se_ratio, relu, 0)
x = _inverted_res_block(x, 72. / 16, depth(24), 3, 2, None, relu, 1)
x = _inverted_res_block(x, 88. / 24, depth(24), 3, 1, None, relu, 2)
x = _inverted_res_block(x, 4, depth(40), kernel, 2, se_ratio, activation, 3)
x = _inverted_res_block(x, 6, depth(40), kernel, 1, se_ratio, activation, 4)
x = _inverted_res_block(x, 6, depth(40), kernel, 1, se_ratio, activation, 5)
x = _inverted_res_block(x, 3, depth(48), kernel, 1, se_ratio, activation, 6)
x = _inverted_res_block(x, 3, depth(48), kernel, 1, se_ratio, activation, 7)
x = _inverted_res_block(x, 6, depth(96), kernel, 2, se_ratio, activation, 8)
x = _inverted_res_block(x, 6, depth(96), kernel, 1, se_ratio, activation, 9)
x = _inverted_res_block(x, 6, depth(96), kernel, 1, se_ratio, activation,
10)
return x
return MobileNetV3(stack_fn, 1024, input_shape, alpha, 'small', minimalistic,
include_top, weights, input_tensor, classes, pooling,
dropout_rate, classifier_activation, include_preprocessing)
@keras_export('keras.applications.MobileNetV3Large')
def MobileNetV3Large(input_shape=None,
alpha=1.0,
minimalistic=False,
include_top=True,
weights='imagenet',
input_tensor=None,
classes=1000,
pooling=None,
dropout_rate=0.2,
classifier_activation='softmax',
include_preprocessing=True):
def stack_fn(x, kernel, activation, se_ratio):
def depth(d):
return _depth(d * alpha)
x = _inverted_res_block(x, 1, depth(16), 3, 1, None, relu, 0)
x = _inverted_res_block(x, 4, depth(24), 3, 2, None, relu, 1)
x = _inverted_res_block(x, 3, depth(24), 3, 1, None, relu, 2)
x = _inverted_res_block(x, 3, depth(40), kernel, 2, se_ratio, relu, 3)
x = _inverted_res_block(x, 3, depth(40), kernel, 1, se_ratio, relu, 4)
x = _inverted_res_block(x, 3, depth(40), kernel, 1, se_ratio, relu, 5)
x = _inverted_res_block(x, 6, depth(80), 3, 2, None, activation, 6)
x = _inverted_res_block(x, 2.5, depth(80), 3, 1, None, activation, 7)
x = _inverted_res_block(x, 2.3, depth(80), 3, 1, None, activation, 8)
x = _inverted_res_block(x, 2.3, depth(80), 3, 1, None, activation, 9)
x = _inverted_res_block(x, 6, depth(112), 3, 1, se_ratio, activation, 10)
x = _inverted_res_block(x, 6, depth(112), 3, 1, se_ratio, activation, 11)
x = _inverted_res_block(x, 6, depth(160), kernel, 2, se_ratio, activation,
12)
x = _inverted_res_block(x, 6, depth(160), kernel, 1, se_ratio, activation,
13)
x = _inverted_res_block(x, 6, depth(160), kernel, 1, se_ratio, activation,
14)
return x
return MobileNetV3(stack_fn, 1280, input_shape, alpha, 'large', minimalistic,
include_top, weights, input_tensor, classes, pooling,
dropout_rate, classifier_activation, include_preprocessing)
MobileNetV3Small.__doc__ = BASE_DOCSTRING.format(name='MobileNetV3Small')
MobileNetV3Large.__doc__ = BASE_DOCSTRING.format(name='MobileNetV3Large')
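# The sketch below is an illustrative usage example, not part of the original
# module: `_usage_example_sketch` and the random tensor standing in for real
# images are assumptions made purely for demonstration. It shows the two input
# conventions documented above: with the default `include_preprocessing=True`,
# raw pixels in [0, 255] can be fed directly; with
# `include_preprocessing=False`, the caller rescales inputs to [-1, 1].
def _usage_example_sketch():
  """Illustrative only; never called by the library code."""
  images = tf.random.uniform((1, 224, 224, 3), maxval=255.0)  # [0, 255]
  # Preprocessing (`Rescaling`) is part of the model, so raw pixels are fine.
  model = MobileNetV3Small(weights=None, include_preprocessing=True)
  preds = model(images)
  # Alternatively, disable the built-in `Rescaling` and normalize manually.
  model_no_prep = MobileNetV3Small(weights=None, include_preprocessing=False)
  preds_no_prep = model_no_prep(images / 127.5 - 1.0)  # [-1, 1]
  return preds, preds_no_prep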
def relu(x):
return layers.ReLU()(x)
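# hard_sigmoid and hard_swish below are the piecewise-linear approximations
# used by MobileNetV3: hard_sigmoid(x) = relu6(x + 3) / 6 and
# hard_swish(x) = x * hard_sigmoid(x). They mimic sigmoid/swish while avoiding
# the exponential, which is cheaper on mobile hardware.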
def hard_sigmoid(x):
return layers.ReLU(6.)(x + 3.) * (1. / 6.)
def hard_swish(x):
return layers.Multiply()([x, hard_sigmoid(x)])
# This function is taken from the original tf repo.
# It ensures that all layers have a channel number that is divisible by 8
# It can be seen here:
# https://github.com/tensorflow/models/blob/master/research/
# slim/nets/mobilenet/mobilenet.py
def _depth(v, divisor=8, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
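# Worked examples of the rounding rule above (illustrative values only):
#   _depth(18)   -> 24: rounding down to 16 would lose more than 10% of 18.
#   _depth(24.0) -> 24: already a multiple of 8.
#   _depth(96)   -> 96.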
def _se_block(inputs, filters, se_ratio, prefix):
x = layers.GlobalAveragePooling2D(
keepdims=True, name=prefix + 'squeeze_excite/AvgPool')(
inputs)
x = layers.Conv2D(
_depth(filters * se_ratio),
kernel_size=1,
padding='same',
name=prefix + 'squeeze_excite/Conv')(
x)
x = layers.ReLU(name=prefix + 'squeeze_excite/Relu')(x)
x = layers.Conv2D(
filters,
kernel_size=1,
padding='same',
name=prefix + 'squeeze_excite/Conv_1')(
x)
x = hard_sigmoid(x)
x = layers.Multiply(name=prefix + 'squeeze_excite/Mul')([inputs, x])
return x
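# _inverted_res_block below implements the MobileNetV2/V3 bottleneck: an
# optional 1x1 "expand" convolution (skipped for the very first block, where
# block_id == 0), a depthwise convolution, an optional squeeze-and-excite gate
# (_se_block above), and a 1x1 "project" convolution back down to `filters`.
# A residual connection is added only when stride == 1 and the input and
# output channel counts match.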
def _inverted_res_block(x, expansion, filters, kernel_size, stride, se_ratio,
activation, block_id):
channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
shortcut = x
prefix = 'expanded_conv/'
infilters = backend.int_shape(x)[channel_axis]
if block_id:
# Expand
prefix = 'expanded_conv_{}/'.format(block_id)
x = layers.Conv2D(
_depth(infilters * expansion),
kernel_size=1,
padding='same',
use_bias=False,
name=prefix + 'expand')(
x)
x = layers.BatchNormalization(
axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
name=prefix + 'expand/BatchNorm')(
x)
x = activation(x)
if stride == 2:
x = layers.ZeroPadding2D(
padding=imagenet_utils.correct_pad(x, kernel_size),
name=prefix + 'depthwise/pad')(
x)
x = layers.DepthwiseConv2D(
kernel_size,
strides=stride,
padding='same' if stride == 1 else 'valid',
use_bias=False,
name=prefix + 'depthwise')(
x)
x = layers.BatchNormalization(
axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
name=prefix + 'depthwise/BatchNorm')(
x)
x = activation(x)
if se_ratio:
x = _se_block(x, _depth(infilters * expansion), se_ratio, prefix)
x = layers.Conv2D(
filters,
kernel_size=1,
padding='same',
use_bias=False,
name=prefix + 'project')(
x)
x = layers.BatchNormalization(
axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
name=prefix + 'project/BatchNorm')(
x)
if stride == 1 and infilters == filters:
x = layers.Add(name=prefix + 'Add')([shortcut, x])
return x
@keras_export('keras.applications.mobilenet_v3.preprocess_input')
def preprocess_input(x, data_format=None): # pylint: disable=unused-argument
"""A placeholder method for backward compatibility.
The preprocessing logic has been included in the mobilenet_v3 model
implementation. Users are no longer required to call this method to normalize
  the input data. This method does nothing and is only kept as a placeholder to
  align the API surface between the old and new versions of the model.
Args:
x: A floating point `numpy.array` or a `tf.Tensor`.
data_format: Optional data format of the image tensor/array. Defaults to
None, in which case the global setting
`tf.keras.backend.image_data_format()` is used (unless you changed it,
      it defaults to "channels_last").
Returns:
Unchanged `numpy.array` or `tf.Tensor`.
"""
return x
@keras_export('keras.applications.mobilenet_v3.decode_predictions')
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
| 23,489 | 38.412752 | 87 | py |
keras | keras-master/keras/applications/nasnet.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""NASNet-A models for Keras.
NASNet refers to Neural Architecture Search Network, a family of models
that were designed automatically by learning the model architectures
directly on the dataset of interest.
Here we consider NASNet-A, the highest performance model that was found
for the CIFAR-10 dataset, and then extended to ImageNet 2012 dataset,
obtaining state of the art performance on CIFAR-10 and ImageNet 2012.
Only the NASNet-A models, and their respective weights, which are suited
for ImageNet 2012 are provided.
The below table describes the performance on ImageNet 2012:
--------------------------------------------------------------------------------
Architecture | Top-1 Acc | Top-5 Acc | Multiply-Adds | Params (M)
--------------------------------------------------------------------------------
| NASNet-A (4 @ 1056) | 74.0 % | 91.6 % | 564 M | 5.3 |
| NASNet-A (6 @ 4032) | 82.7 % | 96.2 % | 23.8 B | 88.9 |
--------------------------------------------------------------------------------
Reference:
- [Learning Transferable Architectures for Scalable Image Recognition](
https://arxiv.org/abs/1707.07012) (CVPR 2018)
"""
import tensorflow.compat.v2 as tf
from keras import backend
from keras.applications import imagenet_utils
from keras.engine import training
from keras.layers import VersionAwareLayers
from keras.utils import data_utils
from keras.utils import layer_utils
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
BASE_WEIGHTS_PATH = ('https://storage.googleapis.com/tensorflow/'
'keras-applications/nasnet/')
NASNET_MOBILE_WEIGHT_PATH = BASE_WEIGHTS_PATH + 'NASNet-mobile.h5'
NASNET_MOBILE_WEIGHT_PATH_NO_TOP = BASE_WEIGHTS_PATH + 'NASNet-mobile-no-top.h5'
NASNET_LARGE_WEIGHT_PATH = BASE_WEIGHTS_PATH + 'NASNet-large.h5'
NASNET_LARGE_WEIGHT_PATH_NO_TOP = BASE_WEIGHTS_PATH + 'NASNet-large-no-top.h5'
layers = VersionAwareLayers()
def NASNet(input_shape=None,
penultimate_filters=4032,
num_blocks=6,
stem_block_filters=96,
skip_reduction=True,
filter_multiplier=2,
include_top=True,
weights='imagenet',
input_tensor=None,
pooling=None,
classes=1000,
default_size=None,
classifier_activation='softmax'):
"""Instantiates a NASNet model.
Reference:
- [Learning Transferable Architectures for Scalable Image Recognition](
https://arxiv.org/abs/1707.07012) (CVPR 2018)
For image classification use cases, see
[this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
Note: each Keras Application expects a specific kind of input preprocessing.
For NasNet, call `tf.keras.applications.nasnet.preprocess_input`
on your inputs before passing them to the model.
`nasnet.preprocess_input` will scale input pixels between -1 and 1.
Args:
input_shape: Optional shape tuple, the input shape
is by default `(331, 331, 3)` for NASNetLarge and
`(224, 224, 3)` for NASNetMobile.
It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(224, 224, 3)` would be one valid value.
penultimate_filters: Number of filters in the penultimate layer.
NASNet models use the notation `NASNet (N @ P)`, where:
- N is the number of blocks
- P is the number of penultimate filters
num_blocks: Number of repeated blocks of the NASNet model.
NASNet models use the notation `NASNet (N @ P)`, where:
- N is the number of blocks
- P is the number of penultimate filters
stem_block_filters: Number of filters in the initial stem block
skip_reduction: Whether to skip the reduction step at the tail
end of the network.
filter_multiplier: Controls the width of the network.
- If `filter_multiplier` < 1.0, proportionally decreases the number
of filters in each layer.
- If `filter_multiplier` > 1.0, proportionally increases the number
of filters in each layer.
- If `filter_multiplier` = 1, default number of filters from the
paper are used at each layer.
include_top: Whether to include the fully-connected
layer at the top of the network.
weights: `None` (random initialization) or
`imagenet` (ImageNet weights)
input_tensor: Optional Keras tensor (i.e. output of
`layers.Input()`)
to use as image input for the model.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
classes: Optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
default_size: Specifies the default image size of the model
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
Returns:
A `keras.Model` instance.
"""
if not (weights in {'imagenet', None} or tf.io.gfile.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top` '
'as true, `classes` should be 1000')
if (isinstance(input_shape, tuple) and None in input_shape and
weights == 'imagenet'):
raise ValueError('When specifying the input shape of a NASNet'
' and loading `ImageNet` weights, '
'the input_shape argument must be static '
'(no None entries). Got: `input_shape=' +
str(input_shape) + '`.')
if default_size is None:
default_size = 331
# Determine proper input shape and default size.
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=default_size,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=True,
weights=weights)
if backend.image_data_format() != 'channels_last':
logging.warning('The NASNet family of models is only available '
'for the input data format "channels_last" '
'(width, height, channels). '
'However your settings specify the default '
'data format "channels_first" (channels, width, height).'
' You should set `image_data_format="channels_last"` '
'in your Keras config located at ~/.keras/keras.json. '
'The model being returned right now will expect inputs '
'to follow the "channels_last" data format.')
backend.set_image_data_format('channels_last')
old_data_format = 'channels_first'
else:
old_data_format = None
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
if penultimate_filters % (24 * (filter_multiplier**2)) != 0:
raise ValueError(
'For NASNet-A models, the `penultimate_filters` must be a multiple '
'of 24 * (`filter_multiplier` ** 2). Current value: %d' %
penultimate_filters)
channel_dim = 1 if backend.image_data_format() == 'channels_first' else -1
filters = penultimate_filters // 24
x = layers.Conv2D(
stem_block_filters, (3, 3),
strides=(2, 2),
padding='valid',
use_bias=False,
name='stem_conv1',
kernel_initializer='he_normal')(
img_input)
x = layers.BatchNormalization(
axis=channel_dim, momentum=0.9997, epsilon=1e-3, name='stem_bn1')(
x)
p = None
x, p = _reduction_a_cell(
x, p, filters // (filter_multiplier**2), block_id='stem_1')
x, p = _reduction_a_cell(
x, p, filters // filter_multiplier, block_id='stem_2')
for i in range(num_blocks):
x, p = _normal_a_cell(x, p, filters, block_id='%d' % (i))
x, p0 = _reduction_a_cell(
x, p, filters * filter_multiplier, block_id='reduce_%d' % (num_blocks))
p = p0 if not skip_reduction else p
for i in range(num_blocks):
x, p = _normal_a_cell(
x, p, filters * filter_multiplier, block_id='%d' % (num_blocks + i + 1))
x, p0 = _reduction_a_cell(
x,
p,
filters * filter_multiplier**2,
block_id='reduce_%d' % (2 * num_blocks))
p = p0 if not skip_reduction else p
for i in range(num_blocks):
x, p = _normal_a_cell(
x,
p,
filters * filter_multiplier**2,
block_id='%d' % (2 * num_blocks + i + 1))
x = layers.Activation('relu')(x)
if include_top:
x = layers.GlobalAveragePooling2D()(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Dense(classes, activation=classifier_activation,
name='predictions')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = layer_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
model = training.Model(inputs, x, name='NASNet')
# Load weights.
if weights == 'imagenet':
if default_size == 224: # mobile version
if include_top:
weights_path = data_utils.get_file(
'nasnet_mobile.h5',
NASNET_MOBILE_WEIGHT_PATH,
cache_subdir='models',
file_hash='020fb642bf7360b370c678b08e0adf61')
else:
weights_path = data_utils.get_file(
'nasnet_mobile_no_top.h5',
NASNET_MOBILE_WEIGHT_PATH_NO_TOP,
cache_subdir='models',
file_hash='1ed92395b5b598bdda52abe5c0dbfd63')
model.load_weights(weights_path)
elif default_size == 331: # large version
if include_top:
weights_path = data_utils.get_file(
'nasnet_large.h5',
NASNET_LARGE_WEIGHT_PATH,
cache_subdir='models',
file_hash='11577c9a518f0070763c2b964a382f17')
else:
weights_path = data_utils.get_file(
'nasnet_large_no_top.h5',
NASNET_LARGE_WEIGHT_PATH_NO_TOP,
cache_subdir='models',
file_hash='d81d89dc07e6e56530c4e77faddd61b5')
model.load_weights(weights_path)
else:
raise ValueError('ImageNet weights can only be loaded with NASNetLarge'
' or NASNetMobile')
elif weights is not None:
model.load_weights(weights)
if old_data_format:
backend.set_image_data_format(old_data_format)
return model
@keras_export('keras.applications.nasnet.NASNetMobile',
'keras.applications.NASNetMobile')
def NASNetMobile(input_shape=None,
include_top=True,
weights='imagenet',
input_tensor=None,
pooling=None,
classes=1000):
"""Instantiates a Mobile NASNet model in ImageNet mode.
Reference:
- [Learning Transferable Architectures for Scalable Image Recognition](
https://arxiv.org/abs/1707.07012) (CVPR 2018)
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
Note: each Keras Application expects a specific kind of input preprocessing.
For NASNet, call `tf.keras.applications.nasnet.preprocess_input` on your
inputs before passing them to the model.
Args:
input_shape: Optional shape tuple, only to be specified
      if `include_top` is False (otherwise the input shape
      has to be `(224, 224, 3)` for NASNetMobile).
      It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(224, 224, 3)` would be one valid value.
include_top: Whether to include the fully-connected
layer at the top of the network.
weights: `None` (random initialization) or
`imagenet` (ImageNet weights)
For loading `imagenet` weights, `input_shape` should be (224, 224, 3)
input_tensor: Optional Keras tensor (i.e. output of
`layers.Input()`)
to use as image input for the model.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
classes: Optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
Returns:
A Keras model instance.
Raises:
ValueError: In case of invalid argument for `weights`,
or invalid input shape.
RuntimeError: If attempting to run this model with a
backend that does not support separable convolutions.
"""
return NASNet(
input_shape,
penultimate_filters=1056,
num_blocks=4,
stem_block_filters=32,
skip_reduction=False,
filter_multiplier=2,
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
pooling=pooling,
classes=classes,
default_size=224)
@keras_export('keras.applications.nasnet.NASNetLarge',
'keras.applications.NASNetLarge')
def NASNetLarge(input_shape=None,
include_top=True,
weights='imagenet',
input_tensor=None,
pooling=None,
classes=1000):
"""Instantiates a NASNet model in ImageNet mode.
Reference:
- [Learning Transferable Architectures for Scalable Image Recognition](
https://arxiv.org/abs/1707.07012) (CVPR 2018)
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
Note: each Keras Application expects a specific kind of input preprocessing.
For NASNet, call `tf.keras.applications.nasnet.preprocess_input` on your
inputs before passing them to the model.
Args:
input_shape: Optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
      has to be `(331, 331, 3)` for NASNetLarge).
      It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(224, 224, 3)` would be one valid value.
include_top: Whether to include the fully-connected
layer at the top of the network.
weights: `None` (random initialization) or
`imagenet` (ImageNet weights)
For loading `imagenet` weights, `input_shape` should be (331, 331, 3)
input_tensor: Optional Keras tensor (i.e. output of
`layers.Input()`)
to use as image input for the model.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
classes: Optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
Returns:
A Keras model instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
RuntimeError: If attempting to run this model with a
backend that does not support separable convolutions.
"""
return NASNet(
input_shape,
penultimate_filters=4032,
num_blocks=6,
stem_block_filters=96,
skip_reduction=True,
filter_multiplier=2,
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
pooling=pooling,
classes=classes,
default_size=331)
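# The sketch below is an illustrative usage example, not part of the original
# module: `_usage_example_sketch` and the random tensor standing in for real
# images are assumptions made purely for demonstration. NASNet models do not
# embed preprocessing, so inputs are scaled to [-1, 1] with `preprocess_input`
# (defined at the bottom of this file) before calling the model.
def _usage_example_sketch():
  """Illustrative only; never called by the library code."""
  images = tf.random.uniform((1, 224, 224, 3), maxval=255.0)  # [0, 255]
  scaled = preprocess_input(images)  # maps [0, 255] to [-1, 1]
  model = NASNetMobile(weights=None, input_shape=(224, 224, 3))
  return model(scaled)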
def _separable_conv_block(ip,
filters,
kernel_size=(3, 3),
strides=(1, 1),
block_id=None):
"""Adds 2 blocks of [relu-separable conv-batchnorm].
Args:
ip: Input tensor
filters: Number of output filters per layer
kernel_size: Kernel size of separable convolutions
strides: Strided convolution for downsampling
block_id: String block_id
Returns:
A Keras tensor
"""
channel_dim = 1 if backend.image_data_format() == 'channels_first' else -1
with backend.name_scope('separable_conv_block_%s' % block_id):
x = layers.Activation('relu')(ip)
if strides == (2, 2):
x = layers.ZeroPadding2D(
padding=imagenet_utils.correct_pad(x, kernel_size),
name='separable_conv_1_pad_%s' % block_id)(x)
conv_pad = 'valid'
else:
conv_pad = 'same'
x = layers.SeparableConv2D(
filters,
kernel_size,
strides=strides,
name='separable_conv_1_%s' % block_id,
padding=conv_pad,
use_bias=False,
kernel_initializer='he_normal')(
x)
x = layers.BatchNormalization(
axis=channel_dim,
momentum=0.9997,
epsilon=1e-3,
name='separable_conv_1_bn_%s' % (block_id))(
x)
x = layers.Activation('relu')(x)
x = layers.SeparableConv2D(
filters,
kernel_size,
name='separable_conv_2_%s' % block_id,
padding='same',
use_bias=False,
kernel_initializer='he_normal')(
x)
x = layers.BatchNormalization(
axis=channel_dim,
momentum=0.9997,
epsilon=1e-3,
name='separable_conv_2_bn_%s' % (block_id))(
x)
return x
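# _adjust_block below reconciles the "previous" path `p` with the current input
# `ip`: when their spatial sizes differ (after a reduction cell), `p` is
# downsampled with two offset average-pooling paths that are concatenated;
# when only the channel counts differ, `p` goes through a 1x1 projection
# convolution. Both adjustment paths end with batch normalization.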
def _adjust_block(p, ip, filters, block_id=None):
"""Adjusts the input `previous path` to match the shape of the `input`.
Used in situations where the output number of filters needs to be changed.
Args:
p: Input tensor which needs to be modified
ip: Input tensor whose shape needs to be matched
filters: Number of output filters to be matched
block_id: String block_id
Returns:
Adjusted Keras tensor
"""
channel_dim = 1 if backend.image_data_format() == 'channels_first' else -1
img_dim = 2 if backend.image_data_format() == 'channels_first' else -2
ip_shape = backend.int_shape(ip)
if p is not None:
p_shape = backend.int_shape(p)
with backend.name_scope('adjust_block'):
if p is None:
p = ip
elif p_shape[img_dim] != ip_shape[img_dim]:
with backend.name_scope('adjust_reduction_block_%s' % block_id):
p = layers.Activation('relu', name='adjust_relu_1_%s' % block_id)(p)
p1 = layers.AveragePooling2D((1, 1),
strides=(2, 2),
padding='valid',
name='adjust_avg_pool_1_%s' % block_id)(
p)
p1 = layers.Conv2D(
filters // 2, (1, 1),
padding='same',
use_bias=False,
name='adjust_conv_1_%s' % block_id,
kernel_initializer='he_normal')(
p1)
p2 = layers.ZeroPadding2D(padding=((0, 1), (0, 1)))(p)
p2 = layers.Cropping2D(cropping=((1, 0), (1, 0)))(p2)
p2 = layers.AveragePooling2D((1, 1),
strides=(2, 2),
padding='valid',
name='adjust_avg_pool_2_%s' % block_id)(
p2)
p2 = layers.Conv2D(
filters // 2, (1, 1),
padding='same',
use_bias=False,
name='adjust_conv_2_%s' % block_id,
kernel_initializer='he_normal')(
p2)
p = layers.concatenate([p1, p2], axis=channel_dim)
p = layers.BatchNormalization(
axis=channel_dim,
momentum=0.9997,
epsilon=1e-3,
name='adjust_bn_%s' % block_id)(
p)
elif p_shape[channel_dim] != filters:
with backend.name_scope('adjust_projection_block_%s' % block_id):
p = layers.Activation('relu')(p)
p = layers.Conv2D(
filters, (1, 1),
strides=(1, 1),
padding='same',
name='adjust_conv_projection_%s' % block_id,
use_bias=False,
kernel_initializer='he_normal')(
p)
p = layers.BatchNormalization(
axis=channel_dim,
momentum=0.9997,
epsilon=1e-3,
name='adjust_bn_%s' % block_id)(
p)
return p
def _normal_a_cell(ip, p, filters, block_id=None):
"""Adds a Normal cell for NASNet-A (Fig. 4 in the paper).
Args:
ip: Input tensor `x`
p: Input tensor `p`
filters: Number of output filters
block_id: String block_id
Returns:
A Keras tensor
"""
channel_dim = 1 if backend.image_data_format() == 'channels_first' else -1
with backend.name_scope('normal_A_block_%s' % block_id):
p = _adjust_block(p, ip, filters, block_id)
h = layers.Activation('relu')(ip)
h = layers.Conv2D(
filters, (1, 1),
strides=(1, 1),
padding='same',
name='normal_conv_1_%s' % block_id,
use_bias=False,
kernel_initializer='he_normal')(
h)
h = layers.BatchNormalization(
axis=channel_dim,
momentum=0.9997,
epsilon=1e-3,
name='normal_bn_1_%s' % block_id)(
h)
with backend.name_scope('block_1'):
x1_1 = _separable_conv_block(
h, filters, kernel_size=(5, 5), block_id='normal_left1_%s' % block_id)
x1_2 = _separable_conv_block(
p, filters, block_id='normal_right1_%s' % block_id)
x1 = layers.add([x1_1, x1_2], name='normal_add_1_%s' % block_id)
with backend.name_scope('block_2'):
x2_1 = _separable_conv_block(
p, filters, (5, 5), block_id='normal_left2_%s' % block_id)
x2_2 = _separable_conv_block(
p, filters, (3, 3), block_id='normal_right2_%s' % block_id)
x2 = layers.add([x2_1, x2_2], name='normal_add_2_%s' % block_id)
with backend.name_scope('block_3'):
x3 = layers.AveragePooling2D((3, 3),
strides=(1, 1),
padding='same',
name='normal_left3_%s' % (block_id))(
h)
x3 = layers.add([x3, p], name='normal_add_3_%s' % block_id)
with backend.name_scope('block_4'):
x4_1 = layers.AveragePooling2D((3, 3),
strides=(1, 1),
padding='same',
name='normal_left4_%s' % (block_id))(
p)
x4_2 = layers.AveragePooling2D((3, 3),
strides=(1, 1),
padding='same',
name='normal_right4_%s' % (block_id))(
p)
x4 = layers.add([x4_1, x4_2], name='normal_add_4_%s' % block_id)
with backend.name_scope('block_5'):
x5 = _separable_conv_block(
h, filters, block_id='normal_left5_%s' % block_id)
x5 = layers.add([x5, h], name='normal_add_5_%s' % block_id)
x = layers.concatenate([p, x1, x2, x3, x4, x5],
axis=channel_dim,
name='normal_concat_%s' % block_id)
return x, ip
def _reduction_a_cell(ip, p, filters, block_id=None):
"""Adds a Reduction cell for NASNet-A (Fig. 4 in the paper).
Args:
ip: Input tensor `x`
p: Input tensor `p`
filters: Number of output filters
block_id: String block_id
Returns:
A Keras tensor
"""
channel_dim = 1 if backend.image_data_format() == 'channels_first' else -1
with backend.name_scope('reduction_A_block_%s' % block_id):
p = _adjust_block(p, ip, filters, block_id)
h = layers.Activation('relu')(ip)
h = layers.Conv2D(
filters, (1, 1),
strides=(1, 1),
padding='same',
name='reduction_conv_1_%s' % block_id,
use_bias=False,
kernel_initializer='he_normal')(
h)
h = layers.BatchNormalization(
axis=channel_dim,
momentum=0.9997,
epsilon=1e-3,
name='reduction_bn_1_%s' % block_id)(
h)
h3 = layers.ZeroPadding2D(
padding=imagenet_utils.correct_pad(h, 3),
name='reduction_pad_1_%s' % block_id)(
h)
with backend.name_scope('block_1'):
x1_1 = _separable_conv_block(
h,
filters, (5, 5),
strides=(2, 2),
block_id='reduction_left1_%s' % block_id)
x1_2 = _separable_conv_block(
p,
filters, (7, 7),
strides=(2, 2),
block_id='reduction_right1_%s' % block_id)
x1 = layers.add([x1_1, x1_2], name='reduction_add_1_%s' % block_id)
with backend.name_scope('block_2'):
x2_1 = layers.MaxPooling2D((3, 3),
strides=(2, 2),
padding='valid',
name='reduction_left2_%s' % block_id)(
h3)
x2_2 = _separable_conv_block(
p,
filters, (7, 7),
strides=(2, 2),
block_id='reduction_right2_%s' % block_id)
x2 = layers.add([x2_1, x2_2], name='reduction_add_2_%s' % block_id)
with backend.name_scope('block_3'):
x3_1 = layers.AveragePooling2D((3, 3),
strides=(2, 2),
padding='valid',
name='reduction_left3_%s' % block_id)(
h3)
x3_2 = _separable_conv_block(
p,
filters, (5, 5),
strides=(2, 2),
block_id='reduction_right3_%s' % block_id)
x3 = layers.add([x3_1, x3_2], name='reduction_add3_%s' % block_id)
with backend.name_scope('block_4'):
x4 = layers.AveragePooling2D((3, 3),
strides=(1, 1),
padding='same',
name='reduction_left4_%s' % block_id)(
x1)
x4 = layers.add([x2, x4])
with backend.name_scope('block_5'):
x5_1 = _separable_conv_block(
x1, filters, (3, 3), block_id='reduction_left4_%s' % block_id)
x5_2 = layers.MaxPooling2D((3, 3),
strides=(2, 2),
padding='valid',
name='reduction_right5_%s' % block_id)(
h3)
x5 = layers.add([x5_1, x5_2], name='reduction_add4_%s' % block_id)
x = layers.concatenate([x2, x3, x4, x5],
axis=channel_dim,
name='reduction_concat_%s' % block_id)
return x, ip
@keras_export('keras.applications.nasnet.preprocess_input')
def preprocess_input(x, data_format=None):
return imagenet_utils.preprocess_input(x, data_format=data_format, mode='tf')
@keras_export('keras.applications.nasnet.decode_predictions')
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
mode='',
ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF,
error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
| 30,441 | 36.215159 | 87 | py |
keras | keras-master/keras/applications/__init__.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras Applications are canned architectures with pre-trained weights."""
| 765 | 46.875 | 80 | py |
keras | keras-master/keras/applications/imagenet_utils_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for imagenet_utils."""
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import numpy as np
import keras
from keras import keras_parameterized
from keras.applications import imagenet_utils as utils
from keras.mixed_precision.policy import set_policy
class TestImageNetUtils(keras_parameterized.TestCase):
def test_preprocess_input(self):
# Test invalid mode check
x = np.random.uniform(0, 255, (10, 10, 3))
with self.assertRaises(ValueError):
utils.preprocess_input(x, mode='some_unknown_mode')
# Test image batch with float and int image input
x = np.random.uniform(0, 255, (2, 10, 10, 3))
xint = x.astype('int32')
self.assertEqual(utils.preprocess_input(x).shape, x.shape)
self.assertEqual(utils.preprocess_input(xint).shape, xint.shape)
out1 = utils.preprocess_input(x, 'channels_last')
out1int = utils.preprocess_input(xint, 'channels_last')
out2 = utils.preprocess_input(
np.transpose(x, (0, 3, 1, 2)), 'channels_first')
out2int = utils.preprocess_input(
np.transpose(xint, (0, 3, 1, 2)), 'channels_first')
self.assertAllClose(out1, out2.transpose(0, 2, 3, 1))
self.assertAllClose(out1int, out2int.transpose(0, 2, 3, 1))
# Test single image
x = np.random.uniform(0, 255, (10, 10, 3))
xint = x.astype('int32')
self.assertEqual(utils.preprocess_input(x).shape, x.shape)
self.assertEqual(utils.preprocess_input(xint).shape, xint.shape)
out1 = utils.preprocess_input(x, 'channels_last')
out1int = utils.preprocess_input(xint, 'channels_last')
out2 = utils.preprocess_input(np.transpose(x, (2, 0, 1)), 'channels_first')
out2int = utils.preprocess_input(
np.transpose(xint, (2, 0, 1)), 'channels_first')
self.assertAllClose(out1, out2.transpose(1, 2, 0))
self.assertAllClose(out1int, out2int.transpose(1, 2, 0))
    # Test that writing over the input data works predictably: float inputs
    # are modified in place, while int inputs are converted (copied) first.
for mode in ['torch', 'tf']:
x = np.random.uniform(0, 255, (2, 10, 10, 3))
xint = x.astype('int')
x2 = utils.preprocess_input(x, mode=mode)
xint2 = utils.preprocess_input(xint)
self.assertAllClose(x, x2)
self.assertNotEqual(xint.astype('float').max(), xint2.max())
# Caffe mode works differently from the others
x = np.random.uniform(0, 255, (2, 10, 10, 3))
xint = x.astype('int')
x2 = utils.preprocess_input(x, data_format='channels_last', mode='caffe')
xint2 = utils.preprocess_input(xint)
self.assertAllClose(x, x2[..., ::-1])
self.assertNotEqual(xint.astype('float').max(), xint2.max())
@parameterized.named_parameters([
{
'testcase_name': 'mode_torch',
'mode': 'torch'
},
{
'testcase_name': 'mode_tf',
'mode': 'tf'
},
{
'testcase_name': 'mode_caffe',
'mode': 'caffe'
},
])
def test_preprocess_input_symbolic(self, mode):
# Test image batch
x = np.random.uniform(0, 255, (2, 10, 10, 3))
inputs = keras.layers.Input(shape=x.shape[1:])
outputs = keras.layers.Lambda(
lambda x: utils.preprocess_input(x, mode=mode),
output_shape=x.shape[1:])(
inputs)
model = keras.Model(inputs, outputs)
self.assertEqual(model.predict(x).shape, x.shape)
outputs1 = keras.layers.Lambda(
lambda x: utils.preprocess_input(x, 'channels_last', mode=mode),
output_shape=x.shape[1:])(
inputs)
model1 = keras.Model(inputs, outputs1)
out1 = model1.predict(x)
x2 = np.transpose(x, (0, 3, 1, 2))
inputs2 = keras.layers.Input(shape=x2.shape[1:])
outputs2 = keras.layers.Lambda(
lambda x: utils.preprocess_input(x, 'channels_first', mode=mode),
output_shape=x2.shape[1:])(
inputs2)
model2 = keras.Model(inputs2, outputs2)
out2 = model2.predict(x2)
self.assertAllClose(out1, out2.transpose(0, 2, 3, 1))
# Test single image
x = np.random.uniform(0, 255, (10, 10, 3))
inputs = keras.layers.Input(shape=x.shape)
outputs = keras.layers.Lambda(
lambda x: utils.preprocess_input(x, mode=mode), output_shape=x.shape)(
inputs)
model = keras.Model(inputs, outputs)
self.assertEqual(model.predict(x[np.newaxis])[0].shape, x.shape)
outputs1 = keras.layers.Lambda(
lambda x: utils.preprocess_input(x, 'channels_last', mode=mode),
output_shape=x.shape)(
inputs)
model1 = keras.Model(inputs, outputs1)
out1 = model1.predict(x[np.newaxis])[0]
x2 = np.transpose(x, (2, 0, 1))
inputs2 = keras.layers.Input(shape=x2.shape)
outputs2 = keras.layers.Lambda(
lambda x: utils.preprocess_input(x, 'channels_first', mode=mode),
output_shape=x2.shape)(
inputs2)
model2 = keras.Model(inputs2, outputs2)
out2 = model2.predict(x2[np.newaxis])[0]
self.assertAllClose(out1, out2.transpose(1, 2, 0))
@parameterized.named_parameters([
{
'testcase_name': 'mode_torch',
'mode': 'torch'
},
{
'testcase_name': 'mode_tf',
'mode': 'tf'
},
{
'testcase_name': 'mode_caffe',
'mode': 'caffe'
},
])
def test_preprocess_input_symbolic_mixed_precision(self, mode):
set_policy('mixed_float16')
shape = (20, 20, 3)
inputs = keras.layers.Input(shape=shape)
try:
keras.layers.Lambda(
lambda x: utils.preprocess_input(x, mode=mode), output_shape=shape)(
inputs)
finally:
set_policy('float32')
@parameterized.named_parameters([
{'testcase_name': 'channels_last_format',
'data_format': 'channels_last'},
{'testcase_name': 'channels_first_format',
'data_format': 'channels_first'},
])
def test_obtain_input_shape(self, data_format):
# input_shape and default_size are not identical.
with self.assertRaises(ValueError):
utils.obtain_input_shape(
input_shape=(224, 224, 3),
default_size=299,
min_size=139,
data_format='channels_last',
require_flatten=True,
weights='imagenet')
# Test invalid use cases
shape = (139, 139)
if data_format == 'channels_last':
input_shape = shape + (99,)
else:
input_shape = (99,) + shape
# input_shape is smaller than min_size.
shape = (100, 100)
if data_format == 'channels_last':
input_shape = shape + (3,)
else:
input_shape = (3,) + shape
with self.assertRaises(ValueError):
utils.obtain_input_shape(
input_shape=input_shape,
default_size=None,
min_size=139,
data_format=data_format,
require_flatten=False)
# shape is 1D.
shape = (100,)
if data_format == 'channels_last':
input_shape = shape + (3,)
else:
input_shape = (3,) + shape
with self.assertRaises(ValueError):
utils.obtain_input_shape(
input_shape=input_shape,
default_size=None,
min_size=139,
data_format=data_format,
require_flatten=False)
# the number of channels is 5 not 3.
shape = (100, 100)
if data_format == 'channels_last':
input_shape = shape + (5,)
else:
input_shape = (5,) + shape
with self.assertRaises(ValueError):
utils.obtain_input_shape(
input_shape=input_shape,
default_size=None,
min_size=139,
data_format=data_format,
require_flatten=False)
# require_flatten=True with dynamic input shape.
with self.assertRaises(ValueError):
utils.obtain_input_shape(
input_shape=None,
default_size=None,
min_size=139,
data_format='channels_first',
require_flatten=True)
# test include top
self.assertEqual(utils.obtain_input_shape(
input_shape=(3, 200, 200),
default_size=None,
min_size=139,
data_format='channels_first',
require_flatten=True), (3, 200, 200))
self.assertEqual(utils.obtain_input_shape(
input_shape=None,
default_size=None,
min_size=139,
data_format='channels_last',
require_flatten=False), (None, None, 3))
self.assertEqual(utils.obtain_input_shape(
input_shape=None,
default_size=None,
min_size=139,
data_format='channels_first',
require_flatten=False), (3, None, None))
self.assertEqual(utils.obtain_input_shape(
input_shape=None,
default_size=None,
min_size=139,
data_format='channels_last',
require_flatten=False), (None, None, 3))
self.assertEqual(utils.obtain_input_shape(
input_shape=(150, 150, 3),
default_size=None,
min_size=139,
data_format='channels_last',
require_flatten=False), (150, 150, 3))
self.assertEqual(utils.obtain_input_shape(
input_shape=(3, None, None),
default_size=None,
min_size=139,
data_format='channels_first',
require_flatten=False), (3, None, None))
if __name__ == '__main__':
tf.test.main()
| 9,851 | 32.39661 | 80 | py |
keras | keras-master/keras/applications/applications_load_weight_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration tests for Keras applications."""
import tensorflow.compat.v2 as tf
from absl import flags
from absl.testing import parameterized
import numpy as np
from keras.applications import densenet
from keras.applications import efficientnet
from keras.applications import inception_resnet_v2
from keras.applications import inception_v3
from keras.applications import mobilenet
from keras.applications import mobilenet_v2
from keras.applications import mobilenet_v3
from keras.applications import nasnet
from keras.applications import resnet
from keras.applications import resnet_v2
from keras.applications import vgg16
from keras.applications import vgg19
from keras.applications import xception
from keras.preprocessing import image
from keras.utils import data_utils
ARG_TO_MODEL = {
'resnet': (resnet, [resnet.ResNet50, resnet.ResNet101, resnet.ResNet152]),
'resnet_v2': (resnet_v2, [resnet_v2.ResNet50V2, resnet_v2.ResNet101V2,
resnet_v2.ResNet152V2]),
'vgg16': (vgg16, [vgg16.VGG16]),
'vgg19': (vgg19, [vgg19.VGG19]),
'xception': (xception, [xception.Xception]),
'inception_v3': (inception_v3, [inception_v3.InceptionV3]),
'inception_resnet_v2': (inception_resnet_v2,
[inception_resnet_v2.InceptionResNetV2]),
'mobilenet': (mobilenet, [mobilenet.MobileNet]),
'mobilenet_v2': (mobilenet_v2, [mobilenet_v2.MobileNetV2]),
'mobilenet_v3_small': (mobilenet_v3, [mobilenet_v3.MobileNetV3Small]),
'mobilenet_v3_large': (mobilenet_v3, [mobilenet_v3.MobileNetV3Large]),
'densenet': (densenet, [densenet.DenseNet121,
densenet.DenseNet169, densenet.DenseNet201]),
'nasnet_mobile': (nasnet, [nasnet.NASNetMobile]),
'nasnet_large': (nasnet, [nasnet.NASNetLarge]),
'efficientnet': (efficientnet,
[efficientnet.EfficientNetB0, efficientnet.EfficientNetB1,
efficientnet.EfficientNetB2, efficientnet.EfficientNetB3,
efficientnet.EfficientNetB4, efficientnet.EfficientNetB5,
efficientnet.EfficientNetB6, efficientnet.EfficientNetB7])
}
TEST_IMAGE_PATH = ('https://storage.googleapis.com/tensorflow/'
'keras-applications/tests/elephant.jpg')
_IMAGENET_CLASSES = 1000
# Add a flag to define which application module file is tested.
# This is set as an 'arg' in the build target to guarantee that
# it only triggers the tests of the application models in the module
# if that module file has been modified.
FLAGS = flags.FLAGS
flags.DEFINE_string('module', None,
'Application module used in this test.')
def _get_elephant(target_size):
# For models that don't include a Flatten step,
# the default is to accept variable-size inputs
# even when loading ImageNet weights (since it is possible).
# In this case, default to 299x299.
if target_size[0] is None:
target_size = (299, 299)
test_image = data_utils.get_file('elephant.jpg', TEST_IMAGE_PATH)
img = image.load_img(test_image, target_size=tuple(target_size))
x = image.img_to_array(img)
return np.expand_dims(x, axis=0)
class ApplicationsLoadWeightTest(tf.test.TestCase, parameterized.TestCase):
def assertShapeEqual(self, shape1, shape2):
if len(shape1) != len(shape2):
raise AssertionError(
'Shapes are different rank: %s vs %s' % (shape1, shape2))
if shape1 != shape2:
raise AssertionError('Shapes differ: %s vs %s' % (shape1, shape2))
def test_application_pretrained_weights_loading(self):
app_module = ARG_TO_MODEL[FLAGS.module][0]
apps = ARG_TO_MODEL[FLAGS.module][1]
for app in apps:
model = app(weights='imagenet')
self.assertShapeEqual(model.output_shape, (None, _IMAGENET_CLASSES))
x = _get_elephant(model.input_shape[1:3])
x = app_module.preprocess_input(x)
preds = model.predict(x)
names = [p[1] for p in app_module.decode_predictions(preds)[0]]
# Test correct label is in top 3 (weak correctness test).
self.assertIn('African_elephant', names[:3])
if __name__ == '__main__':
tf.test.main()
| 4,840 | 40.732759 | 80 | py |
keras | keras-master/keras/applications/xception.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Xception V1 model for Keras.
On ImageNet, this model gets to a top-1 validation accuracy of 0.790
and a top-5 validation accuracy of 0.945.
Reference:
- [Xception: Deep Learning with Depthwise Separable Convolutions](
https://arxiv.org/abs/1610.02357) (CVPR 2017)
"""
import tensorflow.compat.v2 as tf
from keras import backend
from keras.applications import imagenet_utils
from keras.engine import training
from keras.layers import VersionAwareLayers
from keras.utils import data_utils
from keras.utils import layer_utils
from tensorflow.python.util.tf_export import keras_export
TF_WEIGHTS_PATH = (
'https://storage.googleapis.com/tensorflow/keras-applications/'
'xception/xception_weights_tf_dim_ordering_tf_kernels.h5')
TF_WEIGHTS_PATH_NO_TOP = (
'https://storage.googleapis.com/tensorflow/keras-applications/'
'xception/xception_weights_tf_dim_ordering_tf_kernels_notop.h5')
layers = VersionAwareLayers()
@keras_export('keras.applications.xception.Xception',
'keras.applications.Xception')
def Xception(
include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax'):
"""Instantiates the Xception architecture.
Reference:
- [Xception: Deep Learning with Depthwise Separable Convolutions](
https://arxiv.org/abs/1610.02357) (CVPR 2017)
For image classification use cases, see
[this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
The default input image size for this model is 299x299.
Note: each Keras Application expects a specific kind of input preprocessing.
For Xception, call `tf.keras.applications.xception.preprocess_input` on your
inputs before passing them to the model.
`xception.preprocess_input` will scale input pixels between -1 and 1.
Args:
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
      has to be `(299, 299, 3)`).
      It should have exactly 3 input channels,
and width and height should be no smaller than 71.
E.g. `(150, 150, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True,
and if no `weights` argument is specified.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
Returns:
A `keras.Model` instance.
"""
if not (weights in {'imagenet', None} or tf.io.gfile.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=299,
min_size=71,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
x = layers.Conv2D(
32, (3, 3),
strides=(2, 2),
use_bias=False,
name='block1_conv1')(img_input)
x = layers.BatchNormalization(axis=channel_axis, name='block1_conv1_bn')(x)
x = layers.Activation('relu', name='block1_conv1_act')(x)
x = layers.Conv2D(64, (3, 3), use_bias=False, name='block1_conv2')(x)
x = layers.BatchNormalization(axis=channel_axis, name='block1_conv2_bn')(x)
x = layers.Activation('relu', name='block1_conv2_act')(x)
residual = layers.Conv2D(
128, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x)
residual = layers.BatchNormalization(axis=channel_axis)(residual)
x = layers.SeparableConv2D(
128, (3, 3), padding='same', use_bias=False, name='block2_sepconv1')(x)
x = layers.BatchNormalization(axis=channel_axis, name='block2_sepconv1_bn')(x)
x = layers.Activation('relu', name='block2_sepconv2_act')(x)
x = layers.SeparableConv2D(
128, (3, 3), padding='same', use_bias=False, name='block2_sepconv2')(x)
x = layers.BatchNormalization(axis=channel_axis, name='block2_sepconv2_bn')(x)
x = layers.MaxPooling2D((3, 3),
strides=(2, 2),
padding='same',
name='block2_pool')(x)
x = layers.add([x, residual])
residual = layers.Conv2D(
256, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x)
residual = layers.BatchNormalization(axis=channel_axis)(residual)
x = layers.Activation('relu', name='block3_sepconv1_act')(x)
x = layers.SeparableConv2D(
256, (3, 3), padding='same', use_bias=False, name='block3_sepconv1')(x)
x = layers.BatchNormalization(axis=channel_axis, name='block3_sepconv1_bn')(x)
x = layers.Activation('relu', name='block3_sepconv2_act')(x)
x = layers.SeparableConv2D(
256, (3, 3), padding='same', use_bias=False, name='block3_sepconv2')(x)
x = layers.BatchNormalization(axis=channel_axis, name='block3_sepconv2_bn')(x)
x = layers.MaxPooling2D((3, 3),
strides=(2, 2),
padding='same',
name='block3_pool')(x)
x = layers.add([x, residual])
residual = layers.Conv2D(
728, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x)
residual = layers.BatchNormalization(axis=channel_axis)(residual)
x = layers.Activation('relu', name='block4_sepconv1_act')(x)
x = layers.SeparableConv2D(
728, (3, 3), padding='same', use_bias=False, name='block4_sepconv1')(x)
x = layers.BatchNormalization(axis=channel_axis, name='block4_sepconv1_bn')(x)
x = layers.Activation('relu', name='block4_sepconv2_act')(x)
x = layers.SeparableConv2D(
728, (3, 3), padding='same', use_bias=False, name='block4_sepconv2')(x)
x = layers.BatchNormalization(axis=channel_axis, name='block4_sepconv2_bn')(x)
x = layers.MaxPooling2D((3, 3),
strides=(2, 2),
padding='same',
name='block4_pool')(x)
x = layers.add([x, residual])
for i in range(8):
residual = x
prefix = 'block' + str(i + 5)
x = layers.Activation('relu', name=prefix + '_sepconv1_act')(x)
x = layers.SeparableConv2D(
728, (3, 3),
padding='same',
use_bias=False,
name=prefix + '_sepconv1')(x)
x = layers.BatchNormalization(
axis=channel_axis, name=prefix + '_sepconv1_bn')(x)
x = layers.Activation('relu', name=prefix + '_sepconv2_act')(x)
x = layers.SeparableConv2D(
728, (3, 3),
padding='same',
use_bias=False,
name=prefix + '_sepconv2')(x)
x = layers.BatchNormalization(
axis=channel_axis, name=prefix + '_sepconv2_bn')(x)
x = layers.Activation('relu', name=prefix + '_sepconv3_act')(x)
x = layers.SeparableConv2D(
728, (3, 3),
padding='same',
use_bias=False,
name=prefix + '_sepconv3')(x)
x = layers.BatchNormalization(
axis=channel_axis, name=prefix + '_sepconv3_bn')(x)
x = layers.add([x, residual])
residual = layers.Conv2D(
1024, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x)
residual = layers.BatchNormalization(axis=channel_axis)(residual)
x = layers.Activation('relu', name='block13_sepconv1_act')(x)
x = layers.SeparableConv2D(
728, (3, 3), padding='same', use_bias=False, name='block13_sepconv1')(x)
x = layers.BatchNormalization(
axis=channel_axis, name='block13_sepconv1_bn')(x)
x = layers.Activation('relu', name='block13_sepconv2_act')(x)
x = layers.SeparableConv2D(
1024, (3, 3), padding='same', use_bias=False, name='block13_sepconv2')(x)
x = layers.BatchNormalization(
axis=channel_axis, name='block13_sepconv2_bn')(x)
x = layers.MaxPooling2D((3, 3),
strides=(2, 2),
padding='same',
name='block13_pool')(x)
x = layers.add([x, residual])
x = layers.SeparableConv2D(
1536, (3, 3), padding='same', use_bias=False, name='block14_sepconv1')(x)
x = layers.BatchNormalization(
axis=channel_axis, name='block14_sepconv1_bn')(x)
x = layers.Activation('relu', name='block14_sepconv1_act')(x)
x = layers.SeparableConv2D(
2048, (3, 3), padding='same', use_bias=False, name='block14_sepconv2')(x)
x = layers.BatchNormalization(
axis=channel_axis, name='block14_sepconv2_bn')(x)
x = layers.Activation('relu', name='block14_sepconv2_act')(x)
if include_top:
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Dense(classes, activation=classifier_activation,
name='predictions')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = layer_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = training.Model(inputs, x, name='xception')
# Load weights.
if weights == 'imagenet':
if include_top:
weights_path = data_utils.get_file(
'xception_weights_tf_dim_ordering_tf_kernels.h5',
TF_WEIGHTS_PATH,
cache_subdir='models',
file_hash='0a58e3b7378bc2990ea3b43d5981f1f6')
else:
weights_path = data_utils.get_file(
'xception_weights_tf_dim_ordering_tf_kernels_notop.h5',
TF_WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
file_hash='b0042744bf5b25fce3cb969f33bebb97')
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
@keras_export('keras.applications.xception.preprocess_input')
def preprocess_input(x, data_format=None):
return imagenet_utils.preprocess_input(x, data_format=data_format, mode='tf')
@keras_export('keras.applications.xception.decode_predictions')
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
mode='',
ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF,
error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
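# The following is an illustrative usage sketch, not part of the original
# module: it exercises the documented preprocess -> predict -> decode flow.
# The local file name 'elephant.jpg' is an assumption made for the example;
# the ImageNet weights are fetched from TF_WEIGHTS_PATH on first use.
if __name__ == '__main__':
  import numpy as np
  from keras.preprocessing import image
  example_model = Xception(weights='imagenet')
  img = image.load_img('elephant.jpg', target_size=(299, 299))
  batch = np.expand_dims(image.img_to_array(img), axis=0)
  batch = preprocess_input(batch)
  print(decode_predictions(example_model.predict(batch), top=3)[0])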
| 13,000 | 38.159639 | 87 | py |
keras | keras-master/keras/applications/inception_resnet_v2.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Inception-ResNet V2 model for Keras.
Reference:
- [Inception-v4, Inception-ResNet and the Impact of
Residual Connections on Learning](https://arxiv.org/abs/1602.07261)
(AAAI 2017)
"""
import tensorflow.compat.v2 as tf
from keras import backend
from keras.applications import imagenet_utils
from keras.engine import training
from keras.layers import VersionAwareLayers
from keras.utils import data_utils
from keras.utils import layer_utils
from tensorflow.python.util.tf_export import keras_export
BASE_WEIGHT_URL = ('https://storage.googleapis.com/tensorflow/'
'keras-applications/inception_resnet_v2/')
layers = None
@keras_export('keras.applications.inception_resnet_v2.InceptionResNetV2',
'keras.applications.InceptionResNetV2')
def InceptionResNetV2(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
**kwargs):
"""Instantiates the Inception-ResNet v2 architecture.
Reference:
- [Inception-v4, Inception-ResNet and the Impact of
Residual Connections on Learning](https://arxiv.org/abs/1602.07261)
(AAAI 2017)
This function returns a Keras image classification model,
optionally loaded with weights pre-trained on ImageNet.
For image classification use cases, see
[this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
Note: each Keras Application expects a specific kind of input preprocessing.
For InceptionResNetV2, call
`tf.keras.applications.inception_resnet_v2.preprocess_input`
on your inputs before passing them to the model.
`inception_resnet_v2.preprocess_input`
will scale input pixels between -1 and 1.
Args:
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is `False` (otherwise the input shape
has to be `(299, 299, 3)` (with `'channels_last'` data format)
      or `(3, 299, 299)` (with `'channels_first'` data format)).
      It should have exactly 3 input channels,
and width and height should be no smaller than 75.
E.g. `(150, 150, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the last convolutional block.
- `'avg'` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `'max'` means that global max pooling will be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is `True`, and
if no `weights` argument is specified.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
**kwargs: For backwards compatibility only.
Returns:
A `keras.Model` instance.
"""
global layers
if 'layers' in kwargs:
layers = kwargs.pop('layers')
else:
layers = VersionAwareLayers()
if kwargs:
raise ValueError('Unknown argument(s): %s' % (kwargs,))
if not (weights in {'imagenet', None} or tf.io.gfile.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=299,
min_size=75,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
# Stem block: 35 x 35 x 192
x = conv2d_bn(img_input, 32, 3, strides=2, padding='valid')
x = conv2d_bn(x, 32, 3, padding='valid')
x = conv2d_bn(x, 64, 3)
x = layers.MaxPooling2D(3, strides=2)(x)
x = conv2d_bn(x, 80, 1, padding='valid')
x = conv2d_bn(x, 192, 3, padding='valid')
x = layers.MaxPooling2D(3, strides=2)(x)
# Mixed 5b (Inception-A block): 35 x 35 x 320
branch_0 = conv2d_bn(x, 96, 1)
branch_1 = conv2d_bn(x, 48, 1)
branch_1 = conv2d_bn(branch_1, 64, 5)
branch_2 = conv2d_bn(x, 64, 1)
branch_2 = conv2d_bn(branch_2, 96, 3)
branch_2 = conv2d_bn(branch_2, 96, 3)
branch_pool = layers.AveragePooling2D(3, strides=1, padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 64, 1)
branches = [branch_0, branch_1, branch_2, branch_pool]
channel_axis = 1 if backend.image_data_format() == 'channels_first' else 3
x = layers.Concatenate(axis=channel_axis, name='mixed_5b')(branches)
# 10x block35 (Inception-ResNet-A block): 35 x 35 x 320
for block_idx in range(1, 11):
x = inception_resnet_block(
x, scale=0.17, block_type='block35', block_idx=block_idx)
# Mixed 6a (Reduction-A block): 17 x 17 x 1088
branch_0 = conv2d_bn(x, 384, 3, strides=2, padding='valid')
branch_1 = conv2d_bn(x, 256, 1)
branch_1 = conv2d_bn(branch_1, 256, 3)
branch_1 = conv2d_bn(branch_1, 384, 3, strides=2, padding='valid')
branch_pool = layers.MaxPooling2D(3, strides=2, padding='valid')(x)
branches = [branch_0, branch_1, branch_pool]
x = layers.Concatenate(axis=channel_axis, name='mixed_6a')(branches)
# 20x block17 (Inception-ResNet-B block): 17 x 17 x 1088
for block_idx in range(1, 21):
x = inception_resnet_block(
x, scale=0.1, block_type='block17', block_idx=block_idx)
# Mixed 7a (Reduction-B block): 8 x 8 x 2080
branch_0 = conv2d_bn(x, 256, 1)
branch_0 = conv2d_bn(branch_0, 384, 3, strides=2, padding='valid')
branch_1 = conv2d_bn(x, 256, 1)
branch_1 = conv2d_bn(branch_1, 288, 3, strides=2, padding='valid')
branch_2 = conv2d_bn(x, 256, 1)
branch_2 = conv2d_bn(branch_2, 288, 3)
branch_2 = conv2d_bn(branch_2, 320, 3, strides=2, padding='valid')
branch_pool = layers.MaxPooling2D(3, strides=2, padding='valid')(x)
branches = [branch_0, branch_1, branch_2, branch_pool]
x = layers.Concatenate(axis=channel_axis, name='mixed_7a')(branches)
# 10x block8 (Inception-ResNet-C block): 8 x 8 x 2080
for block_idx in range(1, 10):
x = inception_resnet_block(
x, scale=0.2, block_type='block8', block_idx=block_idx)
x = inception_resnet_block(
x, scale=1., activation=None, block_type='block8', block_idx=10)
# Final convolution block: 8 x 8 x 1536
x = conv2d_bn(x, 1536, 1, name='conv_7b')
if include_top:
# Classification block
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Dense(classes, activation=classifier_activation,
name='predictions')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = layer_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = training.Model(inputs, x, name='inception_resnet_v2')
# Load weights.
if weights == 'imagenet':
if include_top:
fname = 'inception_resnet_v2_weights_tf_dim_ordering_tf_kernels.h5'
weights_path = data_utils.get_file(
fname,
BASE_WEIGHT_URL + fname,
cache_subdir='models',
file_hash='e693bd0210a403b3192acc6073ad2e96')
else:
fname = ('inception_resnet_v2_weights_'
'tf_dim_ordering_tf_kernels_notop.h5')
weights_path = data_utils.get_file(
fname,
BASE_WEIGHT_URL + fname,
cache_subdir='models',
file_hash='d19885ff4a710c122648d3b5c3b684e4')
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
def conv2d_bn(x,
filters,
kernel_size,
strides=1,
padding='same',
activation='relu',
use_bias=False,
name=None):
"""Utility function to apply conv + BN.
Args:
x: input tensor.
filters: filters in `Conv2D`.
kernel_size: kernel size as in `Conv2D`.
strides: strides in `Conv2D`.
padding: padding mode in `Conv2D`.
activation: activation in `Conv2D`.
use_bias: whether to use a bias in `Conv2D`.
name: name of the ops; will become `name + '_ac'` for the activation
and `name + '_bn'` for the batch norm layer.
Returns:
Output tensor after applying `Conv2D` and `BatchNormalization`.
"""
x = layers.Conv2D(
filters,
kernel_size,
strides=strides,
padding=padding,
use_bias=use_bias,
name=name)(
x)
if not use_bias:
bn_axis = 1 if backend.image_data_format() == 'channels_first' else 3
bn_name = None if name is None else name + '_bn'
x = layers.BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x)
if activation is not None:
ac_name = None if name is None else name + '_ac'
x = layers.Activation(activation, name=ac_name)(x)
return x
def inception_resnet_block(x, scale, block_type, block_idx, activation='relu'):
"""Adds an Inception-ResNet block.
This function builds 3 types of Inception-ResNet blocks mentioned
in the paper, controlled by the `block_type` argument (which is the
block name used in the official TF-slim implementation):
- Inception-ResNet-A: `block_type='block35'`
- Inception-ResNet-B: `block_type='block17'`
- Inception-ResNet-C: `block_type='block8'`
Args:
x: input tensor.
scale: scaling factor to scale the residuals (i.e., the output of passing
`x` through an inception module) before adding them to the shortcut
branch. Let `r` be the output from the residual branch, the output of this
block will be `x + scale * r`.
block_type: `'block35'`, `'block17'` or `'block8'`, determines the network
structure in the residual branch.
block_idx: an `int` used for generating layer names. The Inception-ResNet
blocks are repeated many times in this network. We use `block_idx` to
identify each of the repetitions. For example, the first
Inception-ResNet-A block will have `block_type='block35', block_idx=0`,
and the layer names will have a common prefix `'block35_0'`.
activation: activation function to use at the end of the block (see
[activations](../activations.md)). When `activation=None`, no activation
is applied
(i.e., "linear" activation: `a(x) = x`).
Returns:
Output tensor for the block.
Raises:
ValueError: if `block_type` is not one of `'block35'`,
`'block17'` or `'block8'`.
"""
if block_type == 'block35':
branch_0 = conv2d_bn(x, 32, 1)
branch_1 = conv2d_bn(x, 32, 1)
branch_1 = conv2d_bn(branch_1, 32, 3)
branch_2 = conv2d_bn(x, 32, 1)
branch_2 = conv2d_bn(branch_2, 48, 3)
branch_2 = conv2d_bn(branch_2, 64, 3)
branches = [branch_0, branch_1, branch_2]
elif block_type == 'block17':
branch_0 = conv2d_bn(x, 192, 1)
branch_1 = conv2d_bn(x, 128, 1)
branch_1 = conv2d_bn(branch_1, 160, [1, 7])
branch_1 = conv2d_bn(branch_1, 192, [7, 1])
branches = [branch_0, branch_1]
elif block_type == 'block8':
branch_0 = conv2d_bn(x, 192, 1)
branch_1 = conv2d_bn(x, 192, 1)
branch_1 = conv2d_bn(branch_1, 224, [1, 3])
branch_1 = conv2d_bn(branch_1, 256, [3, 1])
branches = [branch_0, branch_1]
else:
raise ValueError('Unknown Inception-ResNet block type. '
'Expects "block35", "block17" or "block8", '
'but got: ' + str(block_type))
block_name = block_type + '_' + str(block_idx)
channel_axis = 1 if backend.image_data_format() == 'channels_first' else 3
mixed = layers.Concatenate(
axis=channel_axis, name=block_name + '_mixed')(
branches)
up = conv2d_bn(
mixed,
backend.int_shape(x)[channel_axis],
1,
activation=None,
use_bias=True,
name=block_name + '_conv')
x = layers.Lambda(
lambda inputs, scale: inputs[0] + inputs[1] * scale,
output_shape=backend.int_shape(x)[1:],
arguments={'scale': scale},
name=block_name)([x, up])
if activation is not None:
x = layers.Activation(activation, name=block_name + '_ac')(x)
return x
@keras_export('keras.applications.inception_resnet_v2.preprocess_input')
def preprocess_input(x, data_format=None):
return imagenet_utils.preprocess_input(x, data_format=data_format, mode='tf')
@keras_export('keras.applications.inception_resnet_v2.decode_predictions')
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
mode='',
ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF,
error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
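# Illustrative transfer-learning sketch, not part of the original module: the
# headless base ends in the 1536-channel `conv_7b` block built above, so
# `pooling='avg'` yields 1536-d features. The 10-class Dense head is an
# assumption made purely for the example; `weights=None` avoids any download.
if __name__ == '__main__':
  base = InceptionResNetV2(weights=None, include_top=False, pooling='avg',
                           input_shape=(299, 299, 3))
  base.trainable = False
  outputs = tf.keras.layers.Dense(10, activation='softmax')(base.output)
  classifier = tf.keras.Model(base.input, outputs, name='transfer_example')
  classifier.summary()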
| 15,195 | 37.470886 | 87 | py |
keras | keras-master/keras/applications/mobilenet.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""MobileNet v1 models for Keras.
MobileNet is a general architecture and can be used for multiple use cases.
Depending on the use case, it can use different input layer size and
different width factors. This allows different width models to reduce
the number of multiply-adds and thereby
reduce inference cost on mobile devices.
MobileNets support any input size greater than 32 x 32, with larger image sizes
offering better performance.
The number of parameters and number of multiply-adds
can be modified by using the `alpha` parameter,
which increases/decreases the number of filters in each layer.
By altering the image size and `alpha` parameter,
all 16 models from the paper can be built, with ImageNet weights provided.
The paper demonstrates the performance of MobileNets using `alpha` values of
1.0 (also called 100 % MobileNet), 0.75, 0.5 and 0.25.
For each of these `alpha` values, weights for 4 different input image sizes
are provided (224, 192, 160, 128).
The following table describes the size and accuracy of the 100% MobileNet
on size 224 x 224:
----------------------------------------------------------------------------
Width Multiplier (alpha) | ImageNet Acc | Multiply-Adds (M) | Params (M)
----------------------------------------------------------------------------
| 1.0 MobileNet-224 | 70.6 % | 529 | 4.2 |
| 0.75 MobileNet-224 | 68.4 % | 325 | 2.6 |
| 0.50 MobileNet-224 | 63.7 % | 149 | 1.3 |
| 0.25 MobileNet-224 | 50.6 % | 41 | 0.5 |
----------------------------------------------------------------------------
The following table describes the performance of
the 100 % MobileNet on various input sizes:
------------------------------------------------------------------------
Resolution | ImageNet Acc | Multiply-Adds (M) | Params (M)
------------------------------------------------------------------------
| 1.0 MobileNet-224 | 70.6 % | 569 | 4.2 |
| 1.0 MobileNet-192 | 69.1 % | 418 | 4.2 |
| 1.0 MobileNet-160 | 67.2 % | 290 | 4.2 |
| 1.0 MobileNet-128 | 64.4 % | 186 | 4.2 |
------------------------------------------------------------------------
Reference:
- [MobileNets: Efficient Convolutional Neural Networks
for Mobile Vision Applications](
https://arxiv.org/abs/1704.04861)
"""
import tensorflow.compat.v2 as tf
from keras import backend
from keras.applications import imagenet_utils
from keras.engine import training
from keras.layers import VersionAwareLayers
from keras.utils import data_utils
from keras.utils import layer_utils
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
BASE_WEIGHT_PATH = ('https://storage.googleapis.com/tensorflow/'
'keras-applications/mobilenet/')
layers = None
@keras_export('keras.applications.mobilenet.MobileNet',
'keras.applications.MobileNet')
def MobileNet(input_shape=None,
alpha=1.0,
depth_multiplier=1,
dropout=1e-3,
include_top=True,
weights='imagenet',
input_tensor=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
**kwargs):
"""Instantiates the MobileNet architecture.
Reference:
- [MobileNets: Efficient Convolutional Neural Networks
for Mobile Vision Applications](
https://arxiv.org/abs/1704.04861)
This function returns a Keras image classification model,
optionally loaded with weights pre-trained on ImageNet.
For image classification use cases, see
[this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
Note: each Keras Application expects a specific kind of input preprocessing.
For MobileNet, call `tf.keras.applications.mobilenet.preprocess_input`
on your inputs before passing them to the model.
`mobilenet.preprocess_input` will scale input pixels between -1 and 1.
Args:
input_shape: Optional shape tuple, only to be specified if `include_top`
is False (otherwise the input shape has to be `(224, 224, 3)` (with
`channels_last` data format) or (3, 224, 224) (with `channels_first`
      data format)). It should have exactly 3 input channels, and width and
height should be no smaller than 32. E.g. `(200, 200, 3)` would be one
valid value. Default to `None`.
`input_shape` will be ignored if the `input_tensor` is provided.
alpha: Controls the width of the network. This is known as the width
multiplier in the MobileNet paper. - If `alpha` < 1.0, proportionally
decreases the number of filters in each layer. - If `alpha` > 1.0,
proportionally increases the number of filters in each layer. - If
`alpha` = 1, default number of filters from the paper are used at each
layer. Default to 1.0.
depth_multiplier: Depth multiplier for depthwise convolution. This is
called the resolution multiplier in the MobileNet paper. Default to 1.0.
dropout: Dropout rate. Default to 0.001.
include_top: Boolean, whether to include the fully-connected layer at the
top of the network. Default to `True`.
weights: One of `None` (random initialization), 'imagenet' (pre-training
on ImageNet), or the path to the weights file to be loaded. Default to
`imagenet`.
input_tensor: Optional Keras tensor (i.e. output of `layers.Input()`) to
use as image input for the model. `input_tensor` is useful for sharing
inputs between multiple different networks. Default to None.
pooling: Optional pooling mode for feature extraction when `include_top`
is `False`.
- `None` (default) means that the output of the model will be
the 4D tensor output of the last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will be applied.
classes: Optional number of classes to classify images into, only to be
specified if `include_top` is True, and if no `weights` argument is
specified. Defaults to 1000.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
**kwargs: For backwards compatibility only.
Returns:
A `keras.Model` instance.
"""
global layers
if 'layers' in kwargs:
layers = kwargs.pop('layers')
else:
layers = VersionAwareLayers()
if kwargs:
raise ValueError(f'Unknown argument(s): {(kwargs,)}')
if not (weights in {'imagenet', None} or tf.io.gfile.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded. '
f'Received weights={weights}')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top` '
'as true, `classes` should be 1000. '
f'Received classes={classes}')
# Determine proper input shape and default size.
if input_shape is None:
default_size = 224
else:
if backend.image_data_format() == 'channels_first':
rows = input_shape[1]
cols = input_shape[2]
else:
rows = input_shape[0]
cols = input_shape[1]
if rows == cols and rows in [128, 160, 192, 224]:
default_size = rows
else:
default_size = 224
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=default_size,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if backend.image_data_format() == 'channels_last':
row_axis, col_axis = (0, 1)
else:
row_axis, col_axis = (1, 2)
rows = input_shape[row_axis]
cols = input_shape[col_axis]
if weights == 'imagenet':
if depth_multiplier != 1:
raise ValueError('If imagenet weights are being loaded, '
'depth multiplier must be 1. '
                       f'Received depth_multiplier={depth_multiplier}')
if alpha not in [0.25, 0.50, 0.75, 1.0]:
raise ValueError('If imagenet weights are being loaded, '
                       'alpha can be one of '
'`0.25`, `0.50`, `0.75` or `1.0` only. '
f'Received alpha={alpha}')
if rows != cols or rows not in [128, 160, 192, 224]:
rows = 224
logging.warning('`input_shape` is undefined or non-square, '
'or `rows` is not in [128, 160, 192, 224]. '
'Weights for input shape (224, 224) will be '
'loaded as the default.')
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
x = _conv_block(img_input, 32, alpha, strides=(2, 2))
x = _depthwise_conv_block(x, 64, alpha, depth_multiplier, block_id=1)
x = _depthwise_conv_block(
x, 128, alpha, depth_multiplier, strides=(2, 2), block_id=2)
x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=3)
x = _depthwise_conv_block(
x, 256, alpha, depth_multiplier, strides=(2, 2), block_id=4)
x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, block_id=5)
x = _depthwise_conv_block(
x, 512, alpha, depth_multiplier, strides=(2, 2), block_id=6)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=7)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=8)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=9)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=10)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=11)
x = _depthwise_conv_block(
x, 1024, alpha, depth_multiplier, strides=(2, 2), block_id=12)
x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier, block_id=13)
if include_top:
x = layers.GlobalAveragePooling2D(keepdims=True)(x)
x = layers.Dropout(dropout, name='dropout')(x)
x = layers.Conv2D(classes, (1, 1), padding='same', name='conv_preds')(x)
x = layers.Reshape((classes,), name='reshape_2')(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Activation(activation=classifier_activation,
name='predictions')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = layer_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = training.Model(inputs, x, name='mobilenet_%0.2f_%s' % (alpha, rows))
# Load weights.
if weights == 'imagenet':
if alpha == 1.0:
alpha_text = '1_0'
elif alpha == 0.75:
alpha_text = '7_5'
elif alpha == 0.50:
alpha_text = '5_0'
else:
alpha_text = '2_5'
if include_top:
model_name = 'mobilenet_%s_%d_tf.h5' % (alpha_text, rows)
weight_path = BASE_WEIGHT_PATH + model_name
weights_path = data_utils.get_file(
model_name, weight_path, cache_subdir='models')
else:
model_name = 'mobilenet_%s_%d_tf_no_top.h5' % (alpha_text, rows)
weight_path = BASE_WEIGHT_PATH + model_name
weights_path = data_utils.get_file(
model_name, weight_path, cache_subdir='models')
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)):
"""Adds an initial convolution layer (with batch normalization and relu6).
Args:
inputs: Input tensor of shape `(rows, cols, 3)` (with `channels_last`
data format) or (3, rows, cols) (with `channels_first` data format).
      It should have exactly 3 input channels, and width and height should
be no smaller than 32. E.g. `(224, 224, 3)` would be one valid value.
filters: Integer, the dimensionality of the output space (i.e. the
number of output filters in the convolution).
alpha: controls the width of the network. - If `alpha` < 1.0,
proportionally decreases the number of filters in each layer. - If
`alpha` > 1.0, proportionally increases the number of filters in each
layer. - If `alpha` = 1, default number of filters from the paper are
used at each layer.
kernel: An integer or tuple/list of 2 integers, specifying the width and
height of the 2D convolution window. Can be a single integer to
specify the same value for all spatial dimensions.
strides: An integer or tuple/list of 2 integers, specifying the strides
of the convolution along the width and height. Can be a single integer
to specify the same value for all spatial dimensions. Specifying any
stride value != 1 is incompatible with specifying any `dilation_rate`
value != 1. # Input shape
4D tensor with shape: `(samples, channels, rows, cols)` if
data_format='channels_first'
or 4D tensor with shape: `(samples, rows, cols, channels)` if
data_format='channels_last'. # Output shape
4D tensor with shape: `(samples, filters, new_rows, new_cols)` if
data_format='channels_first'
or 4D tensor with shape: `(samples, new_rows, new_cols, filters)` if
data_format='channels_last'. `rows` and `cols` values might have
changed due to stride.
Returns:
Output tensor of block.
"""
channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
filters = int(filters * alpha)
x = layers.Conv2D(
filters,
kernel,
padding='same',
use_bias=False,
strides=strides,
name='conv1')(inputs)
x = layers.BatchNormalization(axis=channel_axis, name='conv1_bn')(x)
return layers.ReLU(6., name='conv1_relu')(x)
def _depthwise_conv_block(inputs,
pointwise_conv_filters,
alpha,
depth_multiplier=1,
strides=(1, 1),
block_id=1):
"""Adds a depthwise convolution block.
A depthwise convolution block consists of a depthwise conv,
batch normalization, relu6, pointwise convolution,
batch normalization and relu6 activation.
Args:
inputs: Input tensor of shape `(rows, cols, channels)` (with
`channels_last` data format) or (channels, rows, cols) (with
`channels_first` data format).
pointwise_conv_filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the pointwise convolution).
alpha: controls the width of the network. - If `alpha` < 1.0,
proportionally decreases the number of filters in each layer. - If
`alpha` > 1.0, proportionally increases the number of filters in each
layer. - If `alpha` = 1, default number of filters from the paper are
used at each layer.
depth_multiplier: The number of depthwise convolution output channels
for each input channel. The total number of depthwise convolution
output channels will be equal to `filters_in * depth_multiplier`.
strides: An integer or tuple/list of 2 integers, specifying the strides
of the convolution along the width and height. Can be a single integer
to specify the same value for all spatial dimensions. Specifying any
stride value != 1 is incompatible with specifying any `dilation_rate`
value != 1.
block_id: Integer, a unique identification designating the block number.
# Input shape
4D tensor with shape: `(batch, channels, rows, cols)` if
data_format='channels_first'
or 4D tensor with shape: `(batch, rows, cols, channels)` if
data_format='channels_last'. # Output shape
4D tensor with shape: `(batch, filters, new_rows, new_cols)` if
data_format='channels_first'
or 4D tensor with shape: `(batch, new_rows, new_cols, filters)` if
data_format='channels_last'. `rows` and `cols` values might have
changed due to stride.
Returns:
Output tensor of block.
"""
channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
pointwise_conv_filters = int(pointwise_conv_filters * alpha)
if strides == (1, 1):
x = inputs
else:
x = layers.ZeroPadding2D(((0, 1), (0, 1)), name='conv_pad_%d' % block_id)(
inputs)
x = layers.DepthwiseConv2D((3, 3),
padding='same' if strides == (1, 1) else 'valid',
depth_multiplier=depth_multiplier,
strides=strides,
use_bias=False,
name='conv_dw_%d' % block_id)(
x)
x = layers.BatchNormalization(
axis=channel_axis, name='conv_dw_%d_bn' % block_id)(
x)
x = layers.ReLU(6., name='conv_dw_%d_relu' % block_id)(x)
x = layers.Conv2D(
pointwise_conv_filters, (1, 1),
padding='same',
use_bias=False,
strides=(1, 1),
name='conv_pw_%d' % block_id)(
x)
x = layers.BatchNormalization(
axis=channel_axis, name='conv_pw_%d_bn' % block_id)(
x)
return layers.ReLU(6., name='conv_pw_%d_relu' % block_id)(x)
@keras_export('keras.applications.mobilenet.preprocess_input')
def preprocess_input(x, data_format=None):
return imagenet_utils.preprocess_input(x, data_format=data_format, mode='tf')
@keras_export('keras.applications.mobilenet.decode_predictions')
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
mode='',
ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF,
error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
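# Illustrative sketch, not part of the original module: shows how the `alpha`
# width multiplier described in the docstring scales the parameter count. The
# chosen alpha values are examples only; `weights=None` keeps the models
# randomly initialized so nothing is downloaded.
if __name__ == '__main__':
  for example_alpha in (0.25, 0.50, 1.0):
    example_model = MobileNet(
        input_shape=(224, 224, 3), alpha=example_alpha, weights=None)
    print('alpha=%.2f -> %d parameters' % (example_alpha,
                                           example_model.count_params()))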
| 19,722 | 42.157549 | 87 | py |
keras | keras-master/keras/applications/applications_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration tests for Keras applications."""
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from keras import backend
from keras.applications import densenet
from keras.applications import efficientnet
from keras.applications import inception_resnet_v2
from keras.applications import inception_v3
from keras.applications import mobilenet
from keras.applications import mobilenet_v2
from keras.applications import mobilenet_v3
from keras.applications import nasnet
from keras.applications import resnet
from keras.applications import resnet_v2
from keras.applications import vgg16
from keras.applications import vgg19
from keras.applications import xception
MODEL_LIST_NO_NASNET = [
(resnet.ResNet50, 2048),
(resnet.ResNet101, 2048),
(resnet.ResNet152, 2048),
(resnet_v2.ResNet50V2, 2048),
(resnet_v2.ResNet101V2, 2048),
(resnet_v2.ResNet152V2, 2048),
(vgg16.VGG16, 512),
(vgg19.VGG19, 512),
(xception.Xception, 2048),
(inception_v3.InceptionV3, 2048),
(inception_resnet_v2.InceptionResNetV2, 1536),
(mobilenet.MobileNet, 1024),
(mobilenet_v2.MobileNetV2, 1280),
(mobilenet_v3.MobileNetV3Small, 1024),
(mobilenet_v3.MobileNetV3Large, 1280),
(densenet.DenseNet121, 1024),
(densenet.DenseNet169, 1664),
(densenet.DenseNet201, 1920),
(efficientnet.EfficientNetB0, 1280),
(efficientnet.EfficientNetB1, 1280),
(efficientnet.EfficientNetB2, 1408),
(efficientnet.EfficientNetB3, 1536),
(efficientnet.EfficientNetB4, 1792),
(efficientnet.EfficientNetB5, 2048),
(efficientnet.EfficientNetB6, 2304),
(efficientnet.EfficientNetB7, 2560),
]
NASNET_LIST = [
(nasnet.NASNetMobile, 1056),
(nasnet.NASNetLarge, 4032),
]
MODEL_LIST = MODEL_LIST_NO_NASNET + NASNET_LIST
class ApplicationsTest(tf.test.TestCase, parameterized.TestCase):
def assertShapeEqual(self, shape1, shape2):
if len(shape1) != len(shape2):
raise AssertionError(
'Shapes are different rank: %s vs %s' % (shape1, shape2))
for v1, v2 in zip(shape1, shape2):
if v1 != v2:
raise AssertionError('Shapes differ: %s vs %s' % (shape1, shape2))
@parameterized.parameters(*MODEL_LIST)
def test_application_base(self, app, _):
# Can be instantiated with default arguments
model = app(weights=None)
# Can be serialized and deserialized
config = model.get_config()
reconstructed_model = model.__class__.from_config(config)
self.assertEqual(len(model.weights), len(reconstructed_model.weights))
backend.clear_session()
@parameterized.parameters(*MODEL_LIST)
def test_application_notop(self, app, last_dim):
    if 'NASNet' in app.__name__ or 'MobileNetV3' in app.__name__:
only_check_last_dim = True
else:
only_check_last_dim = False
output_shape = _get_output_shape(
lambda: app(weights=None, include_top=False))
if only_check_last_dim:
self.assertEqual(output_shape[-1], last_dim)
else:
self.assertShapeEqual(output_shape, (None, None, None, last_dim))
backend.clear_session()
@parameterized.parameters(MODEL_LIST)
def test_application_pooling(self, app, last_dim):
output_shape = _get_output_shape(
lambda: app(weights=None, include_top=False, pooling='avg'))
self.assertShapeEqual(output_shape, (None, last_dim))
@parameterized.parameters(*MODEL_LIST_NO_NASNET)
def test_application_variable_input_channels(self, app, last_dim):
if backend.image_data_format() == 'channels_first':
input_shape = (1, None, None)
else:
input_shape = (None, None, 1)
output_shape = _get_output_shape(
lambda: app(weights=None, include_top=False, input_shape=input_shape))
if 'MobileNetV3' in app.__name__:
self.assertShapeEqual(output_shape, (None, 1, 1, last_dim))
else:
self.assertShapeEqual(output_shape, (None, None, None, last_dim))
backend.clear_session()
if backend.image_data_format() == 'channels_first':
input_shape = (4, None, None)
else:
input_shape = (None, None, 4)
output_shape = _get_output_shape(
lambda: app(weights=None, include_top=False, input_shape=input_shape))
if 'MobileNetV3' in app.__name__:
self.assertShapeEqual(output_shape, (None, 1, 1, last_dim))
else:
self.assertShapeEqual(output_shape, (None, None, None, last_dim))
backend.clear_session()
def _get_output_shape(model_fn):
model = model_fn()
return model.output_shape
if __name__ == '__main__':
tf.test.main()
| 5,219 | 34.27027 | 80 | py |
keras | keras-master/keras/applications/efficientnet_weight_update_util.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Utils for EfficientNet models for Keras.
Write weights from ckpt file as in original repo
(https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet)
to h5 file for keras implementation of the models.
Usage:
# use checkpoint efficientnet-b0/model.ckpt (can be downloaded from
# https://storage.googleapis.com/cloud-tpu-checkpoints/
# efficientnet/ckptsaug/efficientnet-b0.tar.gz)
# to update weight without top layers, saving to efficientnetb0_notop.h5
python efficientnet_weight_update_util.py --model b0 --notop \
--ckpt efficientnet-b0/model.ckpt --o efficientnetb0_notop.h5
# use checkpoint noisy_student_efficientnet-b3/model.ckpt (providing
# improved result for b3, can be downloaded from
# https://storage.googleapis.com/cloud-tpu-checkpoints/
# efficientnet/noisystudent/noisy_student_efficientnet-b3.tar.gz)
# to update weight with top layers, saving to efficientnetb3_new.h5
python efficientnet_weight_update_util.py --model b3 \
--ckpt noisy_student_efficientnet-b3/model.ckpt --o efficientnetb3_new.h5
"""
import tensorflow.compat.v2 as tf
import argparse
import warnings
from tensorflow.keras.applications import efficientnet
def write_ckpt_to_h5(path_h5, path_ckpt, keras_model, use_ema=True):
"""Map the weights in checkpoint file (tf) to h5 file (keras).
Args:
path_h5: str, path to output hdf5 file to write weights loaded from ckpt
files.
path_ckpt: str, path to the ckpt files (e.g. 'efficientnet-b0/model.ckpt')
that records efficientnet weights from original repo
https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet
keras_model: keras model, built from keras.applications efficientnet
functions (e.g. EfficientNetB0)
use_ema: Bool, whether to use ExponentialMovingAverage result or not
"""
model_name_keras = keras_model.name
model_name_tf = model_name_keras.replace('efficientnet', 'efficientnet-')
keras_weight_names = [w.name for w in keras_model.weights]
tf_weight_names = get_variable_names_from_ckpt(path_ckpt)
keras_blocks = get_keras_blocks(keras_weight_names)
tf_blocks = get_tf_blocks(tf_weight_names)
print('check variables match in each block')
for keras_block, tf_block in zip(keras_blocks, tf_blocks):
check_match(keras_block, tf_block, keras_weight_names, tf_weight_names,
model_name_tf)
print('{} and {} match.'.format(tf_block, keras_block))
block_mapping = {x[0]: x[1] for x in zip(keras_blocks, tf_blocks)}
changed_weights = 0
for w in keras_model.weights:
if 'block' in w.name:
# example: 'block1a_dwconv/depthwise_kernel:0' -> 'block1a'
keras_block = w.name.split('/')[0].split('_')[0]
tf_block = block_mapping[keras_block]
tf_name = keras_name_to_tf_name_block(
w.name,
keras_block=keras_block,
tf_block=tf_block,
use_ema=use_ema,
model_name_tf=model_name_tf)
elif any([x in w.name for x in ['stem', 'top', 'predictions', 'probs']]):
tf_name = keras_name_to_tf_name_stem_top(
w.name, use_ema=use_ema, model_name_tf=model_name_tf)
elif 'normalization' in w.name:
      print('skipping variable {}: normalization is a layer '
'in keras implementation, but preprocessing in '
'TF implementation.'.format(w.name))
continue
else:
raise ValueError('{} failed to parse.'.format(w.name))
try:
w_tf = tf.train.load_variable(path_ckpt, tf_name)
if (w.value().numpy() != w_tf).any():
w.assign(w_tf)
changed_weights += 1
except ValueError as e:
if any([x in w.name for x in ['top', 'predictions', 'probs']]):
        warnings.warn('Failed to load top layer variable {} '
                      'from {} because of {}.'.format(w.name, tf_name, e))
else:
        raise ValueError('Failed to load {} from {}'.format(w.name, tf_name))
total_weights = len(keras_model.weights)
print('{}/{} weights updated'.format(changed_weights, total_weights))
keras_model.save_weights(path_h5)
def get_variable_names_from_ckpt(path_ckpt, use_ema=True):
"""Get list of tensor names from checkpoint.
Args:
path_ckpt: str, path to the ckpt files
use_ema: Bool, whether to use ExponentialMovingAverage result or not.
Returns:
List of variable names from checkpoint.
"""
v_all = tf.train.list_variables(path_ckpt)
# keep name only
v_name_all = [x[0] for x in v_all]
if use_ema:
v_name_all = [x for x in v_name_all if 'ExponentialMovingAverage' in x]
else:
v_name_all = [x for x in v_name_all if 'ExponentialMovingAverage' not in x]
# remove util variables used for RMSprop
v_name_all = [x for x in v_name_all if 'RMS' not in x]
return v_name_all
def get_tf_blocks(tf_weight_names):
"""Extract the block names from list of full weight names."""
# Example: 'efficientnet-b0/blocks_0/conv2d/kernel' -> 'blocks_0'
tf_blocks = {x.split('/')[1] for x in tf_weight_names if 'block' in x}
# sort by number
tf_blocks = sorted(tf_blocks, key=lambda x: int(x.split('_')[1]))
return tf_blocks
def get_keras_blocks(keras_weight_names):
"""Extract the block names from list of full weight names."""
# example: 'block1a_dwconv/depthwise_kernel:0' -> 'block1a'
keras_blocks = {x.split('_')[0] for x in keras_weight_names if 'block' in x}
return sorted(keras_blocks)
def keras_name_to_tf_name_stem_top(keras_name,
use_ema=True,
model_name_tf='efficientnet-b0'):
"""Mapping name in h5 to ckpt that is in stem or top (head).
we map name keras_name that points to a weight in h5 file
to a name of weight in ckpt file.
Args:
keras_name: str, the name of weight in the h5 file of keras implementation
    use_ema: Bool, whether to use the ExponentialMovingAverage result in ckpt or not
model_name_tf: str, the name of model in ckpt.
Returns:
String for the name of weight as in ckpt file.
Raises:
KeyError: if we cannot parse the keras_name.
"""
if use_ema:
ema = '/ExponentialMovingAverage'
else:
ema = ''
stem_top_dict = {
'probs/bias:0': '{}/head/dense/bias{}',
'probs/kernel:0': '{}/head/dense/kernel{}',
'predictions/bias:0': '{}/head/dense/bias{}',
'predictions/kernel:0': '{}/head/dense/kernel{}',
'stem_conv/kernel:0': '{}/stem/conv2d/kernel{}',
'top_conv/kernel:0': '{}/head/conv2d/kernel{}',
}
for x in stem_top_dict:
stem_top_dict[x] = stem_top_dict[x].format(model_name_tf, ema)
# stem batch normalization
for bn_weights in ['beta', 'gamma', 'moving_mean', 'moving_variance']:
tf_name = '{}/stem/tpu_batch_normalization/{}{}'.format(
model_name_tf, bn_weights, ema)
stem_top_dict['stem_bn/{}:0'.format(bn_weights)] = tf_name
# top / head batch normalization
for bn_weights in ['beta', 'gamma', 'moving_mean', 'moving_variance']:
tf_name = '{}/head/tpu_batch_normalization/{}{}'.format(
model_name_tf, bn_weights, ema)
stem_top_dict['top_bn/{}:0'.format(bn_weights)] = tf_name
if keras_name in stem_top_dict:
return stem_top_dict[keras_name]
raise KeyError('{} from h5 file cannot be parsed'.format(keras_name))
def keras_name_to_tf_name_block(keras_name,
keras_block='block1a',
tf_block='blocks_0',
use_ema=True,
model_name_tf='efficientnet-b0'):
"""Mapping name in h5 to ckpt that belongs to a block.
we map name keras_name that points to a weight in h5 file
to a name of weight in ckpt file.
Args:
keras_name: str, the name of weight in the h5 file of keras implementation
keras_block: str, the block name for keras implementation (e.g. 'block1a')
tf_block: str, the block name for tf implementation (e.g. 'blocks_0')
    use_ema: Bool, whether to use the ExponentialMovingAverage result in ckpt or not
model_name_tf: str, the name of model in ckpt.
Returns:
String for the name of weight as in ckpt file.
Raises:
ValueError if keras_block does not show up in keras_name
"""
if keras_block not in keras_name:
raise ValueError('block name {} not found in {}'.format(
keras_block, keras_name))
# all blocks in the first group will not have expand conv and bn
is_first_blocks = (keras_block[5] == '1')
tf_name = [model_name_tf, tf_block]
  # depthwise conv
if 'dwconv' in keras_name:
tf_name.append('depthwise_conv2d')
tf_name.append('depthwise_kernel')
# conv layers
if is_first_blocks:
# first blocks only have one conv2d
if 'project_conv' in keras_name:
tf_name.append('conv2d')
tf_name.append('kernel')
else:
if 'project_conv' in keras_name:
tf_name.append('conv2d_1')
tf_name.append('kernel')
elif 'expand_conv' in keras_name:
tf_name.append('conv2d')
tf_name.append('kernel')
# squeeze expansion layers
if '_se_' in keras_name:
if 'reduce' in keras_name:
tf_name.append('se/conv2d')
elif 'expand' in keras_name:
tf_name.append('se/conv2d_1')
if 'kernel' in keras_name:
tf_name.append('kernel')
elif 'bias' in keras_name:
tf_name.append('bias')
# batch normalization layers
if 'bn' in keras_name:
if is_first_blocks:
if 'project' in keras_name:
tf_name.append('tpu_batch_normalization_1')
else:
tf_name.append('tpu_batch_normalization')
else:
if 'project' in keras_name:
tf_name.append('tpu_batch_normalization_2')
elif 'expand' in keras_name:
tf_name.append('tpu_batch_normalization')
else:
tf_name.append('tpu_batch_normalization_1')
for x in ['moving_mean', 'moving_variance', 'beta', 'gamma']:
if x in keras_name:
tf_name.append(x)
if use_ema:
tf_name.append('ExponentialMovingAverage')
return '/'.join(tf_name)
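# Example of the mapping above (derived from the rules in this function): the
# keras weight 'block1a_dwconv/depthwise_kernel:0' in keras block 'block1a'
# and tf block 'blocks_0' maps, with use_ema=True, to
# 'efficientnet-b0/blocks_0/depthwise_conv2d/depthwise_kernel/ExponentialMovingAverage'.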
def check_match(keras_block, tf_block, keras_weight_names, tf_weight_names,
model_name_tf):
"""Check if the weights in h5 and ckpt match.
  We match each name from keras_weight_names that belongs to keras_block
  and check that there is a 1-1 correspondence with the names from
  tf_weight_names that belong to tf_block.
Args:
keras_block: str, the block name for keras implementation (e.g. 'block1a')
tf_block: str, the block name for tf implementation (e.g. 'blocks_0')
keras_weight_names: list of str, weight names in keras implementation
tf_weight_names: list of str, weight names in tf implementation
model_name_tf: str, the name of model in ckpt.
"""
names_from_keras = set()
for x in keras_weight_names:
if keras_block in x:
y = keras_name_to_tf_name_block(
x,
keras_block=keras_block,
tf_block=tf_block,
model_name_tf=model_name_tf)
names_from_keras.add(y)
names_from_tf = set()
for x in tf_weight_names:
if tf_block in x and x.split('/')[1].endswith(tf_block):
names_from_tf.add(x)
names_missing = names_from_keras - names_from_tf
if names_missing:
raise ValueError('{} variables not found in checkpoint file: {}'.format(
len(names_missing), names_missing))
names_unused = names_from_tf - names_from_keras
if names_unused:
warnings.warn('{} variables from checkpoint file are not used: {}'.format(
len(names_unused), names_unused))
if __name__ == '__main__':
arg_to_model = {
'b0': efficientnet.EfficientNetB0,
'b1': efficientnet.EfficientNetB1,
'b2': efficientnet.EfficientNetB2,
'b3': efficientnet.EfficientNetB3,
'b4': efficientnet.EfficientNetB4,
'b5': efficientnet.EfficientNetB5,
'b6': efficientnet.EfficientNetB6,
'b7': efficientnet.EfficientNetB7
}
p = argparse.ArgumentParser(description='write weights from checkpoint to h5')
p.add_argument(
'--model',
required=True,
type=str,
help='name of efficient model',
choices=arg_to_model.keys())
p.add_argument(
'--notop',
action='store_true',
help='do not include top layers',
default=False)
p.add_argument('--ckpt', required=True, type=str, help='checkpoint path')
p.add_argument(
'--output', '-o', required=True, type=str, help='output (h5) file path')
args = p.parse_args()
include_top = not args.notop
model = arg_to_model[args.model](include_top=include_top)
write_ckpt_to_h5(args.output, args.ckpt, keras_model=model)
| 13,222 | 34.834688 | 80 | py |
ReCO | ReCO-master/test.py | # -*- coding: utf-8 -*-
"""
@Time    :   2020/6/23 1:43 PM
@FileName: test.py
@author: 王炳宁
@contact: wangbingning@sogou-inc.com
"""
import argparse
import torch
from model import Bert4ReCO
from utils import *
parser = argparse.ArgumentParser()
parser.add_argument("--model_type", type=str, default='bert-base-chinese')
parser.add_argument("--batch_size", type=int, default=16)
parser.add_argument(
"--fp16",
action="store_true",
default=True,
)
args = parser.parse_args()
model_type = args.model_type
batch_size = args.batch_size
test_data = load_file('data/test.{}.obj'.format(model_type.replace('/', '.')))
test_data = sorted(test_data, key=lambda x: len(x[0]))
model = Bert4ReCO(model_type)
model.load_state_dict(torch.load('checkpoint.{}.th'.format(model_type.replace('/', '.')), map_location='cpu'))
model.cuda()
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
[model] = amp.initialize([model], opt_level='O1', verbosity=0)
model.eval()
total = len(test_data)
right = 0
with torch.no_grad():
for i in tqdm(range(0, total, batch_size)):
seq = [x[0] for x in test_data[i:i + batch_size]]
labels = [x[1] for x in test_data[i:i + batch_size]]
seq = padding(seq, pads=0, max_len=512)
seq = torch.LongTensor(seq).cuda()
predictions = model([seq, None])
predictions = predictions.cpu()
right += predictions.eq(torch.LongTensor(labels)).sum().item()
acc = 100 * right / total
print('test acc is {}'.format(acc))
| 1,629 | 30.346154 | 110 | py |
ReCO | ReCO-master/prepare_data.py | # -*- coding: utf-8 -*-
"""
@Time    :   2020/6/23 10:15 AM
@FileName: prepare_data.py
@author: 王炳宁
@contact: wangbingning@sogou-inc.com
"""
import json
import random
from transformers import BertTokenizer
from utils import *
tokenizer = None
def get_shuffled_answer(alternatives):
answers_index = [0, 1, 2]
random.shuffle(answers_index)
alternatives = [alternatives[x] for x in answers_index]
label = list(answers_index).index(0)
return alternatives, label
def get_one_sample_features(one):
alternatives, label = get_shuffled_answer(one['alternatives'].split('|'))
query = one['query']
paragraph = clean(one['passage'])
alt_ids = [y for x in alternatives for y in [1] + tokenizer.encode(x)]
seq_ids = alt_ids + [2] + tokenizer.encode(query) + [
tokenizer.sep_token_id]
seq_ids += tokenizer.encode(paragraph, max_length=tokenizer.max_len - len(seq_ids))
return [seq_ids, label]
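# A rough sketch of the resulting id sequence (ids are illustrative only):
#   [1, <alt_1 ids>, 1, <alt_2 ids>, 1, <alt_3 ids>, 2, <query ids>, SEP, <passage ids>]
# where 1 marks the start of each shuffled candidate answer, 2 separates the
# candidates from the query, and the passage is truncated so that the whole
# sequence stays within tokenizer.max_len.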
def convert_to_features(filename):
with open(filename, encoding='utf-8') as f:
raw = json.load(f)
data = multi_process(get_one_sample_features, raw)
print('get {} with {} samples'.format(filename, len(data)))
return data
def prepare_bert_data(model_type='bert-base-chinese'):
global tokenizer
tokenizer = BertTokenizer.from_pretrained(model_type)
if not os.path.exists('data/test.{}.obj'.format(model_type.replace('/', '.'))):
test_data = convert_to_features('data/ReCO/ReCO.testa.json')
dump_file(test_data, 'data/test.{}.obj'.format(model_type.replace('/', '.')))
if not os.path.exists('data/valid.{}.obj'.format(model_type.replace('/', '.'))):
valid_data = convert_to_features('data/ReCO/ReCO.validationset.json')
dump_file(valid_data, 'data/valid.{}.obj'.format(model_type.replace('/', '.')))
if not os.path.exists('data/train.{}.obj'.format(model_type.replace('/', '.'))):
train_data = convert_to_features('data/ReCO/ReCO.trainingset.json')
dump_file(train_data, 'data/train.{}.obj'.format(model_type.replace('/', '.')))
| 2,068 | 35.298246 | 87 | py |
ReCO | ReCO-master/utils.py | # -*- coding: utf-8 -*-
"""
@Time    :   2019/11/20 6:14 PM
@FileName: utils.py
@author: 王炳宁
@contact: wangbingning@sogou-inc.com
"""
import os
import pickle
import re
import numpy as np
import multiprocessing
from joblib import Parallel, delayed
from tqdm import tqdm
def multi_process(func, lst, num_cores=multiprocessing.cpu_count(), backend='multiprocessing'):
workers = Parallel(n_jobs=num_cores, backend=backend)
output = workers(delayed(func)(one) for one in tqdm(lst))
return [x for x in output if x]
def DBC2SBC(ustring):
rstring = ""
for uchar in ustring:
inside_code = ord(uchar)
if inside_code == 0x3000:
inside_code = 0x0020
else:
inside_code -= 0xfee0
if not (0x0021 <= inside_code <= 0x7e):
rstring += uchar
continue
rstring += chr(inside_code)
return rstring
def clean(txt):
txt = DBC2SBC(txt)
txt = txt.lower()
    return re.sub(r'\s+', '', txt)
def dump_file(obj, filename):
f = open(filename, 'wb')
pickle.dump(obj, f)
def load_file(filename):
with open(filename, 'rb') as f:
data = pickle.load(f)
return data
def get_dir_files(dirname):
L = []
for root, dirs, files in os.walk(dirname):
for file in files:
L.append(os.path.join(root, file))
return L
def padding(sequence, pads=0, max_len=None, dtype='int32'):
v_length = [len(x) for x in sequence] # every sequence length
seq_max_len = max(v_length)
if (max_len is None) or (max_len > seq_max_len):
max_len = seq_max_len
x = (np.ones((len(sequence), max_len)) * pads).astype(dtype)
for idx, s in enumerate(sequence):
trunc = s[:max_len]
x[idx, :len(trunc)] = trunc
return x
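# Illustrative example (not part of the original file):
#   padding([[3, 4, 5], [6, 7]], pads=0)
#   -> array([[3, 4, 5],
#             [6, 7, 0]], dtype=int32)
# Sequences longer than max_len are truncated, shorter ones are right-padded with `pads`.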
| 1,783 | 23.438356 | 95 | py |
ReCO | ReCO-master/model.py | # -*- coding: utf-8 -*-
"""
@Time    :   2020/6/23 10:13 AM
@FileName: model.py
@author: 王炳宁
@contact: wangbingning@sogou-inc.com
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import AutoModel
class Bert4ReCO(nn.Module):
def __init__(self, model_type):
super().__init__()
self.encoder = AutoModel.from_pretrained(model_type)
self.n_hidden = self.encoder.config.hidden_size
self.prediction = nn.Linear(self.n_hidden, 1, bias=False)
def forward(self, inputs):
[seq, label] = inputs
hidden = self.encoder(seq)[0]
        mask_idx = torch.eq(seq, 1)  # 1 is the separator id inserted before each candidate in the seq.
hidden = hidden.masked_select(mask_idx.unsqueeze(2).expand_as(hidden)).view(
-1, 3, self.n_hidden)
hidden = self.prediction(hidden).squeeze(-1)
if label is None:
return hidden.argmax(1)
return F.cross_entropy(hidden, label)
if __name__ == '__main__':
model = Bert4ReCO('voidful/albert_chinese_xxlarge')
| 1,073 | 28.833333 | 93 | py |
ReCO | ReCO-master/train.py | # -*- coding: utf-8 -*-
"""
@Time    :   2019/11/21 7:14 PM
@FileName: train.py
@author: 王炳宁
@contact: wangbingning@sogou-inc.com
"""
import argparse
import torch
from model import Bert4ReCO
from prepare_data import prepare_bert_data
from utils import *
import torch.distributed as dist
torch.manual_seed(100)
np.random.seed(100)
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", type=int, default=16)
parser.add_argument("--epoch", type=int, default=10)
parser.add_argument("--lr", type=float, default=2.0e-5)
parser.add_argument("--max_grad_norm", type=float, default=0.2)
parser.add_argument("--model_type", type=str, default="voidful/albert_chinese_base")
parser.add_argument(
"--fp16",
action="store_true",
default=True,
)
parser.add_argument("--local_rank", type=int, default=-1)
args = parser.parse_args()
model_type = args.model_type
local_rank = args.local_rank
if local_rank >= 0:
torch.distributed.init_process_group(backend='nccl',
init_method='env://')
torch.cuda.set_device(args.local_rank)
if local_rank in [-1, 0]:
prepare_bert_data(model_type)
if local_rank >= 0:
dist.barrier() # wait for the first gpu to load data
data = load_file('data/train.{}.obj'.format(model_type.replace('/', '.')))
valid_data = load_file('data/valid.{}.obj'.format(model_type.replace('/', '.')))
valid_data = sorted(valid_data, key=lambda x: len(x[0]))
batch_size = args.batch_size
model = Bert4ReCO(model_type).cuda()
optimizer = torch.optim.AdamW(model.parameters(),
weight_decay=0.01,
lr=args.lr)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level='O1', verbosity=0)
if local_rank >= 0:
try:
import apex
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use parallel training.")
model = apex.parallel.DistributedDataParallel(model)
def get_shuffle_data():
pool = {}
for one in data:
length = len(one[0]) // 5
if length not in pool:
pool[length] = []
pool[length].append(one)
for one in pool:
np.random.shuffle(pool[one])
length_lst = list(pool.keys())
np.random.shuffle(length_lst)
whole_data = [x for y in length_lst for x in pool[y]]
if local_rank >= 0:
remove_data_size = len(whole_data) % dist.get_world_size()
thread_data = [whole_data[x + args.local_rank] for x in
range(0, len(whole_data) - remove_data_size, dist.get_world_size())]
return thread_data
return whole_data
def iter_printer(total, epoch):
if local_rank >= 0:
if local_rank == 0:
return tqdm(range(0, total, batch_size), desc='epoch {}'.format(epoch))
else:
return range(0, total, batch_size)
else:
return tqdm(range(0, total, batch_size), desc='epoch {}'.format(epoch))
def train(epoch):
model.train()
train_data = get_shuffle_data()
total = len(train_data)
for i in iter_printer(total, epoch):
seq = [x[0] for x in train_data[i:i + batch_size]]
label = [x[1] for x in train_data[i:i + batch_size]]
seq = padding(seq, pads=0, max_len=512)
seq = torch.LongTensor(seq).cuda()
label = torch.LongTensor(label).cuda()
loss = model([seq, label])
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
optimizer.zero_grad()
def evaluation(epoch):
model.eval()
total = len(valid_data)
right = 0
with torch.no_grad():
for i in iter_printer(total, epoch):
seq = [x[0] for x in valid_data[i:i + batch_size]]
labels = [x[1] for x in valid_data[i:i + batch_size]]
seq = padding(seq, pads=0, max_len=512)
seq = torch.LongTensor(seq).cuda()
predictions = model([seq, None])
predictions = predictions.cpu()
right += predictions.eq(torch.LongTensor(labels)).sum().item()
acc = 100 * right / total
print('epoch {} eval acc is {}'.format(epoch, acc))
return acc
best_acc = 0.0
for epo in range(args.epoch):
train(epo)
if local_rank == -1 or local_rank == 0:
accuracy = evaluation(epo)
if accuracy > best_acc:
best_acc = accuracy
with open('checkpoint.{}.th'.format(model_type.replace('/', '.')), 'wb') as f:
                state_dict = model.module.state_dict() if hasattr(model, 'module') else model.state_dict()
torch.save(state_dict, f)
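# Typical invocations (illustrative; apex and the prepared ReCO data under data/ are assumed):
#   single GPU:  python train.py --batch_size 16 --model_type bert-base-chinese
#   multi GPU:   python -m torch.distributed.launch --nproc_per_node=4 train.py
# torch.distributed.launch fills in --local_rank automatically for each process.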
| 5,074 | 33.290541 | 114 | py |
ReCO | ReCO-master/BiDAF/inference.py | # -*- coding: utf-8 -*-
import argparse
import cPickle
import codecs
import torch
from utils import *
from preprocess import seg_data, transform_data_to_id
parser = argparse.ArgumentParser(description='inference procedure, note you should train the data at first')
parser.add_argument('--data', type=str,
default='data/ai_challenger_oqmrc_testa_20180816/ai_challenger_oqmrc_testa.json',
help='location of the test data')
parser.add_argument('--word_path', type=str, default='data/word2id.obj',
help='location of the word2id.obj')
parser.add_argument('--output', type=str, default='data/prediction.a.txt',
help='prediction path')
parser.add_argument('--model', type=str, default='model.pt',
help='model path')
parser.add_argument('--batch_size', type=int, default=32, metavar='N',
help='batch size')
parser.add_argument('--cuda', action='store_true',default=True,
help='use CUDA')
args = parser.parse_args()
with open(args.model, 'rb') as f:
model = torch.load(f)
if args.cuda:
model.cuda()
with open(args.word_path, 'rb') as f:
word2id = cPickle.load(f)
raw_data = seg_data(args.data)
transformed_data = transform_data_to_id(raw_data, word2id)
data = [x + [y[2]] for x, y in zip(transformed_data, raw_data)]
data = sorted(data, key=lambda x: len(x[1]))
print 'test data size {:d}'.format(len(data))
def inference():
model.eval()
predictions = []
with torch.no_grad():
for i in range(0, len(data), args.batch_size):
one = data[i:i + args.batch_size]
query, _ = padding([x[0] for x in one], max_len=50)
passage, _ = padding([x[1] for x in one], max_len=300)
answer = pad_answer([x[2] for x in one])
str_words = [x[-1] for x in one]
ids = [x[3] for x in one]
query, passage, answer = torch.LongTensor(query), torch.LongTensor(passage), torch.LongTensor(answer)
if args.cuda:
query = query.cuda()
passage = passage.cuda()
answer = answer.cuda()
output = model([query, passage, answer, False])
for q_id, prediction, candidates in zip(ids, output, str_words):
prediction_answer = u''.join(candidates[prediction])
predictions.append(str(q_id) + '\t' + prediction_answer)
outputs = u'\n'.join(predictions)
with codecs.open(args.output, 'w',encoding='utf-8') as f:
f.write(outputs)
print 'done!'
if __name__ == '__main__':
inference()
| 2,636 | 34.635135 | 113 | py |
ReCO | ReCO-master/BiDAF/utils.py | # -*- coding: utf-8 -*-
import numpy as np
def pad_answer(batch):
output = []
length_info = [len(x[0]) for x in batch]
max_length = max(length_info)
for one in batch:
output.append([x + [0] * (max_length - len(x)) for x in one])
return output
def get_model_parameters(model):
total = 0
for parameter in model.parameters():
if parameter.requires_grad:
tmp = 1
for a in parameter.size():
tmp *= a
total += tmp
return total
def padding(sequence, pads=0, max_len=None, dtype='int32', return_matrix_for_size=False):
    # check whether we got a batch of sequences (list of lists) or a single sequence
if True or isinstance(sequence[0], list):
v_length = [len(x) for x in sequence] # every sequence length
seq_max_len = max(v_length)
if (max_len is None) or (max_len > seq_max_len):
max_len = seq_max_len
v_length = list(map(lambda z: z if z <= max_len else max_len, v_length))
x = (np.ones((len(sequence), max_len)) * pads).astype(dtype)
for idx, s in enumerate(sequence):
trunc = s[:max_len]
x[idx, :len(trunc)] = trunc
if return_matrix_for_size:
v_matrix = np.asanyarray([map(lambda item: 1 if item < line else 0, range(max_len)) for line in v_length],
dtype=dtype)
return x, v_matrix
return x, np.asarray(v_length, dtype='int32')
else:
seq_len = len(sequence)
if max_len is None:
max_len = seq_len
v_vector = sequence + [0] * (max_len - seq_len)
padded_vector = np.asarray(v_vector, dtype=dtype)
v_index = [1] * seq_len + [0] * (max_len - seq_len)
padded_index = np.asanyarray(v_index, dtype=dtype)
return padded_vector, padded_index
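# Illustrative example (not part of the original file): for a list of lists the
# function returns the padded matrix together with the original lengths:
#   padding([[3, 4, 5], [6, 7]])
#   -> (array([[3, 4, 5],
#              [6, 7, 0]], dtype=int32), array([3, 2], dtype=int32))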
def shuffle_data(data, axis=1):
pool = {}
for one in data:
length = len(one[axis])
if length not in pool:
pool[length] = []
pool[length].append(one)
for one in pool:
np.random.shuffle(pool[one])
length_lst = list(pool.keys())
np.random.shuffle(length_lst)
return [x for y in length_lst for x in pool[y]]
| 2,196 | 32.8 | 118 | py |
ReCO | ReCO-master/BiDAF/MwAN.py | # -*- coding: utf-8 -*-
import torch
from torch import nn
from torch.nn import functional as F
class MwAN(nn.Module):
def __init__(self, vocab_size, embedding_size, encoder_size, drop_out=0.2):
super(MwAN, self).__init__()
self.drop_out=drop_out
self.embedding = nn.Embedding(vocab_size + 1, embedding_dim=embedding_size)
self.q_encoder = nn.GRU(input_size=embedding_size, hidden_size=encoder_size, batch_first=True,
bidirectional=True)
self.p_encoder = nn.GRU(input_size=embedding_size, hidden_size=encoder_size, batch_first=True,
bidirectional=True)
self.a_encoder = nn.GRU(input_size=embedding_size, hidden_size=embedding_size / 2, batch_first=True,
bidirectional=True)
self.a_attention = nn.Linear(embedding_size, 1, bias=False)
# Concat Attention
self.Wc1 = nn.Linear(2 * encoder_size, encoder_size, bias=False)
self.Wc2 = nn.Linear(2 * encoder_size, encoder_size, bias=False)
self.vc = nn.Linear(encoder_size, 1, bias=False)
# Bilinear Attention
self.Wb = nn.Linear(2 * encoder_size, 2 * encoder_size, bias=False)
# Dot Attention :
self.Wd = nn.Linear(2 * encoder_size, encoder_size, bias=False)
self.vd = nn.Linear(encoder_size, 1, bias=False)
# Minus Attention :
self.Wm = nn.Linear(2 * encoder_size, encoder_size, bias=False)
self.vm = nn.Linear(encoder_size, 1, bias=False)
self.Ws = nn.Linear(2 * encoder_size, encoder_size, bias=False)
self.vs = nn.Linear(encoder_size, 1, bias=False)
self.gru_agg = nn.GRU(12 * encoder_size, encoder_size, batch_first=True, bidirectional=True)
"""
prediction layer
"""
self.Wq = nn.Linear(2 * encoder_size, encoder_size, bias=False)
self.vq = nn.Linear(encoder_size, 1, bias=False)
self.Wp1 = nn.Linear(2 * encoder_size, encoder_size, bias=False)
self.Wp2 = nn.Linear(2 * encoder_size, encoder_size, bias=False)
self.vp = nn.Linear(encoder_size, 1, bias=False)
self.prediction = nn.Linear(2 * encoder_size, embedding_size, bias=False)
self.initiation()
def initiation(self):
initrange = 0.1
nn.init.uniform_(self.embedding.weight, -initrange, initrange)
for module in self.modules():
if isinstance(module, nn.Linear):
nn.init.xavier_uniform_(module.weight, 0.1)
def forward(self, inputs):
[query, passage, answer, is_train] = inputs
q_embedding = self.embedding(query)
p_embedding = self.embedding(passage)
a_embeddings = self.embedding(answer)
a_embedding, _ = self.a_encoder(a_embeddings.view(-1, a_embeddings.size(2), a_embeddings.size(3)))
a_score = F.softmax(self.a_attention(a_embedding), 1)
a_output = a_score.transpose(2, 1).bmm(a_embedding).squeeze()
a_embedding = a_output.view(a_embeddings.size(0), 3, -1)
hq, _ = self.q_encoder(p_embedding)
hq=F.dropout(hq,self.drop_out)
hp, _ = self.p_encoder(q_embedding)
hp=F.dropout(hp,self.drop_out)
_s1 = self.Wc1(hq).unsqueeze(1)
_s2 = self.Wc2(hp).unsqueeze(2)
sjt = self.vc(torch.tanh(_s1 + _s2)).squeeze()
ait = F.softmax(sjt, 2)
qtc = ait.bmm(hq)
_s1 = self.Wb(hq).transpose(2, 1)
sjt = hp.bmm(_s1)
ait = F.softmax(sjt, 2)
qtb = ait.bmm(hq)
_s1 = hq.unsqueeze(1)
_s2 = hp.unsqueeze(2)
sjt = self.vd(torch.tanh(self.Wd(_s1 * _s2))).squeeze()
ait = F.softmax(sjt, 2)
qtd = ait.bmm(hq)
sjt = self.vm(torch.tanh(self.Wm(_s1 - _s2))).squeeze()
ait = F.softmax(sjt, 2)
qtm = ait.bmm(hq)
_s1 = hp.unsqueeze(1)
_s2 = hp.unsqueeze(2)
sjt = self.vs(torch.tanh(self.Ws(_s1 * _s2))).squeeze()
ait = F.softmax(sjt, 2)
qts = ait.bmm(hp)
aggregation = torch.cat([hp, qts, qtc, qtd, qtb, qtm], 2)
aggregation_representation, _ = self.gru_agg(aggregation)
sj = self.vq(torch.tanh(self.Wq(hq))).transpose(2, 1)
rq = F.softmax(sj, 2).bmm(hq)
sj = F.softmax(self.vp(self.Wp1(aggregation_representation) + self.Wp2(rq)).transpose(2, 1), 2)
rp = sj.bmm(aggregation_representation)
encoder_output = F.dropout(F.leaky_relu(self.prediction(rp)),self.drop_out)
score = F.softmax(a_embedding.bmm(encoder_output.transpose(2, 1)).squeeze(), 1)
if not is_train:
return score.argmax(1)
loss = -torch.log(score[:, 0]).mean()
return loss
| 4,704 | 44.679612 | 108 | py |
ReCO | ReCO-master/BiDAF/__init__.py | # -*- coding: utf-8 -*-
| 24 | 11.5 | 23 | py |
ReCO | ReCO-master/BiDAF/train.py | # -*- coding: utf-8 -*-
import argparse
import cPickle
import torch
from MwAN import MwAN
from preprocess import process_data
from utils import *
parser = argparse.ArgumentParser(description='PyTorch implementation for Multiway Attention Networks for Modeling '
'Sentence Pairs of the AI-Challenges')
parser.add_argument('--data', type=str, default='data/',
help='location directory of the data corpus')
parser.add_argument('--threshold', type=int, default=5,
help='threshold count of the word')
parser.add_argument('--epoch', type=int, default=50,
help='training epochs')
parser.add_argument('--emsize', type=int, default=128,
help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=128,
help='hidden size of the model')
parser.add_argument('--batch_size', type=int, default=32, metavar='N',
help='batch size')
parser.add_argument('--log_interval', type=int, default=300,
help='# of batches to see the training error')
parser.add_argument('--dropout', type=float, default=0.2,
help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--cuda', action='store_true',
help='use CUDA')
parser.add_argument('--save', type=str, default='model.pt',
help='path to save the final model')
args = parser.parse_args()
# vocab_size = process_data(args.data, args.threshold)
vocab_size = 98745
model = MwAN(vocab_size=vocab_size, embedding_size=args.emsize, encoder_size=args.nhid, drop_out=args.dropout)
print('Model total parameters:', get_model_parameters(model))
if args.cuda:
model.cuda()
optimizer = torch.optim.Adamax(model.parameters())
with open(args.data + 'train.pickle', 'rb') as f:
train_data = cPickle.load(f)
with open(args.data + 'dev.pickle', 'rb') as f:
dev_data = cPickle.load(f)
dev_data = sorted(dev_data, key=lambda x: len(x[1]))
print('train data size {:d}, dev data size {:d}'.format(len(train_data), len(dev_data)))
def train(epoch):
model.train()
data = shuffle_data(train_data, 1)
total_loss = 0.0
for num, i in enumerate(range(0, len(data), args.batch_size)):
one = data[i:i + args.batch_size]
query, _ = padding([x[0] for x in one], max_len=50)
passage, _ = padding([x[1] for x in one], max_len=350)
answer = pad_answer([x[2] for x in one])
query, passage, answer = torch.LongTensor(query), torch.LongTensor(passage), torch.LongTensor(answer)
if args.cuda:
query = query.cuda()
passage = passage.cuda()
answer = answer.cuda()
optimizer.zero_grad()
loss = model([query, passage, answer, True])
loss.backward()
total_loss += loss.item()
optimizer.step()
if (num + 1) % args.log_interval == 0:
            print '|------epoch {:d} train error is {:f} progress {:.2f}%------|'.format(epoch,
total_loss / args.log_interval,
i * 100.0 / len(data))
total_loss = 0
def test():
model.eval()
r, a = 0.0, 0.0
with torch.no_grad():
for i in range(0, len(dev_data), args.batch_size):
one = dev_data[i:i + args.batch_size]
query, _ = padding([x[0] for x in one], max_len=50)
passage, _ = padding([x[1] for x in one], max_len=500)
answer = pad_answer([x[2] for x in one])
query, passage, answer = torch.LongTensor(query), torch.LongTensor(passage), torch.LongTensor(answer)
if args.cuda:
query = query.cuda()
passage = passage.cuda()
answer = answer.cuda()
output = model([query, passage, answer, False])
r += torch.eq(output, 0).sum().item()
a += len(one)
return r * 100.0 / a
def main():
best = 0.0
for epoch in range(args.epoch):
train(epoch)
acc = test()
if acc > best:
best = acc
with open(args.save, 'wb') as f:
torch.save(model, f)
        print 'epoch {:d} dev acc is {:f}, best dev acc {:f}'.format(epoch, acc, best)
if __name__ == '__main__':
main()
| 4,466 | 38.184211 | 120 | py |
ReCO | ReCO-master/BiDAF/preprocess.py | # -*- coding: utf-8 -*-
import cPickle
import json
import jieba
def seg_line(line):
return list(jieba.cut(line))
def seg_data(path):
print 'start process ', path
data = []
with open(path, 'r') as f:
for line in f:
dic = json.loads(line, encoding='utf-8')
question = dic['query']
doc = dic['passage']
alternatives = dic['alternatives']
data.append([seg_line(question), seg_line(doc), alternatives.split('|'), dic['query_id']])
return data
def build_word_count(data):
wordCount = {}
def add_count(lst):
for word in lst:
if word not in wordCount:
wordCount[word] = 0
wordCount[word] += 1
for one in data:
[add_count(x) for x in one[0:3]]
print 'word type size ', len(wordCount)
return wordCount
def build_word2id(wordCount, threshold=10):
word2id = {'<PAD>': 0, '<UNK>': 1}
for word in wordCount:
if wordCount[word] >= threshold:
if word not in word2id:
word2id[word] = len(word2id)
else:
chars = list(word)
for char in chars:
if char not in word2id:
word2id[char] = len(word2id)
print 'processed word size ', len(word2id)
return word2id
def transform_data_to_id(raw_data, word2id):
data = []
def map_word_to_id(word):
output = []
if word in word2id:
output.append(word2id[word])
else:
chars = list(word)
for char in chars:
if char in word2id:
output.append(word2id[char])
else:
output.append(1)
return output
def map_sent_to_id(sent):
output = []
for word in sent:
output.extend(map_word_to_id(word))
return output
for one in raw_data:
question = map_sent_to_id(one[0])
doc = map_sent_to_id(one[1])
candidates = [map_word_to_id(x) for x in one[2]]
length = [len(x) for x in candidates]
max_length = max(length)
if max_length > 1:
pad_len = [max_length - x for x in length]
candidates = [x[0] + [0] * x[1] for x in zip(candidates, pad_len)]
data.append([question, doc, candidates, one[-1]])
return data
def process_data(data_path, threshold):
train_file_path = data_path + 'ai_challenger_oqmrc_validationset_20180816/ai_challenger_oqmrc_validationset.json'
dev_file_path = data_path + 'ai_challenger_oqmrc_trainingset_20180816/ai_challenger_oqmrc_trainingset.json'
test_a_file_path = data_path + 'ai_challenger_oqmrc_testa_20180816/ai_challenger_oqmrc_testa.json'
test_b_file_path = data_path + 'ai_challenger_oqmrc_testb_20180816/ai_challenger_oqmrc_testb.json'
path_lst = [train_file_path, dev_file_path, test_a_file_path, test_b_file_path]
output_path = [data_path + x for x in ['dev.pickle', 'train.pickle', 'testa.pickle', 'testb.pickle']]
return _process_data(path_lst, threshold, output_path)
def _process_data(path_lst, word_min_count=5, output_file_path=[]):
raw_data = []
for path in path_lst:
raw_data.append(seg_data(path))
word_count = build_word_count([y for x in raw_data for y in x])
with open('data/word-count.obj', 'wb') as f:
cPickle.dump(word_count, f)
word2id = build_word2id(word_count, word_min_count)
with open('data/word2id.obj', 'wb') as f:
cPickle.dump(word2id, f)
for one_raw_data, one_output_file_path in zip(raw_data, output_file_path):
with open(one_output_file_path, 'wb') as f:
one_data = transform_data_to_id(one_raw_data, word2id)
cPickle.dump(one_data, f)
return len(word2id)
| 3,805 | 32.095652 | 117 | py |
ReCO | ReCO-master/BiDAF/BiDAF.py | # -*- coding: utf-8 -*-
"""
@Time    :   2019/11/21 4:42 PM
@FileName: BiDAF.py
@author: 王炳宁
@contact: wangbingning@sogou-inc.com
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class BiDAF(nn.Module):
def __init__(self, vocab_size, embedding_size, encoder_size, drop_out=0.2):
super(BiDAF, self).__init__()
# 2. Word Embedding Layer
# initialize word embedding with GloVe
self.word_emb = nn.Embedding(vocab_size, embedding_size)
self.a_encoder = nn.GRU(input_size=embedding_size, hidden_size=embedding_size // 2, batch_first=True,
bidirectional=True)
self.a_attention = nn.Linear(embedding_size, 1, bias=False)
# 3. Contextual Embedding Layer
self.context_LSTM = nn.LSTM(input_size=embedding_size,
hidden_size=encoder_size,
bidirectional=True,
batch_first=True,
)
# 4. Attention Flow Layer
self.att_weight_c = nn.Linear(encoder_size * 2, 1)
self.att_weight_q = nn.Linear(encoder_size * 2, 1)
self.att_weight_cq = nn.Linear(encoder_size * 2, 1)
# 5. Modeling Layer
self.modeling_LSTM = nn.LSTM(input_size=encoder_size * 8,
hidden_size=encoder_size,
bidirectional=True,
batch_first=True,
num_layers=2
)
self.Wq = nn.Linear(2 * encoder_size, encoder_size, bias=False)
self.vq = nn.Linear(encoder_size, 1, bias=False)
self.Wp1 = nn.Linear(2 * encoder_size, encoder_size, bias=False)
self.Wp2 = nn.Linear(2 * encoder_size, encoder_size, bias=False)
self.vp = nn.Linear(encoder_size, 1, bias=False)
self.prediction = nn.Linear(2 * encoder_size, embedding_size, bias=False)
self.drop_out = drop_out
def forward(self, inputs):
def att_flow_layer(c, q):
"""
:param c: (batch, c_len, hidden_size * 2)
:param q: (batch, q_len, hidden_size * 2)
:return: (batch, c_len, q_len)
"""
c_len = c.size(1)
q_len = q.size(1)
cq = []
for i in range(q_len):
# (batch, 1, hidden_size * 2)
qi = q.select(1, i).unsqueeze(1)
# (batch, c_len, 1)
ci = self.att_weight_cq(c * qi).squeeze()
cq.append(ci)
# (batch, c_len, q_len)
cq = torch.stack(cq, dim=-1)
# (batch, c_len, q_len)
s = self.att_weight_c(c).expand(-1, -1, q_len) + \
self.att_weight_q(q).permute(0, 2, 1).expand(-1, c_len, -1) + \
cq
# (batch, c_len, q_len)
a = F.softmax(s, dim=2)
# (batch, c_len, q_len) * (batch, q_len, hidden_size * 2) -> (batch, c_len, hidden_size * 2)
c2q_att = torch.bmm(a, q)
# (batch, 1, c_len)
b = F.softmax(torch.max(s, dim=2)[0], dim=1).unsqueeze(1)
# (batch, 1, c_len) * (batch, c_len, hidden_size * 2) -> (batch, hidden_size * 2)
q2c_att = torch.bmm(b, c).squeeze()
# (batch, c_len, hidden_size * 2) (tiled)
q2c_att = q2c_att.unsqueeze(1).expand(-1, c_len, -1)
# q2c_att = torch.stack([q2c_att] * c_len, dim=1)
# (batch, c_len, hidden_size * 8)
x = torch.cat([c, c2q_att, c * c2q_att, c * q2c_att], dim=-1)
return x
# 2. Word Embedding Layer
[query, passage, answer, is_train] = inputs
c_word = self.word_emb(passage)
q_word = self.word_emb(query)
a_embeddings = self.word_emb(answer)
a_embedding, _ = self.a_encoder(a_embeddings.view(-1, a_embeddings.size(2), a_embeddings.size(3)))
a_score = F.softmax(self.a_attention(a_embedding), 1)
a_output = a_score.transpose(2, 1).bmm(a_embedding).squeeze()
a_embedding = a_output.view(a_embeddings.size(0), 3, -1)
# Highway network
# 3. Contextual Embedding Layer
c, _ = self.context_LSTM(c_word)
q, _ = self.context_LSTM(q_word)
# 4. Attention Flow Layer
g = att_flow_layer(c, q)
# 5. Modeling Layer
m, _ = self.modeling_LSTM(g)
# 6. Output Layer
sj = F.softmax(self.vp(self.Wp1(m)).transpose(2, 1), 2)
rp = sj.bmm(m)
encoder_output = F.dropout(F.leaky_relu(self.prediction(rp)), self.drop_out)
score = F.softmax(a_embedding.bmm(encoder_output.transpose(2, 1)).squeeze(), 1)
if not is_train:
return score.argmax(1)
loss = -torch.log(score[:, 0]).mean()
return loss
| 4,904 | 40.923077 | 109 | py |
ReCO | ReCO-master/InHouseBert/prepare_data.py | # -*- coding: utf-8 -*-
"""
@Time    :   2020/6/23 10:15 AM
@FileName: prepare_data.py
@author: 王炳宁
@contact: wangbingning@sogou-inc.com
"""
import json
import random
from transformers import BertTokenizer, AutoTokenizer, XLNetTokenizer
from utils import *
import sentencepiece as spm
tokenizer = spm.SentencePieceProcessor()
tokenizer.load('bpe.50000.model')
def get_shuffled_answer(alternatives):
answers_index = [0, 1, 2]
random.shuffle(answers_index)
alternatives = [alternatives[x] for x in answers_index]
label = list(answers_index).index(0)
return alternatives, label
def get_one_sample_features(one):
alternatives, label = get_shuffled_answer(one['alternatives'].split('|'))
query = one['query']
paragraph = clean(one['passage'])
alt_ids = [y for x in alternatives for y in [1] + tokenizer.encode_as_ids(x)]
seq_ids = alt_ids + [2] + tokenizer.encode_as_ids(query) + [
2]
seq_ids += tokenizer.encode_as_ids(paragraph)[0:512-len(seq_ids)]
return [seq_ids, label]
def convert_to_features(filename):
with open(filename, encoding='utf-8') as f:
raw = json.load(f)
data = multi_process(get_one_sample_features, raw)
print('get {} with {} samples'.format(filename, len(data)))
return data
def prepare_bert_data(model_type='in-house-bert'):
if not os.path.exists('data/test.{}.obj'.format(model_type.replace('/', '.'))):
test_data = convert_to_features('../data/ReCO/ReCO.testa.json')
dump_file(test_data, 'data/test.{}.obj'.format(model_type.replace('/', '.')))
if not os.path.exists('data/valid.{}.obj'.format(model_type.replace('/', '.'))):
valid_data = convert_to_features('../data/ReCO/ReCO.validationset.json')
dump_file(valid_data, 'data/valid.{}.obj'.format(model_type.replace('/', '.')))
if not os.path.exists('data/train.{}.obj'.format(model_type.replace('/', '.'))):
train_data = convert_to_features('../data/ReCO/ReCO.trainingset.json')
dump_file(train_data, 'data/train.{}.obj'.format(model_type.replace('/', '.')))
prepare_bert_data()
| 2,106 | 35.327586 | 87 | py |
ReCO | ReCO-master/InHouseBert/model.py | # -*- coding: utf-8 -*-
"""
@Time    :   2020/6/24 6:18 PM
@FileName: model.py
@author: 王炳宁
@contact: wangbingning@sogou-inc.com
"""
import warnings
import apex
import torch
import torch.nn as nn
from apex.contrib.multihead_attn import SelfMultiheadAttn
from apex.mlp import MLP
from torch.nn import functional as F
warnings.filterwarnings("ignore")
layer_norm = apex.normalization.FusedLayerNorm
class TransformerEncoderLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1):
super(TransformerEncoderLayer, self).__init__()
self.self_attn = SelfMultiheadAttn(d_model, nhead, dropout=dropout, impl='fast')
self.feed_forward = MLP([d_model, dim_feedforward, d_model])
self.d_model = d_model
self.norm1 = layer_norm(d_model)
self.norm2 = layer_norm(d_model)
self.activation = F.gelu
def __setstate__(self, state):
if 'activation' not in state:
state['activation'] = F.relu
super(TransformerEncoderLayer, self).__setstate__(state)
def forward(self, src, src_mask=None, src_key_padding_mask=None):
# type: (Tensor, Optional[Tensor], Optional[Tensor]) -> Tensor
src = self.norm2(src)
src2 = self.self_attn(src, src, src, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask, is_training=self.training)[0]
src = src + src2
src = self.norm1(src)
src2 = self.feed_forward(src.view(-1, self.d_model)).view(src.size())
src = src + src2
return src
class SelfAttention(nn.Module):
def __init__(self, n_hidden, n_layer, n_head=6):
super().__init__()
self.att = nn.ModuleList()
for l in range(n_layer):
en = TransformerEncoderLayer(n_hidden, n_head, n_hidden * 4)
self.att.append(en)
self.output_ln = layer_norm(n_hidden)
def forward(self, representations):
representations = representations.transpose(0, 1).contiguous()
for one in self.att:
representations = one(representations)
return self.output_ln(representations.transpose(0, 1))
class BERTLSTM(nn.Module):
def __init__(self, vocab_size, n_embedding, n_hidden, n_layer, n_head):
super().__init__()
vocabulary_size = (2 + vocab_size // 8) * 8
self.word_embedding = nn.Embedding(vocabulary_size, embedding_dim=n_embedding)
self.encoder = nn.LSTM(input_size=n_embedding, hidden_size=n_hidden // 2, bidirectional=True, batch_first=True)
self.n_embedding = n_embedding
self.n_hidden = n_hidden
self.attention = SelfAttention(n_hidden, n_layer, n_head=n_head)
self.output = nn.Sequential(nn.Linear(n_hidden, n_embedding),
nn.LeakyReLU(inplace=True),
apex.normalization.FusedLayerNorm(n_embedding))
self.trans = nn.Linear(n_embedding, vocabulary_size, bias=False)
self.word_embedding.weight = self.trans.weight
def inference(self, seq):
word_embedding = self.word_embedding(seq)
encoder_representations, _ = self.encoder(word_embedding)
encoder_representations = self.attention(encoder_representations)
return encoder_representations
class BERT(BERTLSTM):
def __init__(self, vocab_size, n_embedding, n_hidden, n_layer, n_head):
super().__init__(vocab_size, n_embedding, n_hidden, n_layer, n_head)
del self.trans
del self.output
self.prediction = nn.Sequential(
nn.Linear(self.n_hidden, self.n_hidden // 2),
nn.GELU(),
nn.Linear(self.n_hidden // 2, 1, bias=False),
)
def forward(self, inputs):
[seq, label] = inputs
hidden = self.inference(seq)
        mask_idx = torch.eq(seq, 1)  # 1 is the separator id inserted before each candidate in the seq.
hidden = hidden.masked_select(mask_idx.unsqueeze(2).expand_as(hidden)).view(
-1, 3, self.n_hidden)
hidden = self.prediction(hidden).squeeze(-1)
if label is None:
return hidden.argmax(1)
return F.cross_entropy(hidden, label)
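# Illustrative usage sketch (hyper-parameters mirror the accompanying training script
# and are assumptions here, not part of this file):
#   model = BERT(vocab_size=50000, n_embedding=128, n_hidden=768, n_layer=12, n_head=12)
#   loss = model([seq, label])   # training: seq (B, L) LongTensor, label (B,) LongTensor
#   preds = model([seq, None])   # inference: index of the predicted candidate per example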
| 4,192 | 37.118182 | 119 | py |
ReCO | ReCO-master/InHouseBert/__init__.py | # -*- coding: utf-8 -*-
"""
@Time    :   2020/6/24 6:10 PM
@FileName: __init__.py.py
@author: 王炳宁
@contact: wangbingning@sogou-inc.com
""" | 139 | 19 | 37 | py |
ReCO | ReCO-master/InHouseBert/train.py | # -*- coding: utf-8 -*-
"""
@Time    :   2020/6/24 6:16 PM
@FileName: train.py
@author: 王炳宁
@contact: wangbingning@sogou-inc.com
"""
import argparse
import sys
sys.path.append("../..")
sys.path.append("..")
from tasks.ReCO.model import BERT
from utils import *
import torch.distributed as dist
torch.manual_seed(100)
np.random.seed(200)
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", type=int, default=16)
parser.add_argument("--epoch", type=int, default=10)
parser.add_argument("--lr", type=float, default=4.0e-5)
parser.add_argument("--max_grad_norm", type=float, default=0.2)
parser.add_argument("--model_type", type=str, default="bert-base-chinese-new")
parser.add_argument(
"--fp16",
action="store_true",
default=True,
)
parser.add_argument("--local_rank", type=int, default=-1)
args = parser.parse_args()
model_type = args.model_type
local_rank = args.local_rank
if local_rank >= 0:
torch.distributed.init_process_group(backend='nccl',
init_method='env://')
torch.cuda.set_device(args.local_rank)
data = load_file('data/train.{}.obj'.format(model_type.replace('/', '.')))
valid_data = load_file('data/valid.{}.obj'.format(model_type.replace('/', '.')))
valid_data = sorted(valid_data, key=lambda x: len(x[0]))
batch_size = args.batch_size
n_embedding = 128
n_hidden = 768
n_layer = 12
n_head = 12
vocab_size = 50000
model = BERT(vocab_size, n_embedding, n_hidden, n_layer, n_head)
state_dict = load_file('model.bert.base.th')
for name, para in model.named_parameters():
if name not in state_dict:
print('{} not load'.format(name))
continue
para.data = torch.FloatTensor(state_dict[name])
model.cuda()
optimizer = torch.optim.AdamW(model.parameters(),
weight_decay=0.01,
lr=args.lr)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level='O2', verbosity=0)
if local_rank >= 0:
try:
import apex
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use parallel training.")
model = apex.parallel.DistributedDataParallel(model)
def get_shuffle_data():
pool = {}
for one in data:
length = len(one[0]) // 5
if length not in pool:
pool[length] = []
pool[length].append(one)
for one in pool:
np.random.shuffle(pool[one])
length_lst = list(pool.keys())
np.random.shuffle(length_lst)
whole_data = [x for y in length_lst for x in pool[y]]
if local_rank >= 0:
remove_data_size = len(whole_data) % dist.get_world_size()
thread_data = [whole_data[x + args.local_rank] for x in
range(0, len(whole_data) - remove_data_size, dist.get_world_size())]
return thread_data
return whole_data
def iter_printer(total, epoch):
if local_rank >= 0:
if local_rank == 0:
return tqdm(range(0, total, batch_size), desc='epoch {}'.format(epoch))
else:
return range(0, total, batch_size)
else:
return tqdm(range(0, total, batch_size), desc='epoch {}'.format(epoch))
def train(epoch):
model.train()
train_data = get_shuffle_data()
total = len(train_data)
for i in iter_printer(total, epoch):
seq = [x[0] for x in train_data[i:i + batch_size]]
label = [x[1] for x in train_data[i:i + batch_size]]
seq, _ = padding(seq, pads=0, max_len=512)
seq = torch.LongTensor(seq).cuda()
label = torch.LongTensor(label).cuda()
loss = model([seq, label])
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
optimizer.zero_grad()
def evaluation(epoch):
model.eval()
total = len(valid_data)
right = 0
with torch.no_grad():
for i in iter_printer(total, epoch):
seq = [x[0] for x in valid_data[i:i + batch_size]]
labels = [x[1] for x in valid_data[i:i + batch_size]]
seq, _ = padding(seq, pads=0, max_len=512)
seq = torch.LongTensor(seq).cuda()
predictions = model([seq, None])
predictions = predictions.cpu()
right += predictions.eq(torch.LongTensor(labels)).sum().item()
acc = 100 * right / total
print('epoch {} eval acc is {}'.format(epoch, acc))
return acc
best_acc = 0.0
for epo in range(args.epoch):
train(epo)
if local_rank == -1 or local_rank == 0:
accuracy = evaluation(epo)
if accuracy > best_acc:
best_acc = accuracy
with open('checkpoint.{}.th'.format(model_type.replace('/', '.')), 'wb') as f:
                state_dict = model.module.state_dict() if hasattr(model, 'module') else model.state_dict()
torch.save(state_dict, f)
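# Typical invocations (illustrative; assume the pretrained weights 'model.bert.base.th'
# and the prepared data/*.obj files produced by prepare_data.py are present):
#   single GPU:  python train.py --batch_size 16
#   multi GPU:   python -m torch.distributed.launch --nproc_per_node=4 train.py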
| 5,292 | 32.713376 | 114 | py |
mining-legal-arguments | mining-legal-arguments-main/importance_model.py | from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import os
import json
from cassis import *
from collections import Counter
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
with open('gold_data/TypeSystem.xml', 'rb') as f:
typesystem = load_typesystem(f)
def read_xmi(file, typesystem=typesystem):
"""
Reads the data from an xmi file and returns the tokens, paragraphs and annotations.
:param file: path to xmi file
:param typesystem: typesystem of the xmi
:return: tokens, paragraphs and annotations"""
with open(file, 'rb') as f:
cas = load_cas_from_xmi(f, typesystem=typesystem)
tokens = cas.select('de.tudarmstadt.ukp.dkpro.core.api.segmentation.type.Token')
#paragraphs = cas.select('de.tudarmstadt.ukp.dkpro.core.api.segmentation.type.Paragraph')
paragraphs = cas.select('de.tudarmstadt.ukp.dkpro.core.api.segmentation.type.Sentence')
#sents = cas.select('de.tudarmstadt.ukp.dkpro.core.api.segmentation.type.Sentence')
annotations = cas.select('webanno.custom.LegalArgumentation')
return tokens, paragraphs, annotations
def read_data(path, cases=None):
"""
Reads the data from all xmi files in the specified path and returns lists of the tokens, paragraphs and annotations.
:param path: path to directory with the xmi files
:param cases: dictionary of the cases
:return: dictionary of the cases with the xmi data added"""
docs_token = []
docs_paras = []
docs_anno = []
files = [f for f in os.listdir(path) if f.endswith('.xmi')]
del files[files.index('001-67472.xmi')]
del files[files.index('001-175007.xmi')]
if not cases:
cases = {}
for file in files:
cases[file[:-4]] = {}
for f in files:
data = read_xmi(os.path.join(path, f))
cases[f[:-4]]['token_xmi'] = data[0]
cases[f[:-4]]['para_xmi'] = data[1]
cases[f[:-4]]['anno_xmi'] = data[2]
return cases
def prepare_data(tokens, annotations):
"""
Converts the xmi data into the tokens and corresponding BIO tags for the argType and agent.
:param tokens: list of xmi tokens
:param annotations: list of xmi annotations
:return: list of tokens, list of argType BIO tags and list of agent BIO tags"""
tokens_raw = [x.get_covered_text() for x in tokens]
# id -> position in list of tokens so we can insert the bio tag at appropriate place
lookup = dict()
for i, token in enumerate(tokens):
lookup[token.xmiID] = i
bio_tags_args = len(tokens)*['O'] # O tag for non-arguments
for anno in annotations:
start = anno.begin
end = anno.end
for tok in tokens:
# B tag for begin
if tok.begin == start and anno.ArgType is not None:
bio_tags_args[lookup[tok.xmiID]] = 'B-' + anno.ArgType
# I tag for in between start and end(can't be == end because end is exclusive)
elif tok.begin > start and tok.begin < end and anno.ArgType is not None:
bio_tags_args[lookup[tok.xmiID]] = 'I-' + anno.ArgType
# same for agent tags
bio_tags_agent = len(tokens)*['O'] # O tag for non-arguments
for anno in annotations:
start = anno.begin
end = anno.end
for tok in tokens:
# B tag for begin
if tok.begin == start and anno.Akteur is not None:
bio_tags_agent[lookup[tok.xmiID]] = 'B-' + anno.Akteur
# I tag for in between start and end(can't be == end because end is exclusive)
elif tok.begin > start and tok.begin < end and anno.Akteur is not None:
bio_tags_agent[lookup[tok.xmiID]] = 'I-' + anno.Akteur
return tokens_raw, bio_tags_args, bio_tags_agent
def paragraphed_tokens(tokens, paragraphs, shorten=True):
"""
Divides the tokens into our input units (paragraphs).
:param tokens: list of xmi tokens
:param annotations: list of xmi paragraphs
:return: list of paragraphs"""
paragraphed_token = []
for para in paragraphs:
start = para.begin
end = para.end
para_toks = []
for tok in tokens:
if tok.begin >= start and tok.begin < end:
para_toks.append(tok.get_covered_text())
paragraphed_token.append(para_toks)
# shorten files because argumentation starts only after "THE LAW"
if shorten and para_toks == ['THE', 'LAW']:
paragraphed_token = []
if shorten and para_toks == ['AS', 'TO', 'THE', 'LAW']:
paragraphed_token = []
return paragraphed_token
def plot_coefficients(coef, feature_names, title='', top_features=5):
#coef = classifier.coef_.ravel()
top_positive_coefficients = np.argsort(coef)[-top_features:]
top_negative_coefficients = np.argsort(coef)[:top_features]
top_coefficients = np.hstack([top_negative_coefficients, top_positive_coefficients])
# create plot
plt.figure(figsize=(15,5))
colors = ['red' if c < 0 else 'blue' for c in coef[top_coefficients]]
plt.barh(np.arange(2 * top_features), coef[top_coefficients], color=colors)
feature_names = np.array(feature_names)
plt.title(title, fontdict = {'fontsize' : 16})
plt.xticks(fontsize=12)
plt.yticks(np.arange(0, 0 + 2 * top_features), feature_names[top_coefficients], rotation=0, ha='right', fontsize=14)
plt.tight_layout()
plt.show()
list_argType = ['Distinguishing',
'Einschätzungsspielraum',
'Entscheidung des EGMR',
'Konsens der prozessualen Parteien',
'Overruling',
'Rechtsvergleichung',
'Sinn & Zweck Auslegung',
'Subsumtion',
'Systematische Auslegung',
'Verhältnismäßigkeitsprüfung – Angemessenheit',
'Verhältnismäßigkeitsprüfung – Geeignetheit',
'Verhältnismäßigkeitsprüfung – Legitimer Zweck',
'Verhältnismäßigkeitsprüfung – Rechtsgrundlage',
'Vorherige Rechtsprechung des EGMR',
'Wortlaut Auslegung']
list_agent = ['Beschwerdeführer', 'EGMR', 'Staat', 'Kommission/Kammer', 'Dritte']
# whether to recompute the dict containing the mapping from case names to importance levels (necessary with new data)
recompute_importance_dict = False
# whether to use the already saved cases dict with everything (tokens, bio-tags, ...) except the xmi data for feature creation (avoids unnecessary computation when using the same data)
use_precomputed = False
# whether to recompute the final dict with the feature values and the associated importance level
recompute_feature_dict = False
if recompute_importance_dict:
goldfiles = [f.split('.')[0] for f in os.listdir('gold_data/') if f.endswith('.xmi')]
# path to scraped files with imoprtance information
directory = 'ECHR-Scraper-master-echrscraper-rss_approach-03_all_cases_html/echrscraper/rss_approach/03_all_cases_html/'
directories = [f for f in os.listdir(directory)]
importance_dict = {}
# match with gold data
for d in directories:
files = [f.split('.')[0] for f in os.listdir(os.path.join(directory, d)) if f.endswith('.json')]
for match in set(goldfiles).intersection(set(files)):
with open(os.path.join(directory, d, match + '.json'), 'r') as f:
case = json.load(f)
importance_dict[match] = case['results'][0]['columns']['importance']
# original data used excluded the following files
del importance_dict['001-67472']
del importance_dict['001-175007']
# save importance dict
with open('gold_data/importance_dict.json', 'w') as f:
json.dump(importance_dict, f)
if recompute_feature_dict:
# recompute from scratch
if not use_precomputed:
# read xmi data
cases = read_data('gold_data/')
# convert to tokens and tags for each file/case
for k in cases.keys():
cases[k]['token_raw'], cases[k]['bio_tags_args'], cases[k]['bio_tags_agent'] = prepare_data(cases[k]['token_xmi'], cases[k]['anno_xmi'])
# get paragraphed tokens for each case
for k in cases.keys():
cases[k]['para_token_shortened'] = paragraphed_tokens(cases[k]['token_xmi'], cases[k]['para_xmi'])
# also shorten ArgType and Agent tags
for k,v in cases.items():
skipped = len([item for sublist in v['para_token_shortened'] for item in sublist])
cases[k]['bio_tags_args_shortened'] = cases[k]['bio_tags_args'][-skipped:]
cases[k]['bio_tags_agent_shortened'] = cases[k]['bio_tags_agent'][-skipped:]
# get paragraph representation for both tagsets
for k,v in cases.items():
start = 0
paragraphed_labels_argType = []
paragraphed_labels_agent = []
for par in v['para_token_shortened']:
end = start + len(par)
paragraphed_labels_argType.append(v['bio_tags_args_shortened'][start:end])
paragraphed_labels_agent.append(v['bio_tags_agent_shortened'][start:end])
start = end
cases[k]['para_args_shortened'] = paragraphed_labels_argType
cases[k]['para_agent_shortened'] = paragraphed_labels_agent
# add shortened tokens
for k,v in cases.items():
cases[k]['token_raw_shortened'] = v['token_raw'][-len(v['bio_tags_agent_shortened']):]
# clean xmi data since it cannot be saved in json
for k in cases.keys():
cases[k]['token_xmi'] = ''
cases[k]['para_xmi'] = ''
cases[k]['anno_xmi'] = ''
# save computation
with open('gold_data/cases_features.json', 'w') as f:
json.dump(cases, f)
# use precomputed
with open('gold_data/cases_features.json', 'r') as f:
cases = json.load(f)
# add xmi data
cases = read_data('gold_data/', cases)
    # compute features (add new features here)
    no_annos = []  # collect ids of files without annotations so they can be skipped
    for k,v in cases.items():
if not v['anno_xmi']:
print('No annotation in file ', k)
no_annos.append(k)
continue
features = {}
features['Doc Length'] = len(v['token_raw'])
features['Fraction Argumentive Part'] = 1 - Counter(v['bio_tags_args'])['O'] / len(v['token_raw'])
features['Shortened Doc Length'] = len(v['token_raw_shortened'])
features['Shortened Fraction Argumentive Part'] = 1 - Counter(v['bio_tags_args_shortened'])['O'] / len(v['token_raw_shortened'])
features['No. of Args'] = len(v['anno_xmi'])
argTypes = [anno.ArgType for anno in v['anno_xmi']]
agents = [anno.Akteur for anno in v['anno_xmi']]
c_argTypes = Counter(argTypes)
c_agents = Counter(agents)
for arg in list_argType:
#features[f'No. of {arg} Args'] = c_argTypes[arg]
features[f'Fraction of {arg} Arg'] = c_argTypes[arg] / features['No. of Args']
for agent in list_agent:
#features[f'No. of {agent} Agents'] = c_agents[agent]
features[f'Fraction of {agent} Agent'] = c_agents[agent] / features['No. of Args']
features['Avg. Arg Length (Chars)'] = sum([anno.end - anno.begin for anno in v['anno_xmi']]) / len(v['anno_xmi'])
cases[k]['features'] = features
# extract features with importance level and save them
df = pd.DataFrame(columns=list(cases['001-101152']['features'].keys()) + ['Importance'])
y = []
for k,v in cases.items():
df = df.append(v['features'], ignore_index=True)
y.append(int(importance_dict[k]))
df['Importance'] = y
df.to_csv('gold_data/importance_model_features.csv', encoding='utf-8', index=False, sep='\t')
visualize_feature_importance = True
# read features with classes
df = pd.read_csv('gold_data/importance_model_features.csv', encoding='utf-8', sep='\t')
print('Distribution', Counter(df['Importance']))
X = df.drop('Importance', axis=1)
y = df['Importance']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state=18)
print('Train Distribution', Counter(y_train))
print('Test Distribution', Counter(y_test))
# standardize data
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
param_grid = [
{'C': [0.1, 1, 10, 100, 1000], 'kernel': ['linear']},
{'C': [0.1, 1, 10, 100, 1000], 'gamma': [1, 0.1, 0.01, 0.001, 0.0001, 'scale'], 'degree': [2, 3, 4, 5, 6], 'kernel': ['poly']}
]
grid = GridSearchCV(SVC(max_iter=1000000), param_grid, refit=True, verbose=3, scoring='f1_macro')
grid.fit(X_train_scaled,y_train)
print('Best Grid Params: ', grid.best_params_)
print('Cross validation score of these params ', grid.best_score_)
preds = grid.predict(X_test_scaled)
print('Test scores')
print(classification_report(y_true=y_test, y_pred=preds))
if visualize_feature_importance:
#for i, t in enumerate(['1 vs. 2', '1 vs. 3', '1 vs. 4', '2 vs. 3', '2 vs. 4', '3 vs. 4']):
#plot_coefficients(grid.best_estimator_.coef_[i], X_train.columns, title=t)
    print('Average values for each importance level:')
pd.options.display.float_format = '{:.4f}'.format
pd.set_option('display.max_columns', 10)
print(df.groupby('Importance').mean().transpose())
| 13,432 | 39.098507 | 173 | py |
mining-legal-arguments | mining-legal-arguments-main/evaluate.py | #!/usr/bin/env python
# coding: utf-8
from collections import Counter
from prettytable import PrettyTable
import os
from transformers import AutoTokenizer
import torch
from torch.utils.data import Dataset
import pandas as pd
from datasets import load_dataset, load_metric
import csv
from ast import literal_eval
import numpy as np
import torch.nn as nn
import transformers
import logging
import dataclasses
from torch.utils.data.dataloader import DataLoader
from transformers.training_args import is_torch_tpu_available
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.data.data_collator import DataCollator, InputDataClass
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler, SequentialSampler
from typing import List, Union, Dict
from transformers import DataCollatorForTokenClassification
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.file_utils import PaddingStrategy
from typing import Optional, Any
from sklearn.metrics import confusion_matrix
from multiTaskModel import MultitaskModel, StrIgnoreDevice, DataLoaderWithTaskname, MultitaskDataloader, MultitaskTrainer, MyDataCollatorForTokenClassification, compute_f1, compute_macro_f1, eval_f1
import argparse
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
id2label_argType = ['B-Distinguishing',
'B-Einschätzungsspielraum',
'B-Entscheidung des EGMR',
'B-Konsens der prozessualen Parteien',
'B-Overruling',
'B-Rechtsvergleichung',
'B-Sinn & Zweck Auslegung',
'B-Subsumtion',
'B-Systematische Auslegung',
'B-Verhältnismäßigkeitsprüfung – Angemessenheit',
'B-Verhältnismäßigkeitsprüfung – Geeignetheit',
'B-Verhältnismäßigkeitsprüfung – Legitimer Zweck',
'B-Verhältnismäßigkeitsprüfung – Rechtsgrundlage',
'B-Vorherige Rechtsprechung des EGMR',
'B-Wortlaut Auslegung',
'I-Distinguishing',
'I-Einschätzungsspielraum',
'I-Entscheidung des EGMR',
'I-Konsens der prozessualen Parteien',
'I-Overruling',
'I-Rechtsvergleichung',
'I-Sinn & Zweck Auslegung',
'I-Subsumtion',
'I-Systematische Auslegung',
'I-Verhältnismäßigkeitsprüfung – Angemessenheit',
'I-Verhältnismäßigkeitsprüfung – Geeignetheit',
'I-Verhältnismäßigkeitsprüfung – Legitimer Zweck',
'I-Verhältnismäßigkeitsprüfung – Rechtsgrundlage',
'I-Vorherige Rechtsprechung des EGMR',
'I-Wortlaut Auslegung',
'O']
label2id_argType = {}
for i, label in enumerate(id2label_argType):
label2id_argType[label] = i
id2label_agent = ['B-Beschwerdeführer',
'B-Dritte',
'B-EGMR',
'B-Kommission/Kammer',
'B-Staat',
'I-Beschwerdeführer',
'I-Dritte',
'I-EGMR',
'I-Kommission/Kammer',
'I-Staat',
'O']
label2id_agent = {}
for i, label in enumerate(id2label_agent):
label2id_agent[label] = i
def tokenize_and_align_labels_argType(examples, label_all_tokens=False):
"""
Tokenizes the input using the tokenizer and aligns the argument type labels to the subwords.
:param examples: input dataset
:param label_all_tokens: Whether to label all subwords of a token or only the first subword
:return: Tokenized input"""
tokenized_inputs = tokenizer(examples['tokens'], truncation=True, is_split_into_words=True)
labels = []
for i, label in enumerate(examples['labels']):
word_ids = tokenized_inputs.word_ids(batch_index=i)
previous_word_idx = None
label_ids = []
for word_idx in word_ids:
# Special tokens have a word id that is None. We set the label to -100 so they are automatically
# ignored in the loss function.
if word_idx is None:
label_ids.append(-100)
# We set the label for the first token of each word.
elif word_idx != previous_word_idx:
label_ids.append(label2id_argType[label[word_idx]])
# For the other tokens in a word, we set the label to either the current label or -100, depending on
# the label_all_tokens flag.
else:
label_ids.append(label2id_argType[label[word_idx]] if label_all_tokens else -100)
previous_word_idx = word_idx
labels.append(label_ids)
tokenized_inputs["labels"] = labels
return tokenized_inputs
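# Illustrative usage (a sketch; assumes `tokenizer` is defined, e.g. via AutoTokenizer,
# and that `dataset` has 'tokens' and 'labels' columns):
#   tokenized_dataset = dataset.map(tokenize_and_align_labels_argType, batched=True)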
def tokenize_and_align_labels_agent(examples, label_all_tokens=False):
"""
Tokenizes the input using the tokenizer and aligns the agent labels to the subwords.
:param examples: input dataset
:param label_all_tokens: Whether to label all subwords of a token or only the first subword
:return: Tokenized input"""
tokenized_inputs = tokenizer(examples['tokens'], truncation=True, is_split_into_words=True)
labels = []
for i, label in enumerate(examples['labels']):
word_ids = tokenized_inputs.word_ids(batch_index=i)
previous_word_idx = None
label_ids = []
for word_idx in word_ids:
# Special tokens have a word id that is None. We set the label to -100 so they are automatically
# ignored in the loss function.
if word_idx is None:
label_ids.append(-100)
# We set the label for the first token of each word.
elif word_idx != previous_word_idx:
label_ids.append(label2id_agent[label[word_idx]])
# For the other tokens in a word, we set the label to either the current label or -100, depending on
# the label_all_tokens flag.
else:
label_ids.append(label2id_agent[label[word_idx]] if label_all_tokens else -100)
previous_word_idx = word_idx
labels.append(label_ids)
tokenized_inputs["labels"] = labels
return tokenized_inputs
def get_subset_df(tokens, predictions, labels, predlabel=None, truelabel=None):
"""
Can filter model predictions by predicted and true label. If none provided, just postprocesses and returns output.
:param tokens: text tokens
:param predictions: predictions of the model
:param labels: true annotator labels
:param predlabel: optional, filters the model predictions if a label is provided, e.g. label2id_agent['I-EGMR']
    :param truelabel: optional, filters the true annotator labels if a label is provided, e.g. label2id_agent['I-EGMR']
:return: DataFrame with the (filtered) predictions, labels and tokens."""
pred = []
label = []
for p,l in zip(predictions, labels):
p = np.array(p)
l = np.array(l)
ind = np.logical_and(p > -1, l > -1)
pred.append(p[ind].tolist())
label.append(l[ind].tolist())
if predlabel is None and truelabel is None:
return pd.DataFrame({'Predictions': pred, 'Labels': label, 'Tokens': tokens})
elif predlabel is None:
preds = []
labels = []
toks = []
for i,l in enumerate(label):
            if truelabel in l[2:] and truelabel not in pred[i][2:]:
preds.append(pred[i])
labels.append(l)
toks.append(tokens[i])
return pd.DataFrame({'Predictions': preds, 'Labels': labels, 'Tokens': toks})
elif truelabel is None:
preds = []
labels = []
toks = []
for i,p in enumerate(pred):
            if predlabel in p[2:] and predlabel not in label[i][2:]:
                preds.append(p)
                labels.append(label[i])
toks.append(tokens[i])
return pd.DataFrame({'Predictions': preds, 'Labels': labels, 'Tokens': toks})
else:
preds = []
labels = []
toks = []
for i,p in enumerate(pred):
if predlabel in p[2:] and truelabel in label[i][2:]:
preds.append(p)
labels.append(label[i])
toks.append(tokens[i])
return pd.DataFrame({'Predictions': preds, 'Labels': labels, 'Tokens': toks})
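# Illustrative usage (hypothetical call, documentation only): keep only the paragraphs
# where the model predicted 'I-EGMR' although the gold annotation does not contain it:
#
#   df_fp = get_subset_df(tokens, predictions, labels,
#                         predlabel=label2id_agent['I-EGMR'])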
def save_predictions(tokens, predictions, labels, file):
"""
Saves the model predictions as csv after postprocessing them.
:param tokens: text tokens
:param predictions: predictions of the model
:param labels: true annotator labels
:param file: path of the output file
"""
df = get_subset_df(tokens, predictions, labels)
    df.to_csv(file, sep='\t', encoding='utf-8', index=False)
if __name__ == '__main__':
# parse optional args
parser = argparse.ArgumentParser(description='Evaluate a MultiTask model and save its predictions')
parser.add_argument('--pathprefix', help='path to the project directory')
parser.add_argument('--models', nargs='*' ,help='paths to the models to evaluate')
parser.add_argument('--test_dir', help='path to the directory with the test files')
parser.add_argument('--val_dir', help='path to the directory with the dev files')
parser.add_argument('--output_dir', help='path to the output directory for saving the predictions')
parser.add_argument('--do_val', default=False, type=lambda x: (str(x).lower() == 'true'), help='whether to evaluate the validation/dev dataset')
parser.add_argument('--do_test', default=True, type=lambda x: (str(x).lower() == 'true'), help='whether to evaluate the test dataset')
args = parser.parse_args()
# project directory
    # pathprefix = '/ukp-storage-1/dfaber/'
    # pathprefix = '../Uni/masterthesis/'
    pathprefix = ''
if args.pathprefix:
pathprefix = args.pathprefix
#test_dir = 'data/article_3/'
test_dir = 'data/test/'
if args.test_dir:
test_dir = args.test_dir
val_dir = 'data/val/'
if args.val_dir:
val_dir = args.val_dir
output_dir = 'predictions/'
if args.output_dir:
output_dir = args.output_dir
# load datasets
testfiles = [f for f in os.listdir(os.path.join(pathprefix, test_dir, 'argType/')) if f.endswith('.csv')]
valfiles = [f for f in os.listdir(os.path.join(pathprefix, val_dir, 'argType/')) if f.endswith('.csv')]
    # NOTE: the two lines below restrict evaluation to the first two files (apparently a
    # debugging shortcut); remove them to evaluate the complete test and dev sets
    testfiles = testfiles[:2]
    valfiles = valfiles[:2]
dataset_argType = load_dataset('csv', data_files={'test': [os.path.join(pathprefix, test_dir, 'argType/', file) for file in testfiles],
'validation': [os.path.join(pathprefix, val_dir, 'argType/', file) for file in valfiles]}, delimiter='\t')
dataset_actor = load_dataset('csv', data_files={'test': [os.path.join(pathprefix, test_dir, 'agent/', file) for file in testfiles],
'validation': [os.path.join(pathprefix, val_dir, 'agent/', file) for file in valfiles]}, delimiter='\t')
dataset_argType = dataset_argType.map(lambda x: {'tokens': literal_eval(x['tokens']), 'labels': literal_eval(x['labels'])})
dataset_actor = dataset_actor.map(lambda x: {'tokens': literal_eval(x['tokens']), 'labels': literal_eval(x['labels'])})
# models to evaluate
'''
models = ['/ukp-storage-1/dfaber/models/multitask/legal-bert-final/checkpoint-39820/bert', '/ukp-storage-1/dfaber/models/multitask/legal-bert-final/checkpoint-47784/bert',
'/ukp-storage-1/dfaber/models/multitask/legal-bert-final/checkpoint-55748/bert', '/ukp-storage-1/dfaber/models/multitask/legal-bert-final/checkpoint-71676/bert',
'/ukp-storage-1/dfaber/models/multitask/legal-bert-final/checkpoint-79640/bert', '/ukp-storage-1/dfaber/models/multitask/roberta-large-final/checkpoint-111482/roberta',
'/ukp-storage-1/dfaber/models/multitask/roberta-large-final/checkpoint-143334/roberta', '/ukp-storage-1/dfaber/models/multitask/roberta-large-final/checkpoint-159260/roberta',
'/ukp-storage-1/dfaber/models/multitask/roberta-large-fp-13000/checkpoint-95556/roberta', '/ukp-storage-1/dfaber/models/multitask/roberta-large-fp-13000/checkpoint-127408/roberta',
'/ukp-storage-1/dfaber/models/multitask/roberta-large-fp-13000/checkpoint-143334/roberta', '/ukp-storage-1/dfaber/models/multitask/roberta-large-fp-13000/checkpoint-159260/roberta',
'/ukp-storage-1/dfaber/models/multitask/roberta-large-fp-15000/checkpoint-111482/roberta', '/ukp-storage-1/dfaber/models/multitask/roberta-large-fp-15000/checkpoint-127408/roberta',
'/ukp-storage-1/dfaber/models/multitask/roberta-large-fp-15000/checkpoint-143334/roberta', '/ukp-storage-1/dfaber/models/multitask/roberta-large-fp-15000/checkpoint-159260/roberta',
'/ukp-storage-1/dfaber/models/multitask/roberta-large-fp-final/checkpoint-143334/roberta']
'''
models = ['/ukp-storage-1/dfaber/models/multitask/legal-bert-final/checkpoint-39820/bert', '/ukp-storage-1/dfaber/models/multitask/legal-bert-final/checkpoint-47784/bert',
'/ukp-storage-1/dfaber/models/multitask/roberta-large-final/checkpoint-111482/roberta', '/ukp-storage-1/dfaber/models/multitask/roberta-large-final/checkpoint-143334/roberta',
'/ukp-storage-1/dfaber/models/multitask/roberta-large-fp-13000/checkpoint-95556/roberta', '/ukp-storage-1/dfaber/models/multitask/roberta-large-fp-13000/checkpoint-143334/roberta',
'/ukp-storage-1/dfaber/models/multitask/roberta-large-fp-15000/checkpoint-143334/roberta', '/ukp-storage-1/dfaber/models/multitask/roberta-large-fp-15000/checkpoint-159260/roberta']
models = ['/ukp-storage-1/dfaber/models/multitask/roberta-large-fp-15000/checkpoint-143334/roberta']
if args.models:
models = args.models
# Evaluate each model
for model in models:
print('\n\n\n\n********************Evaluating ', model, '********************\n\n\n\n')
# load model and tokenizer
multitask_model = torch.load(model)
tokenizer = AutoTokenizer.from_pretrained(multitask_model.encoder.name_or_path)
if model.split('/')[-1] == 'roberta':
tokenizer.add_prefix_space = True
if tokenizer.model_max_length > 1024:
tokenizer.model_max_length = 512
# preprocess data and create datasets
tokenized_dataset_argType = dataset_argType.map(tokenize_and_align_labels_argType, batched=True)
tokenized_dataset_actor = dataset_actor.map(tokenize_and_align_labels_agent, batched=True)
dataset_dict = {
"ArgType": tokenized_dataset_argType,
"Actor": tokenized_dataset_actor,
}
data_collator= MyDataCollatorForTokenClassification(tokenizer)
test_dataset = {
task_name: dataset["test"]
for task_name, dataset in dataset_dict.items()
}
val_dataset = {
task_name: dataset["validation"]
for task_name, dataset in dataset_dict.items()
}
# initialize Trainer
batch_size = 8
train_args = transformers.TrainingArguments(
'test_bert/legal_bert/',
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
)
trainer = MultitaskTrainer(
model=multitask_model,
args=train_args,
data_collator=data_collator,
eval_dataset=val_dataset,
tokenizer=tokenizer,
compute_metrics=eval_f1,
)
# evaluate validation data if specified
if args.do_val:
print('\n\n*****VALIDATION DATASET*****\n\n')
eval_dataloader_argType = DataLoaderWithTaskname(
'ArgType',
data_loader=DataLoader(
val_dataset['ArgType'],
batch_size=trainer.args.eval_batch_size,
collate_fn=trainer.data_collator.collate_batch,
),
)
preds_arg = trainer.prediction_loop(eval_dataloader_argType, description='Validation ArgType')
eval_dataloader_agent = DataLoaderWithTaskname(
'Actor',
data_loader=DataLoader(
val_dataset['Actor'],
batch_size=trainer.args.eval_batch_size,
collate_fn=trainer.data_collator.collate_batch,
),
)
preds_agent = trainer.prediction_loop(eval_dataloader_agent, description='Validation Agent')
# postprocess (remove -100 indices)
labels_argType_wordlevel = []
preds_argType_wordlevel = []
for l,p in zip(preds_arg.label_ids, np.argmax(preds_arg.predictions, axis=2)):
ind = np.logical_and(p > -1, l > -1)
labels_argType_wordlevel.append(l[ind])
preds_argType_wordlevel.append(p[ind])
print('ArgType:')
print('Macro F1: ', compute_macro_f1(gold=labels_argType_wordlevel, pred=preds_argType_wordlevel, id2label=id2label_argType))
# postprocess (remove -100 indices)
labels_agent_wordlevel = []
preds_agent_wordlevel = []
for l,p in zip(preds_agent.label_ids, np.argmax(preds_agent.predictions, axis=2)):
ind = np.logical_and(p > -1, l > -1)
labels_agent_wordlevel.append(l[ind])
preds_agent_wordlevel.append(p[ind])
print('Agent:')
print('Macro F1: ', compute_macro_f1(gold=labels_agent_wordlevel, pred=preds_agent_wordlevel, id2label=id2label_agent))
# save predictions
save_predictions(val_dataset['ArgType']['tokens'], np.argmax(preds_arg.predictions, axis=2), preds_arg.label_ids, os.path.join(pathprefix, output_dir, 'val_preds/', '_'.join(model.split('/')[-3:]) + '-argType.csv'))
save_predictions(val_dataset['Actor']['tokens'], np.argmax(preds_agent.predictions, axis=2), preds_agent.label_ids, os.path.join(pathprefix, output_dir, 'val_preds/', '_'.join(model.split('/')[-3:]) + '-agent.csv'))
# evaluate test data if specified
if args.do_test:
print('\n\n*****TEST DATASET*****\n\n')
eval_dataloader_argType = DataLoaderWithTaskname(
'ArgType',
data_loader=DataLoader(
test_dataset['ArgType'],
batch_size=trainer.args.eval_batch_size,
collate_fn=trainer.data_collator.collate_batch,
),
)
            preds_arg = trainer.prediction_loop(eval_dataloader_argType, description='Test ArgType')
eval_dataloader_agent = DataLoaderWithTaskname(
'Actor',
data_loader=DataLoader(
test_dataset['Actor'],
batch_size=trainer.args.eval_batch_size,
collate_fn=trainer.data_collator.collate_batch,
),
)
            preds_agent = trainer.prediction_loop(eval_dataloader_agent, description='Test Agent')
# postprocess (remove -100 indices)
labels_argType_wordlevel = []
preds_argType_wordlevel = []
for l,p in zip(preds_arg.label_ids, np.argmax(preds_arg.predictions, axis=2)):
ind = np.logical_and(p > -1, l > -1)
labels_argType_wordlevel.append(l[ind])
preds_argType_wordlevel.append(p[ind])
print('ArgType:')
print('Macro F1: ', compute_macro_f1(gold=labels_argType_wordlevel, pred=preds_argType_wordlevel, id2label=id2label_argType))
# postprocess (remove -100 indices)
labels_agent_wordlevel = []
preds_agent_wordlevel = []
for l,p in zip(preds_agent.label_ids, np.argmax(preds_agent.predictions, axis=2)):
ind = np.logical_and(p > -1, l > -1)
labels_agent_wordlevel.append(l[ind])
preds_agent_wordlevel.append(p[ind])
print('Agent:')
print('Macro F1: ', compute_macro_f1(gold=labels_agent_wordlevel, pred=preds_agent_wordlevel, id2label=id2label_agent))
# save predictions
save_predictions(test_dataset['ArgType']['tokens'], np.argmax(preds_arg.predictions, axis=2), preds_arg.label_ids, os.path.join(pathprefix, output_dir, '_'.join(model.split('/')[-3:]) + '-argType.csv'))
save_predictions(test_dataset['Actor']['tokens'], np.argmax(preds_agent.predictions, axis=2), preds_agent.label_ids, os.path.join(pathprefix, output_dir, '_'.join(model.split('/')[-3:]) + '-agent.csv'))
| 20,634 | 44.855556 | 227 | py |
mining-legal-arguments | mining-legal-arguments-main/create_arg_mining_dataset.py | from cassis import *
from collections import Counter
from prettytable import PrettyTable
import os
import pandas as pd
import numpy as np
original_data = True
id2label_argType = ['O', 'B-Subsumtion', 'I-Subsumtion', 'B-Entscheidung des EGMR', 'I-Entscheidung des EGMR',
'B-Vorherige Rechtsprechung des EGMR', 'I-Vorherige Rechtsprechung des EGMR',
'B-Intitutionelle Argumente - Einschätzungsspielraum/Margin of Appreciation',
'I-Intitutionelle Argumente - Einschätzungsspielraum/Margin of Appreciation',
'B-Intitutionelle Argumente - Distinguishing', 'I-Intitutionelle Argumente - Distinguishing',
'B-Intitutionelle Argumente - Overruling', 'I-Intitutionelle Argumente - Overruling',
'B-Verhältnismäßigkeitsprüfung - Angemessenheit/Erforderlichkeit',
'I-Verhältnismäßigkeitsprüfung - Angemessenheit/Erforderlichkeit',
'B-Verhältnismäßigkeitsprüfung - Geeignetheit', 'I-Verhältnismäßigkeitsprüfung - Geeignetheit',
'B-Verhältnismäßigkeitsprüfung - Legitimer Zweck', 'I-Verhältnismäßigkeitsprüfung - Legitimer Zweck' ,
'B-Verhältnismäßigkeitsprüfung - Rechtsgrundlage', 'I-Verhältnismäßigkeitsprüfung - Rechtsgrundlage',
'B-Auslegungsmethoden - Rechtsvergleichung', 'I-Auslegungsmethoden - Rechtsvergleichung',
'B-Auslegungsmethoden - Sinn & Zweck', 'I-Auslegungsmethoden - Sinn & Zweck',
'B-Auslegungsmethoden - Systematische Auslegung', 'I-Auslegungsmethoden - Systematische Auslegung',
'B-Auslegungsmethoden - Historische Auslegung', 'I-Auslegungsmethoden - Historische Auslegung',
'B-Auslegungsmethoden - Wortlaut', 'I-Auslegungsmethoden - Wortlaut',
'B-Konsens der prozessualen Parteien', 'I-Konsens der prozessualen Parteien']
label2id_argType = {}
for i, label in enumerate(id2label_argType):
label2id_argType[label] = i
id2label_agent = ['O', 'B-Beschwerdeführer', 'I-Beschwerdeführer', 'B-EGMR', 'I-EGMR', 'B-Staat',
'I-Staat', 'B-Kommission/Kammer', 'I-Kommission/Kammer', 'B-Dritte', 'I-Dritte']
label2id_agent = {}
for i, label in enumerate(id2label_agent):
label2id_agent[label] = i
with open('gold_data/TypeSystem.xml', 'rb') as f:
typesystem = load_typesystem(f)
def read_xmi(file, typesystem=typesystem):
"""
Reads the data from an xmi file and returns the tokens, paragraphs and annotations.
:param file: path to xmi file
:param typesystem: typesystem of the xmi
:return: tokens, paragraphs and annotations"""
with open(file, 'rb') as f:
cas = load_cas_from_xmi(f, typesystem=typesystem)
tokens = cas.select('de.tudarmstadt.ukp.dkpro.core.api.segmentation.type.Token')
#paragraphs = cas.select('de.tudarmstadt.ukp.dkpro.core.api.segmentation.type.Paragraph')
paragraphs = cas.select('de.tudarmstadt.ukp.dkpro.core.api.segmentation.type.Sentence')
#sents = cas.select('de.tudarmstadt.ukp.dkpro.core.api.segmentation.type.Sentence')
annotations = cas.select('webanno.custom.LegalArgumentation')
return tokens, paragraphs, annotations
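# Illustrative usage (hypothetical file name, documentation only):
#
#   tokens, paragraphs, annotations = read_xmi('gold_data/001-12345.xmi')
#   print(len(tokens), len(paragraphs), len(annotations))
#
# tokens and paragraphs are DKPro Token/Sentence annotations; annotations are the
# custom LegalArgumentation spans carrying the ArgType and Akteur features used below.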
def show_distribution(labels, flatten=False):
"""
Shows the distribution of the labels in the provided list as a table.
    :param labels: list of labels
    :param flatten: whether the labels are nested lists that need to be flattened first"""
table = PrettyTable(['LABEL', 'FREQUENCY', 'PERCENTAGE'])
table.float_format['PERCENTAGE'] = '.2'
table.sortby = 'PERCENTAGE'
table.reversesort = True
if flatten:
labels = [label for sublist in labels for label in sublist]
freq = Counter(labels)
for item in freq:
table.add_row([item, freq[item], freq[item] / len(labels) * 100])
print(table.get_string(end=51)) # change if using more than 25 Arg Types (25* 'B-' + 25* 'I-' + 'O' tag)
def read_data(path):
"""
Reads the data from all xmi files in the specified path and returns lists of the tokens, paragraphs and annotations.
:param path: path to directory with the xmi files
:return: list of tokens, list of paragraphs and annotations"""
docs_token = []
docs_paras = []
docs_anno = []
files = [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.xmi')]
# when using exactly the data of the thesis, delete the two files with less than 5 annotations
if original_data:
del files[files.index(os.path.join(path, '001-67472.xmi'))]
del files[files.index(os.path.join(path, '001-175007.xmi'))]
for f in files:
data = read_xmi(f)
docs_token.append(data[0])
docs_paras.append(data[1])
docs_anno.append(data[2])
return docs_token, docs_paras, docs_anno
def prepare_data(tokens, annotations):
"""
Converts the xmi data into the tokens and corresponding BIO tags for the argType and agent.
:param tokens: list of xmi tokens
:param annotations: list of xmi annotations
:return: list of tokens, list of argType BIO tags and list of agent BIO tags"""
tokens_raw = [x.get_covered_text() for x in tokens]
# id -> position in list of tokens so we can insert the bio tag at appropriate place
lookup = dict()
for i, token in enumerate(tokens):
lookup[token.xmiID] = i
bio_tags_args = len(tokens)*['O'] # O tag for non-arguments
for anno in annotations:
start = anno.begin
end = anno.end
for tok in tokens:
# B tag for begin
if tok.begin == start and anno.ArgType is not None:
bio_tags_args[lookup[tok.xmiID]] = 'B-' + anno.ArgType
# I tag for in between start and end(can't be == end because end is exclusive)
elif tok.begin > start and tok.begin < end and anno.ArgType is not None:
bio_tags_args[lookup[tok.xmiID]] = 'I-' + anno.ArgType
# same for agent tags
bio_tags_agent = len(tokens)*['O'] # O tag for non-arguments
for anno in annotations:
start = anno.begin
end = anno.end
for tok in tokens:
# B tag for begin
if tok.begin == start and anno.Akteur is not None:
bio_tags_agent[lookup[tok.xmiID]] = 'B-' + anno.Akteur
# I tag for in between start and end(can't be == end because end is exclusive)
elif tok.begin > start and tok.begin < end and anno.Akteur is not None:
bio_tags_agent[lookup[tok.xmiID]] = 'I-' + anno.Akteur
return tokens_raw, bio_tags_args, bio_tags_agent
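# Small worked sketch of the BIO conversion (hypothetical span, documentation only):
# if a LegalArgumentation annotation with ArgType 'Subsumtion' covers the tokens
# 'the' and 'applicant', prepare_data yields
#   tokens_raw    = [..., 'the', 'applicant', ...]
#   bio_tags_args = [..., 'B-Subsumtion', 'I-Subsumtion', ...]
# while tokens outside any annotation keep the 'O' tag.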
def paragraphed_tokens(tokens, paragraphs, shorten=True):
"""
Divides the tokens into our input units (paragraphs).
:param tokens: list of xmi tokens
    :param paragraphs: list of xmi paragraphs
    :param shorten: whether to drop everything before the "THE LAW" section
:return: list of paragraphs"""
paragraphed_token = []
for para in paragraphs:
start = para.begin
end = para.end
para_toks = []
for tok in tokens:
if tok.begin >= start and tok.begin < end:
para_toks.append(tok.get_covered_text())
paragraphed_token.append(para_toks)
# shorten files because argumentation starts only after "THE LAW"
if shorten and para_toks == ['THE', 'LAW']:
paragraphed_token = []
if shorten and para_toks == ['AS', 'TO', 'THE', 'LAW']:
paragraphed_token = []
return paragraphed_token
def save_docs(path, filenames, docs_paragraphed_tokens, docs_paragraphed_labels):
"""
Saves the files as csv files with tokens and labels.
:param path: directory to save at
:param filenames: list of the names of each file
:param docs_paragraphed_tokens: list with the paragraphed tokens for each file
:param docs_paragraphed_labels: list with the paragraphed labels for each file"""
assert len(filenames) == len(docs_paragraphed_tokens), 'Number of filenames should match the length of the document lists'
for i, file in enumerate(filenames):
df = pd.DataFrame({'tokens': docs_paragraphed_tokens[i], 'labels': docs_paragraphed_labels[i]})
df.to_csv(os.path.join(path, file) , sep='\t', encoding='utf-8', index=False)
show_stats = True
subword_stats = True
# read files
docs_tok, docs_para, docs_anno = read_data('gold_data/')
# convert xmi data to tokens and tags for each file
docs_tok_raw = []
docs_bio_args = []
docs_bio_agents = []
for i in range(len(docs_anno)):
data = prepare_data(docs_tok[i], docs_anno[i])
docs_tok_raw.append(data[0])
docs_bio_args.append(data[1])
docs_bio_agents.append(data[2])
# get paragraphed tokens for each file
docs_para_tok = [paragraphed_tokens(tokens, paragraphs, shorten=True) for tokens, paragraphs in zip(docs_tok, docs_para)]
# also shorten ArgType and Agent tags
for i, doc in enumerate(docs_para_tok):
    # number of tokens that remain after shortening; keep only the tags for those tokens
    kept = len([item for sublist in doc for item in sublist])
    docs_bio_args[i] = docs_bio_args[i][-kept:]
    docs_bio_agents[i] = docs_bio_agents[i][-kept:]
# get paragraph representation for both tagsets
docs_para_argType = []
docs_para_agent = []
for i, para_tok_doc in enumerate(docs_para_tok):
    start = 0
    paragraphed_labels_argType = []
    paragraphed_labels_agent = []
    for par in para_tok_doc:
end = start + len(par)
paragraphed_labels_argType.append(docs_bio_args[i][start:end])
paragraphed_labels_agent.append(docs_bio_agents[i][start:end])
start = end
docs_para_argType.append(paragraphed_labels_argType)
docs_para_agent.append(paragraphed_labels_agent)
# save files
files = [f[:-4] + '.csv' for f in os.listdir('gold_data/') if f.endswith('.xmi')]
# when using exactly the data of the thesis, delete the two files with less than 5 annotations
if original_data:
del files[files.index('001-67472.csv')]
del files[files.index('001-175007.csv')]
save_docs('new_data/argType/', files, docs_para_tok, docs_para_argType)
save_docs('new_data/agent/', files, docs_para_tok, docs_para_agent)
# compute cutoffs for train, val, test in 80/10/10 split
trainindex = int(0.8 * len(docs_para_tok))
valindex = int(0.9 * len(docs_para_tok))
# save partitioned data
save_docs('new_data/train/argType/', files[:trainindex], docs_para_tok[:trainindex], docs_para_argType[:trainindex])
save_docs('new_data/train/agent/', files[:trainindex], docs_para_tok[:trainindex], docs_para_agent[:trainindex])
save_docs('new_data/val/argType/', files[trainindex:valindex], docs_para_tok[trainindex:valindex], docs_para_argType[trainindex:valindex])
save_docs('new_data/val/agent/', files[trainindex:valindex], docs_para_tok[trainindex:valindex], docs_para_agent[trainindex:valindex])
save_docs('new_data/test/argType/', files[valindex:], docs_para_tok[valindex:], docs_para_argType[valindex:])
save_docs('new_data/test/agent/', files[valindex:], docs_para_tok[valindex:], docs_para_agent[valindex:])
if show_stats:
print('ArgTypes at Argument Level:')
show_distribution([anno.ArgType for annotations in docs_anno for anno in annotations])
print('Agents at Argument Level:')
show_distribution([anno.Akteur for annotations in docs_anno for anno in annotations])
print('BIO Tags of ArgTypes: ')
show_distribution([argType for doc in docs_para_argType for argType in doc], flatten=True)
print('BIO Tags of Agents: ')
show_distribution([agent for doc in docs_para_agent for agent in doc], flatten=True)
# further statistics
all_para_toks = [token for sublist in docs_para_tok for token in sublist]
all_para_argType = [argType for sublist in docs_para_argType for argType in sublist]
all_para_agent = [agent for sublist in docs_para_agent for agent in sublist]
seq_len = [len(para) for sublist in docs_para_tok for para in sublist]
doc_len = []
for doc in docs_para_tok:
length = 0
for para in doc:
length += len(para)
doc_len.append(length)
print('Sequence Statistics at Word Level:')
table = PrettyTable(['SEQUENCE TYPE', 'LENGTH'])
table.float_format['LENGTH'] = '.1'
table.add_row(['Document Min Length', min(doc_len)])
table.add_row(['Document Max Length', max(doc_len)])
table.add_row(['Document Mean Length', np.mean(doc_len)])
table.add_row(['Document Median Length', np.median(doc_len)])
table.add_row(['Paragraph Min Length', min(seq_len)])
table.add_row(['Paragraph Max Length', max(seq_len)])
table.add_row(['Paragraph Mean Length', np.mean(seq_len)])
table.add_row(['Paragraph Median Length', np.median(seq_len)])
print(table.get_string())
if show_stats and subword_stats:
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('roberta-large')
tokenizer.add_prefix_space=True
docs_seq_len = []
for doc in docs_para_tok:
tokenized = tokenizer(doc, is_split_into_words=True)
seq_lens = [len(seq) for seq in tokenized['input_ids']]
docs_seq_len.append(seq_lens)
print('Sequence Statistics at Roberta Tokenized (Subword) Level:')
table = PrettyTable(['SEQUENCE TYPE', 'LENGTH'])
table.float_format['LENGTH'] = '.1'
table.add_row(['Document Min Length', min([sum(doc_lens) for doc_lens in docs_seq_len])])
table.add_row(['Document Max Length', max([sum(doc_lens) for doc_lens in docs_seq_len])])
table.add_row(['Document Mean Length', np.mean([sum(doc_lens) for doc_lens in docs_seq_len])])
table.add_row(['Document Median Length', np.median([sum(doc_lens) for doc_lens in docs_seq_len])])
table.add_row(['Paragraph Min Length', min([para for doc_lens in docs_seq_len for para in doc_lens])])
table.add_row(['Paragraph Max Length', max([para for doc_lens in docs_seq_len for para in doc_lens])])
table.add_row(['Paragraph Mean Length', np.mean([para for doc_lens in docs_seq_len for para in doc_lens])])
table.add_row(['Paragraph Median Length', np.median([para for doc_lens in docs_seq_len for para in doc_lens])])
print(table.get_string())
| 13,984 | 45.616667 | 140 | py |
mining-legal-arguments | mining-legal-arguments-main/create_confusion_matrix.py | #!/usr/bin/env python
# coding: utf-8
import pandas as pd
from ast import literal_eval
from sklearn.metrics import confusion_matrix
from confusion_matrix_pretty_print import pretty_plot_confusion_matrix
id2label_argType = ['B-Distinguishing',
'B-Einschätzungsspielraum',
'B-Entscheidung des EGMR',
'B-Konsens der prozessualen Parteien',
'B-Overruling',
'B-Rechtsvergleichung',
'B-Sinn & Zweck Auslegung',
'B-Subsumtion',
'B-Systematische Auslegung',
'B-Verhältnismäßigkeitsprüfung – Angemessenheit',
'B-Verhältnismäßigkeitsprüfung – Geeignetheit',
'B-Verhältnismäßigkeitsprüfung – Legitimer Zweck',
'B-Verhältnismäßigkeitsprüfung – Rechtsgrundlage',
'B-Vorherige Rechtsprechung des EGMR',
'B-Wortlaut Auslegung',
'I-Distinguishing',
'I-Einschätzungsspielraum',
'I-Entscheidung des EGMR',
'I-Konsens der prozessualen Parteien',
'I-Overruling',
'I-Rechtsvergleichung',
'I-Sinn & Zweck Auslegung',
'I-Subsumtion',
'I-Systematische Auslegung',
'I-Verhältnismäßigkeitsprüfung – Angemessenheit',
'I-Verhältnismäßigkeitsprüfung – Geeignetheit',
'I-Verhältnismäßigkeitsprüfung – Legitimer Zweck',
'I-Verhältnismäßigkeitsprüfung – Rechtsgrundlage',
'I-Vorherige Rechtsprechung des EGMR',
'I-Wortlaut Auslegung',
'O']
id2label_agent = ['B-Beschwerdeführer',
'B-Dritte',
'B-EGMR',
'B-Kommission/Kammer',
'B-Staat',
'I-Beschwerdeführer',
'I-Dritte',
'I-EGMR',
'I-Kommission/Kammer',
'I-Staat',
'O']
def load_predictions(file):
"""
Loads saved model predictions and returns them in a Dataframe.
:param file: path of the predictions to be loaded
:return: DataFrame of these predictions"""
df = pd.read_csv(file, sep='\t', encoding='utf-8')
df['Labels'] = df['Labels'].map(lambda x: literal_eval(x))
df['Predictions'] = df['Predictions'].map(lambda x: literal_eval(x))
df['Tokens'] = df['Tokens'].map(lambda x: literal_eval(x))
return df
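# Illustrative usage (hypothetical path, documentation only):
#
#   df = load_predictions('predictions/legal-bert-final_checkpoint-39820_bert-agent.csv')
#   # df has the columns 'Predictions', 'Labels' and 'Tokens', each holding one list per paragraph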
# name of cm -> path to predictions without the label type at the end
files = {'LEGAL-BERT': 'predictions/legal-bert-final_checkpoint-39820_bert',
'RoBERTa Large': 'predictions/roberta-large-final_checkpoint-111482_roberta',
'Further Pretraining for 13k steps of RoBERTa Large on legal data': 'predictions/roberta-large-fp-13000_checkpoint-95556_roberta',
'Further Pretraining for 15k steps of RoBERTa Large on legal data': 'predictions/roberta-large-fp-15000_checkpoint-143334_roberta'}
# plot cms
cmap = 'Oranges'
for k,v in files.items():
arg = load_predictions(v + '-argType.csv')
ag = load_predictions(v + '-agent.csv')
# agent
cm = confusion_matrix([l for sublist in ag['Labels'] for l in sublist], [p for sublist in ag['Predictions'] for p in sublist], labels=range(len(id2label_agent)))
df_cm = pd.DataFrame(cm, index=id2label_agent, columns=id2label_agent)
pretty_plot_confusion_matrix(df_cm, cmap=cmap, figsize=[11,11], title='Confusion Matrix ' + k + ' Agent', path='figures/confusion_matrices/cm_' + '_'.join(k.split()) + '_agent.png')
# arg type
cm = confusion_matrix([l for sublist in arg['Labels'] for l in sublist], [p for sublist in arg['Predictions'] for p in sublist], labels=range(len(id2label_argType)))
df_cm = pd.DataFrame(cm, index=id2label_argType, columns=id2label_argType)
pretty_plot_confusion_matrix(df_cm, cmap=cmap, figsize=[31,31], title='Confusion Matrix ' + k + ' Argument Type', path='figures/confusion_matrices/cm_' + '_'.join(k.split()) + '_argType.png')
| 3,478 | 38.534091 | 195 | py |
mining-legal-arguments | mining-legal-arguments-main/multiTaskModel.py | #!/usr/bin/env python
# coding: utf-8
from collections import Counter
from prettytable import PrettyTable
import os
from transformers import AutoTokenizer
import torch
from torch.utils.data import Dataset
import pandas as pd
from datasets import load_dataset, load_metric
import csv
from ast import literal_eval
import numpy as np
import torch.nn as nn
import transformers
import logging
import dataclasses
from torch.utils.data.dataloader import DataLoader
from transformers.training_args import is_torch_tpu_available
from transformers.trainer_pt_utils import get_tpu_sampler
# torch_xla is only available (and only needed) when running on a TPU
if is_torch_tpu_available():
    import torch_xla.core.xla_model as xm
    import torch_xla.distributed.parallel_loader as pl
from transformers.data.data_collator import DataCollator, InputDataClass
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler
from typing import List, Union, Dict
from transformers import DataCollatorForTokenClassification
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.file_utils import PaddingStrategy
from typing import Optional, Any
import argparse
from tabulate import tabulate
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
id2label_argType = ['B-Distinguishing',
'B-Einschätzungsspielraum',
'B-Entscheidung des EGMR',
'B-Konsens der prozessualen Parteien',
'B-Overruling',
'B-Rechtsvergleichung',
'B-Sinn & Zweck Auslegung',
'B-Subsumtion',
'B-Systematische Auslegung',
'B-Verhältnismäßigkeitsprüfung – Angemessenheit',
'B-Verhältnismäßigkeitsprüfung – Geeignetheit',
'B-Verhältnismäßigkeitsprüfung – Legitimer Zweck',
'B-Verhältnismäßigkeitsprüfung – Rechtsgrundlage',
'B-Vorherige Rechtsprechung des EGMR',
'B-Wortlaut Auslegung',
'I-Distinguishing',
'I-Einschätzungsspielraum',
'I-Entscheidung des EGMR',
'I-Konsens der prozessualen Parteien',
'I-Overruling',
'I-Rechtsvergleichung',
'I-Sinn & Zweck Auslegung',
'I-Subsumtion',
'I-Systematische Auslegung',
'I-Verhältnismäßigkeitsprüfung – Angemessenheit',
'I-Verhältnismäßigkeitsprüfung – Geeignetheit',
'I-Verhältnismäßigkeitsprüfung – Legitimer Zweck',
'I-Verhältnismäßigkeitsprüfung – Rechtsgrundlage',
'I-Vorherige Rechtsprechung des EGMR',
'I-Wortlaut Auslegung',
'O']
label2id_argType = {}
for i, label in enumerate(id2label_argType):
label2id_argType[label] = i
id2label_agent = ['B-Beschwerdeführer',
'B-Dritte',
'B-EGMR',
'B-Kommission/Kammer',
'B-Staat',
'I-Beschwerdeführer',
'I-Dritte',
'I-EGMR',
'I-Kommission/Kammer',
'I-Staat',
'O']
label2id_agent = {}
for i, label in enumerate(id2label_agent):
label2id_agent[label] = i
def tokenize_and_align_labels_argType(examples, label_all_tokens=False):
"""
Tokenizes the input using the tokenizer and aligns the argument type labels to the subwords.
:param examples: input dataset
:param label_all_tokens: Whether to label all subwords of a token or only the first subword
:return: Tokenized input"""
tokenized_inputs = tokenizer(examples['tokens'], truncation=True, is_split_into_words=True)
labels = []
for i, label in enumerate(examples['labels']):
word_ids = tokenized_inputs.word_ids(batch_index=i)
previous_word_idx = None
label_ids = []
for word_idx in word_ids:
# Special tokens have a word id that is None. We set the label to -100 so they are automatically
# ignored in the loss function.
if word_idx is None:
label_ids.append(-100)
# We set the label for the first token of each word.
elif word_idx != previous_word_idx:
label_ids.append(label2id_argType[label[word_idx]])
# For the other tokens in a word, we set the label to either the current label or -100, depending on
# the label_all_tokens flag.
else:
label_ids.append(label2id_argType[label[word_idx]] if label_all_tokens else -100)
previous_word_idx = word_idx
labels.append(label_ids)
tokenized_inputs["labels"] = labels
return tokenized_inputs
def tokenize_and_align_labels_agent(examples, label_all_tokens=False):
"""
Tokenizes the input using the tokenizer and aligns the agent labels to the subwords.
:param examples: input dataset
:param label_all_tokens: Whether to label all subwords of a token or only the first subword
:return: Tokenized input"""
tokenized_inputs = tokenizer(examples['tokens'], truncation=True, is_split_into_words=True)
labels = []
for i, label in enumerate(examples['labels']):
word_ids = tokenized_inputs.word_ids(batch_index=i)
previous_word_idx = None
label_ids = []
for word_idx in word_ids:
# Special tokens have a word id that is None. We set the label to -100 so they are automatically
# ignored in the loss function.
if word_idx is None:
label_ids.append(-100)
# We set the label for the first token of each word.
elif word_idx != previous_word_idx:
label_ids.append(label2id_agent[label[word_idx]])
# For the other tokens in a word, we set the label to either the current label or -100, depending on
# the label_all_tokens flag.
else:
label_ids.append(label2id_agent[label[word_idx]] if label_all_tokens else -100)
previous_word_idx = word_idx
labels.append(label_ids)
tokenized_inputs["labels"] = labels
return tokenized_inputs
class MultitaskModel(transformers.PreTrainedModel):
def __init__(self, encoder, taskmodels_dict):
"""
Setting MultitaskModel up as a PretrainedModel allows us
to take better advantage of Trainer features
"""
super().__init__(transformers.PretrainedConfig())
self.encoder = encoder
self.taskmodels_dict = nn.ModuleDict(taskmodels_dict)
@classmethod
def create(cls, model_name, model_type_dict, model_config_dict):
"""
This creates a MultitaskModel using the model class and config objects
from single-task models.
We do this by creating each single-task model, and having them share
the same encoder transformer.
"""
shared_encoder = None
taskmodels_dict = {}
for task_name, model_type in model_type_dict.items():
model = model_type.from_pretrained(
model_name,
config=model_config_dict[task_name],
)
if shared_encoder is None:
shared_encoder = getattr(model, cls.get_encoder_attr_name(model))
else:
setattr(model, cls.get_encoder_attr_name(model), shared_encoder)
taskmodels_dict[task_name] = model
return cls(encoder=shared_encoder, taskmodels_dict=taskmodels_dict)
@classmethod
def get_encoder_attr_name(cls, model):
"""
The encoder transformer is named differently in each model "architecture".
This method lets us get the name of the encoder attribute
"""
model_class_name = model.__class__.__name__
if model_class_name.startswith("Bert"):
return "bert"
elif model_class_name.startswith("Roberta"):
return "roberta"
elif model_class_name.startswith("Albert"):
return "albert"
elif model_class_name.startswith("DistilBert"):
return "distilbert"
else:
raise KeyError(f"Add support for new model {model_class_name}")
def forward(self, task_name, **kwargs):
return self.taskmodels_dict[task_name](**kwargs)
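# Illustrative sketch of how the class is used (mirrors the call in __main__ below,
# shown here as documentation only; the model name is an assumption):
#
#   model = MultitaskModel.create(
#       model_name='roberta-large',
#       model_type_dict={'ArgType': transformers.AutoModelForTokenClassification,
#                        'Actor': transformers.AutoModelForTokenClassification},
#       model_config_dict={
#           'ArgType': transformers.AutoConfig.from_pretrained('roberta-large', num_labels=len(id2label_argType)),
#           'Actor': transformers.AutoConfig.from_pretrained('roberta-large', num_labels=len(id2label_agent))})
#   outputs = model('ArgType', input_ids=batch['input_ids'],
#                   attention_mask=batch['attention_mask'], labels=batch['labels'])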
class StrIgnoreDevice(str):
"""
This is a hack. The Trainer is going call .to(device) on every input
value, but we need to pass in an additional `task_name` string.
This prevents it from throwing an error
"""
def to(self, device):
return self
class DataLoaderWithTaskname:
"""
Wrapper around a DataLoader to also yield a task name
"""
def __init__(self, task_name, data_loader):
self.task_name = task_name
self.data_loader = data_loader
self.batch_size = data_loader.batch_size
self.dataset = data_loader.dataset
def __len__(self):
return len(self.data_loader)
def __iter__(self):
for batch in self.data_loader:
batch["task_name"] = StrIgnoreDevice(self.task_name)
yield batch
class MultitaskDataloader:
"""
Data loader that combines and samples from multiple single-task
data loaders.
"""
def __init__(self, dataloader_dict):
self.dataloader_dict = dataloader_dict
self.num_batches_dict = {
task_name: len(dataloader)
for task_name, dataloader in self.dataloader_dict.items()
}
self.task_name_list = list(self.dataloader_dict)
self.dataset = [None] * sum(
len(dataloader.dataset)
for dataloader in self.dataloader_dict.values()
)
def __len__(self):
return sum(self.num_batches_dict.values())
def __iter__(self):
"""
For each batch, sample a task, and yield a batch from the respective
task Dataloader.
We use size-proportional sampling, but you could easily modify this
to sample from some-other distribution.
"""
task_choice_list = []
for i, task_name in enumerate(self.task_name_list):
task_choice_list += [i] * self.num_batches_dict[task_name]
task_choice_list = np.array(task_choice_list)
np.random.shuffle(task_choice_list)
dataloader_iter_dict = {
task_name: iter(dataloader)
for task_name, dataloader in self.dataloader_dict.items()
}
for task_choice in task_choice_list:
task_name = self.task_name_list[task_choice]
yield next(dataloader_iter_dict[task_name])
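# Worked sketch of the size-proportional sampling above (hypothetical numbers):
# with num_batches_dict = {'ArgType': 100, 'Actor': 100}, task_choice_list holds
# 200 entries (100 zeros and 100 ones); after shuffling, each yielded batch comes
# from either task with equal probability until both iterators are exhausted.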
class MultitaskTrainer(transformers.Trainer):
def get_single_train_dataloader(self, task_name, train_dataset):
"""
Create a single-task data loader that also yields task names
"""
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
if is_torch_tpu_available():
train_sampler = get_tpu_sampler(train_dataset)
else:
train_sampler = (
RandomSampler(train_dataset)
if self.args.local_rank == -1
else DistributedSampler(train_dataset)
)
data_loader = DataLoaderWithTaskname(
task_name=task_name,
data_loader=DataLoader(
train_dataset,
batch_size=self.args.train_batch_size,
sampler=train_sampler,
collate_fn=self.data_collator.collate_batch,
),
)
if is_torch_tpu_available():
data_loader = pl.ParallelLoader(
data_loader, [self.args.device]
).per_device_loader(self.args.device)
return data_loader
def get_train_dataloader(self):
"""
Returns a MultitaskDataloader, which is not actually a Dataloader
but an iterable that returns a generator that samples from each
task Dataloader.
"""
return MultitaskDataloader({
task_name: self.get_single_train_dataloader(task_name, task_dataset)
for task_name, task_dataset in self.train_dataset.items()
})
    def get_eval_dataloader(self, eval_dataset=None):
        """
        Returns a DataLoaderWithTaskname for the argument type task
        for evaluation of it during the training.
        The eval_dataset argument passed in by the Trainer is ignored; the
        ArgType split of the eval_dataset given at construction time is used instead.
        """
        eval_dataloader_argType = DataLoaderWithTaskname(
            'ArgType',
            data_loader=DataLoader(
                self.eval_dataset['ArgType'],
                batch_size=self.args.eval_batch_size,
                collate_fn=self.data_collator.collate_batch,
            ),
        )
        return eval_dataloader_argType
def save_model(self, output_dir: Optional[str] = None):
"""
Saving best-practices: if you use default names for the model,
you can reload it using from_pretrained().
Will only save from the world_master process (unless in TPUs).
"""
if is_torch_tpu_available():
self._save_tpu(output_dir)
elif self.is_world_process_zero():
self._save(output_dir)
def _save_tpu(self, output_dir: Optional[str] = None):
output_dir = output_dir if output_dir is not None else self.args.output_dir
logger.info("Saving model checkpoint to %s", output_dir)
if xm.is_master_ordinal():
os.makedirs(output_dir, exist_ok=True)
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
xm.rendezvous("saving_checkpoint")
torch.save(self.model, os.path.join(output_dir, self.model.encoder.base_model_prefix))
def _save(self, output_dir: Optional[str] = None):
output_dir = output_dir if output_dir is not None else self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
logger.info("Saving model checkpoint to %s", output_dir)
# Low-Level workaround for MultiTaskModel
torch.save(self.model, os.path.join(output_dir, self.model.encoder.base_model_prefix))
# Good practice: save your training arguments together with the trained model
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
@dataclasses.dataclass
class MyDataCollatorForTokenClassification:
"""
Data collator that will dynamically pad the inputs received, as well as the labels.
Args:
tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):
The tokenizer used for encoding the data.
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.file_utils.PaddingStrategy`, `optional`, defaults to :obj:`True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence if provided).
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
maximum acceptable input length for the model if that argument is not provided.
* :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
different lengths).
max_length (:obj:`int`, `optional`):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (:obj:`int`, `optional`):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
label_pad_token_id (:obj:`int`, `optional`, defaults to -100):
The id to use when padding the labels (-100 will be automatically ignore by PyTorch loss functions).
"""
tokenizer: PreTrainedTokenizerBase
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
label_pad_token_id: int = -100
    # Note: MultitaskTrainer passes collate_batch (below) as the collate_fn, so __call__ is effectively unused here.
def __call__(self, features):
label_name = "label" if "label" in features[0].keys() else "labels"
labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
batch = self.tokenizer.pad(
features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
# Conversion to tensors will fail if we have labels as they are not of the same length yet.
return_tensors="pt" if labels is None else None,
)
if labels is None:
return batch
sequence_length = torch.tensor(batch["input_ids"]).shape[1]
padding_side = self.tokenizer.padding_side
if padding_side == "right":
batch["labels"] = [label + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels]
else:
batch["labels"] = [[self.label_pad_token_id] * (sequence_length - len(label)) + label for label in labels]
batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
return batch
def collate_batch(self, features, pad_to_multiple_of: Optional[int] = None):
label_name = "label" if "label" in features[0].keys() else "labels"
labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
batch = self.tokenizer.pad(
features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
# Conversion to tensors will fail if we have labels as they are not of the same length yet.
return_tensors="pt" if labels is None else None,
)
if labels is None:
return batch
del batch['tokens']
sequence_length = torch.tensor(batch["input_ids"]).shape[1]
padding_side = self.tokenizer.padding_side
if padding_side == "right":
batch["labels"] = [label + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels]
else:
batch["labels"] = [[self.label_pad_token_id] * (sequence_length - len(label)) + label for label in labels]
batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
return batch
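# Illustrative sketch of the label padding performed by collate_batch (hypothetical
# values, documentation only): with padding_side == 'right' and a longest sequence of
# 6 subwords in the batch, a label list like [-100, 3, -100, 7] is padded to
# [-100, 3, -100, 7, -100, -100], so every row has equal length and the padded
# positions are ignored by the loss.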
def compute_f1(label, gold, pred):
"""
Computes the F1 Score for a single class.
    :param label: the class to compute the score for
:param gold: the gold standard
:param pred: the model predictions
:return: the F1 score for the label"""
tp = 0
fp = 0
fn = 0
for i, sent in enumerate(pred):
for j, tag in enumerate(sent):
# check for relevant label to compute F1
if tag == label:
# if relevant and equals gold -> true positive
if tag == gold[i][j]:
tp += 1
# if it differs from gold -> false positive
else:
fp += 1
# we have a negative, so check if it's a false negative
else:
if gold[i][j] == label:
fn += 1
# use epsilon to avoid division by zero
precision = tp / (tp + fp + 1e-10)
recall = tp / (tp + fn + 1e-10)
f1 = 2 * precision * recall / (precision + recall + 1e-10)
return f1
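# Tiny worked example for compute_f1 (hypothetical label ids, documentation only):
#
#   gold = [[1, 1, 0]]
#   pred = [[1, 0, 0]]
#   compute_f1(1, gold, pred)  # tp=1, fp=0, fn=1 -> precision=1.0, recall=0.5, F1~0.67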
def compute_macro_f1(gold, pred, id2label):
"""
Computes the Macro F1 Score over all classes.
:param gold: the gold standard
:param pred: the model predictions
:param id2label: the mapping list for the current labels
:return: the Macro F1 score"""
f1s = [(tag, compute_f1(tag, gold, pred)) for tag in range(len(id2label))]
all_f1s = [(id2label[idx], score) for idx, score in f1s]
df = pd.DataFrame(all_f1s, columns=['Label', 'F1'])
df['F1'] = np.around(df['F1'], decimals=4)
print(tabulate(df, headers='keys', tablefmt='pretty', showindex=False))
f1_scores = [f1[1] for f1 in f1s]
macro_f1 = np.sum(f1_scores) / len(f1_scores)
#print('Macro F1: ', macro_f1)
return macro_f1
def eval_f1(evalpred):
"""
Computes the Macro F1 Score over all argument type classes during train evaluation.
:param evalpred: evalpred from the trainer
:return: the Macro F1 score"""
pred = []
gold = []
for p,l in zip(np.argmax(evalpred.predictions, axis=2), evalpred.label_ids):
ind = np.logical_and(p > -1, l > -1)
pred.append(p[ind])
gold.append(l[ind])
f1s = [(tag, compute_f1(tag, gold, pred)) for tag in range(len(id2label_argType))]
all_f1s = [(id2label_argType[idx], score) for idx, score in f1s]
#print('F1 for each Class: ', all_f1s)
f1_scores = [f1[1] for f1 in f1s]
macro_f1 = np.sum(f1_scores) / len(f1_scores)
return {"F1 ArgType": macro_f1}
if __name__ == '__main__':
# parse optional args
parser = argparse.ArgumentParser(description='Train a MultiTask model')
parser.add_argument('--pathprefix', help='path to the project directory')
parser.add_argument('--model', help='name of the model or path to the model')
parser.add_argument('--tokenizer', help='name of the model or path to the tokenizer')
parser.add_argument('--batch_size', type=int, help='batch size of the model')
parser.add_argument('--output_dir', help='path to the output directory')
args = parser.parse_args()
# path to working directory
pathprefix = '/ukp-storage-1/dfaber/'
#pathprefix = ''
if args.pathprefix:
pathprefix = args.pathprefix
# load datasets
trainfiles = [f for f in os.listdir(pathprefix + 'data/train/argType/') if f.endswith('.csv')]
valfiles = [f for f in os.listdir(pathprefix + 'data/val/argType/') if f.endswith('.csv')]
dataset_argType = load_dataset('csv', data_files={'train': [pathprefix + 'data/train/argType/' + file for file in trainfiles],
'validation': [pathprefix + 'data/val/argType/' + file for file in valfiles]}, delimiter='\t')
dataset_actor = load_dataset('csv', data_files={'train': [pathprefix + 'data/train/agent/' + file for file in trainfiles],
'validation': [pathprefix + 'data/val/agent/' + file for file in valfiles]}, delimiter='\t')
dataset_argType = dataset_argType.map(lambda x: {'tokens': literal_eval(x['tokens']), 'labels': literal_eval(x['labels'])})
dataset_actor = dataset_actor.map(lambda x: {'tokens': literal_eval(x['tokens']), 'labels': literal_eval(x['labels'])})
    # select the model with the corresponding tokenizer
#model_name = "/ukp-storage-1/dfaber/models/court_bert/checkpoint-20000"
#tokenizer = AutoTokenizer.from_pretrained('/ukp-storage-1/dfaber/legal_tokenizer_bert', do_lower_case=False)
model_name = "/ukp-storage-1/dfaber/models/roberta-large-finetuned/checkpoint-15000"
#model_name = 'roberta-large'
tokenizer = AutoTokenizer.from_pretrained('roberta-large')
#model_name = 'nlpaueb/legal-bert-base-uncased'
#tokenizer = AutoTokenizer.from_pretrained('nlpaueb/legal-bert-base-uncased')
# use parsed args if provided
if args.model:
model_name = args.model
if args.tokenizer:
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
# need prefix space for already tokenized data
if 'roberta' in model_name:
tokenizer.add_prefix_space = True
if tokenizer.model_max_length > 1024:
tokenizer.model_max_length = 512
# tokenize and align labels
tokenized_dataset_argType = dataset_argType.map(tokenize_and_align_labels_argType, batched=True)
tokenized_dataset_actor = dataset_actor.map(tokenize_and_align_labels_agent, batched=True)
# create multitask dataset
dataset_dict = {
"ArgType": tokenized_dataset_argType,
"Actor": tokenized_dataset_actor,
}
# create multitask model
multitask_model = MultitaskModel.create(
model_name=model_name,
model_type_dict={
"ArgType": transformers.AutoModelForTokenClassification,
"Actor": transformers.AutoModelForTokenClassification,
},
model_config_dict={
"ArgType": transformers.AutoConfig.from_pretrained(model_name, num_labels=len(id2label_argType)),
"Actor": transformers.AutoConfig.from_pretrained(model_name, num_labels=len(id2label_agent)),
},
)
# create data collator
data_collator= MyDataCollatorForTokenClassification(tokenizer)
# split dataset into training and evaluation (dev) dataset
train_dataset = {
task_name: dataset["train"]
for task_name, dataset in dataset_dict.items()
}
eval_dataset = {
task_name: dataset["validation"]
for task_name, dataset in dataset_dict.items()
}
# set training parameter and train the model
output_dir = pathprefix + 'models/multitask/roberta-large-fp-15000'
batch_size = 4
    # use parsed args if provided
if args.output_dir:
output_dir = args.output_dir
if args.batch_size:
batch_size = args.batch_size
train_args = transformers.TrainingArguments(
output_dir,
evaluation_strategy = "epoch",
logging_steps=1592,
learning_rate=1e-5,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
num_train_epochs=10,
weight_decay=0.01,
warmup_steps=1000,
save_steps=15926,
save_total_limit = 10,
logging_dir=pathprefix + 'logs',
)
trainer = MultitaskTrainer(
model=multitask_model,
args=train_args,
data_collator=data_collator,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
tokenizer=tokenizer,
compute_metrics=eval_f1,
)
trainer.train()
| 25,615 | 37.232836 | 144 | py |
mining-legal-arguments | mining-legal-arguments-main/compare_f1s.py | #!/usr/bin/env python
# coding: utf-8
import pandas as pd
import csv
from ast import literal_eval
from collections import Counter
import numpy as np
from tabulate import tabulate
from multiTaskModel import compute_f1
id2label_argType = ['B-Distinguishing',
'B-Einschätzungsspielraum',
'B-Entscheidung des EGMR',
'B-Konsens der prozessualen Parteien',
'B-Overruling',
'B-Rechtsvergleichung',
'B-Sinn & Zweck Auslegung',
'B-Subsumtion',
'B-Systematische Auslegung',
'B-Verhältnismäßigkeitsprüfung – Angemessenheit',
'B-Verhältnismäßigkeitsprüfung – Geeignetheit',
'B-Verhältnismäßigkeitsprüfung – Legitimer Zweck',
'B-Verhältnismäßigkeitsprüfung – Rechtsgrundlage',
'B-Vorherige Rechtsprechung des EGMR',
'B-Wortlaut Auslegung',
'I-Distinguishing',
'I-Einschätzungsspielraum',
'I-Entscheidung des EGMR',
'I-Konsens der prozessualen Parteien',
'I-Overruling',
'I-Rechtsvergleichung',
'I-Sinn & Zweck Auslegung',
'I-Subsumtion',
'I-Systematische Auslegung',
'I-Verhältnismäßigkeitsprüfung – Angemessenheit',
'I-Verhältnismäßigkeitsprüfung – Geeignetheit',
'I-Verhältnismäßigkeitsprüfung – Legitimer Zweck',
'I-Verhältnismäßigkeitsprüfung – Rechtsgrundlage',
'I-Vorherige Rechtsprechung des EGMR',
'I-Wortlaut Auslegung',
'O']
label2id_argType = {}
for i, label in enumerate(id2label_argType):
label2id_argType[label] = i
id2label_agent = ['B-Beschwerdeführer',
'B-Dritte',
'B-EGMR',
'B-Kommission/Kammer',
'B-Staat',
'I-Beschwerdeführer',
'I-Dritte',
'I-EGMR',
'I-Kommission/Kammer',
'I-Staat',
'O']
label2id_agent = {}
for i, label in enumerate(id2label_agent):
label2id_agent[label] = i
def load_predictions(file):
"""
Loads saved model predictions and returns them in a Dataframe.
:param file: path of the predictions to be loaded
:return: DataFrame of these predictions"""
df = pd.read_csv(file, sep='\t', encoding='utf-8')
df['Labels'] = df['Labels'].map(lambda x: literal_eval(x))
df['Predictions'] = df['Predictions'].map(lambda x: literal_eval(x))
df['Tokens'] = df['Tokens'].map(lambda x: literal_eval(x))
return df
def compute_macro_f1(gold, pred, id2label):
"""
Computes the Macro F1 Score over all classes.
:param gold: the gold standard
:param pred: the model predictions
:return: the Macro F1 score"""
f1s = [(tag, compute_f1(tag, gold, pred)) for tag in range(len(id2label))]
all_f1s = [(id2label[idx], score) for idx, score in f1s]
#print('F1 for each Class: ', all_f1s)
f1_scores = [f1[1] for f1 in f1s]
macro_f1 = np.sum(f1_scores) / len(f1_scores)
#print('Macro F1: ', macro_f1)
return np.around(macro_f1, decimals=4), all_f1s
# choose the models to compare (replace with your own models if needed); schema: $DisplayName: $prediction_path_without_label_type_at_the_end
# dev set
#files = {'LB1': 'predictions/val_preds/legal-bert-final_checkpoint-39820_bert', 'LB2': 'predictions/val_preds/legal-bert-final_checkpoint-47784_bert',
#         'RBL1': 'predictions/val_preds/roberta-large-final_checkpoint-111482_roberta', 'RBL2': 'predictions/val_preds/roberta-large-final_checkpoint-143334_roberta',
#         'FP13k1': 'predictions/val_preds/roberta-large-fp-13000_checkpoint-95556_roberta', 'FP13k2': 'predictions/val_preds/roberta-large-fp-13000_checkpoint-143334_roberta',
#         'FP15k1': 'predictions/val_preds/roberta-large-fp-15000_checkpoint-143334_roberta', 'FP15k2': 'predictions/val_preds/roberta-large-fp-15000_checkpoint-159260_roberta'}
# test set
files = {'LB1': 'predictions/val_preds/legal-bert-final_checkpoint-39820_bert', 'LB2': 'predictions/legal-bert-final_checkpoint-47784_bert',
'RBL1': 'predictions/roberta-large-final_checkpoint-111482_roberta', 'RBL2': 'predictions/roberta-large-final_checkpoint-143334_roberta',
'FP13k1': 'predictions/roberta-large-fp-13000_checkpoint-95556_roberta', 'FP13k2': 'predictions/roberta-large-fp-13000_checkpoint-143334_roberta',
'FP15k1': 'predictions/roberta-large-fp-15000_checkpoint-143334_roberta', 'FP15k2': 'predictions/roberta-large-fp-15000_checkpoint-159260_roberta'}
# test set art. 3 (the data distribution differs; the distribution shown refers only to the first model and does not reflect the data of the second model)
#files = {'Art. 3': 'predictions/article_3/roberta-large-fp-15000_checkpoint-143334_roberta', 'Best': 'predictions/roberta-large-fp-15000_checkpoint-143334_roberta'}
# initialize DataFrames (ArgType and Agent) with labels and frequency of them and sort by their frequency
for k,v in files.items():
arg = load_predictions(v + '-argType.csv')
agent = load_predictions(v + '-agent.csv')
break
readable_labels_arg = arg['Labels'].map(lambda x: [id2label_argType[y] for y in x])
labels_arg = [label for sublist in readable_labels_arg for label in sublist]
freq_arg = Counter(labels_arg)
df_arg = pd.DataFrame(columns=['Label', 'Frequency', 'Percentage'])
df_arg = df_arg.append({'Label': 'Macro F1', 'Frequency': len(labels_arg), 'Percentage': 100}, ignore_index=True)
for l in freq_arg:
df_arg = df_arg.append({'Label': l, 'Frequency': freq_arg[l], 'Percentage': np.around(freq_arg[l] / len(labels_arg) * 100, decimals=2)}, ignore_index=True)
df_arg = df_arg.sort_values('Frequency', ascending=False, ignore_index=True)
readable_labels_ag = agent['Labels'].map(lambda x: [id2label_agent[y] for y in x])
labels_ag = [label for sublist in readable_labels_ag for label in sublist]
freq_ag = Counter(labels_ag)
df_ag = pd.DataFrame(columns=['Label', 'Frequency', 'Percentage'])
df_ag = df_ag.append({'Label': 'Macro F1', 'Frequency': len(labels_ag), 'Percentage': 100}, ignore_index=True)
for l in freq_ag:
df_ag = df_ag.append({'Label': l, 'Frequency': freq_ag[l], 'Percentage': np.around(freq_ag[l] / len(labels_ag) * 100, decimals=2)}, ignore_index=True)
df_ag = df_ag.sort_values('Frequency', ascending=False, ignore_index=True)
# for each model add f1 scores to the labels
for k,v in files.items():
arg = load_predictions(v + '-argType.csv')
agent = load_predictions(v + '-agent.csv')
f1_arg, f1s_arg = compute_macro_f1(gold=arg.Labels.to_list(), pred=arg.Predictions.to_list(), id2label=id2label_argType)
f1_ag, f1s_ag = compute_macro_f1(gold=agent.Labels.to_list(), pred=agent.Predictions.to_list(), id2label=id2label_agent)
sort_f1s_arg = []
d_arg = dict(f1s_arg)
d_arg['Macro F1'] = f1_arg
for l in df_arg.Label.tolist():
sort_f1s_arg.append(np.around(d_arg[l] * 100, decimals=2))
df_arg[k] = sort_f1s_arg
sort_f1s_ag = []
d_ag = dict(f1s_ag)
d_ag['Macro F1'] = f1_ag
for l in df_ag.Label.tolist():
sort_f1s_ag.append(np.around(d_ag[l] * 100, decimals=2))
df_ag[k] = sort_f1s_ag
# when comparing two models, also include relative change of first model compared to the second
if len(df_arg.columns) == 5:
diffs = []
for i, row in df_arg.iterrows():
diffs.append(- np.around(((1 - (row['Art. 3'] / (row['Best'] + 1e-10))) * 100), decimals=2))
df_arg['Relative Change (%)'] = diffs
diffs = []
for i, row in df_ag.iterrows():
diffs.append(- np.around(((1 - (row['Art. 3'] / (row['Best'] + 1e-10))) * 100), decimals=2))
df_ag['Relative Change (%)'] = diffs
# display results
print('\n*****ArgType*****\n')
print(tabulate(df_arg, headers='keys', tablefmt='pretty', showindex=False))
print('\n*****Agent*****\n')
print(tabulate(df_ag, headers='keys', tablefmt='pretty', showindex=False))
| 7,473 | 42.202312 | 175 | py |
mining-legal-arguments | mining-legal-arguments-main/confusion_matrix_pretty_print.py | # -*- coding: utf-8 -*-
"""
plot a pretty confusion matrix with seaborn
Created on Mon Jun 25 14:17:37 2018
@author: Wagner Cipriano - wagnerbhbr - gmail - CEFETMG / MMC
REFerences:
https://www.mathworks.com/help/nnet/ref/plotconfusion.html
https://stackoverflow.com/questions/28200786/how-to-plot-scikit-learn-classification-report
https://stackoverflow.com/questions/5821125/how-to-plot-confusion-matrix-with-string-axis-rather-than-integer-in-python
https://www.programcreek.com/python/example/96197/seaborn.heatmap
https://stackoverflow.com/questions/19233771/sklearn-plot-confusion-matrix-with-labels/31720054
http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html#sphx-glr-auto-examples-model-selection-plot-confusion-matrix-py
"""
#imports
from pandas import DataFrame
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
from matplotlib.collections import QuadMesh
import seaborn as sn
def get_new_fig(fn, figsize=[9,9]):
""" Init graphics """
fig1 = plt.figure(fn, figsize)
ax1 = fig1.gca() #Get Current Axis
ax1.cla() # clear existing plot
return fig1, ax1
#
def configcell_text_and_colors(array_df, lin, col, oText, facecolors, posi, fz, fmt, show_null_values=0):
"""
config cell text and colors
and return text elements to add and to delete
@TODO: use fmt
"""
text_add = []; text_del = [];
cell_val = array_df[lin][col]
tot_all = array_df[-1][-1]
per = (float(cell_val) / tot_all) * 100
curr_column = array_df[:,col]
ccl = len(curr_column)
#last line and/or last column
if(col == (ccl - 1)) or (lin == (ccl - 1)):
#tots and percents
if(cell_val != 0):
if(col == ccl - 1) and (lin == ccl - 1):
tot_rig = 0
for i in range(array_df.shape[0] - 1):
tot_rig += array_df[i][i]
per_ok = (float(tot_rig) / cell_val) * 100
elif(col == ccl - 1):
tot_rig = array_df[lin][lin]
per_ok = (float(tot_rig) / cell_val) * 100
elif(lin == ccl - 1):
tot_rig = array_df[col][col]
per_ok = (float(tot_rig) / cell_val) * 100
per_err = 100 - per_ok
else:
per_ok = per_err = 0
per_ok_s = ['%.2f%%'%(per_ok), '100%'] [per_ok == 100]
#text to DEL
text_del.append(oText)
#text to ADD
font_prop = fm.FontProperties(weight='bold', size=fz)
text_kwargs = dict(color='w', ha="center", va="center", gid='sum', fontproperties=font_prop)
lis_txt = ['%d'%(cell_val), per_ok_s, '%.2f%%'%(per_err)]
lis_kwa = [text_kwargs]
dic = text_kwargs.copy(); dic['color'] = 'g'; lis_kwa.append(dic);
dic = text_kwargs.copy(); dic['color'] = 'r'; lis_kwa.append(dic);
lis_pos = [(oText._x, oText._y-0.3), (oText._x, oText._y), (oText._x, oText._y+0.3)]
for i in range(len(lis_txt)):
newText = dict(x=lis_pos[i][0], y=lis_pos[i][1], text=lis_txt[i], kw=lis_kwa[i])
#print ('lin: %s, col: %s, newText: %s' %(lin, col, newText))
text_add.append(newText)
#print '\n'
#set background color for sum cells (last line and last column)
carr = [0.0, 0.0, 0.0, 0.34]
if(col == ccl - 1) and (lin == ccl - 1):
carr = [0.0, 0.0, 0.0, 0.42]
facecolors[posi] = carr
else:
if(per > 0):
txt = '%s\n%.2f%%' %(cell_val, per)
else:
if(show_null_values == 0):
txt = ''
elif(show_null_values == 1):
txt = '0'
else:
txt = '0\n0.0%'
oText.set_text(txt)
#main diagonal
if(col == lin):
#set color of the text in the diagonal to white
oText.set_color('w')
# set background color in the diagonal to blue
facecolors[posi] = [0.35, 0.8, 0.55, 1.0]
else:
oText.set_color('r')
return text_add, text_del
#
def insert_totals(df_cm):
""" insert total column and line (the last ones) """
sum_col = []
for c in df_cm.columns:
sum_col.append( df_cm[c].sum() )
sum_lin = []
for item_line in df_cm.iterrows():
sum_lin.append( item_line[1].sum() )
df_cm['sum_row'] = sum_lin
sum_col.append(np.sum(sum_lin))
df_cm.loc['sum_col'] = sum_col
#print ('\ndf_cm:\n', df_cm, '\n\b\n')
#
def pretty_plot_confusion_matrix(df_cm, annot=True, cmap="Oranges", fmt='.2f', fz=11,
lw=0.5, cbar=False, figsize=[8,8], show_null_values=0, pred_val_axis='y', title='Confusion matrix', path='cm.png'):
"""
print conf matrix with default layout (like matlab)
params:
df_cm dataframe (pandas) without totals
annot print text in each cell
cmap Oranges,Oranges_r,YlGnBu,Blues,RdBu, ... see:
fz fontsize
lw linewidth
pred_val_axis where to show the prediction values (x or y axis)
'col' or 'x': show predicted values in columns (x axis) instead of lines
'lin' or 'y': show predicted values in lines (y axis)
"""
if(pred_val_axis in ('col', 'x')):
xlbl = 'Predicted'
ylbl = 'Actual'
else:
xlbl = 'Actual'
ylbl = 'Predicted'
df_cm = df_cm.T
# create "Total" column
insert_totals(df_cm)
#this is to always print in the same window
fig, ax1 = get_new_fig('Conf matrix default', figsize)
#thanks to seaborn
ax = sn.heatmap(df_cm, annot=annot, annot_kws={"size": fz}, linewidths=lw, ax=ax1,
cbar=cbar, cmap=cmap, linecolor='w', fmt=fmt)
#set ticklabels rotation
ax.set_xticklabels(ax.get_xticklabels(), rotation = 60, fontsize = 10, ha='right')
ax.set_yticklabels(ax.get_yticklabels(), rotation = 25, fontsize = 10)
# Turn off all the ticks
for t in ax.xaxis.get_major_ticks():
t.tick1On = False
t.tick2On = False
for t in ax.yaxis.get_major_ticks():
t.tick1On = False
t.tick2On = False
#face colors list
quadmesh = ax.findobj(QuadMesh)[0]
facecolors = quadmesh.get_facecolors()
#iter in text elements
array_df = np.array( df_cm.to_records(index=False).tolist() )
text_add = []; text_del = [];
posi = -1 #from left to right, bottom to top.
for t in ax.collections[0].axes.texts: #ax.texts:
pos = np.array( t.get_position()) - [0.5,0.5]
lin = int(pos[1]); col = int(pos[0]);
posi += 1
#print('>>> pos: %s, posi: %s, val: %s, txt: %s' %(pos, posi, array_df[lin][col], t.get_text()))
#set text
txt_res = configcell_text_and_colors(array_df, lin, col, t, facecolors, posi, fz, fmt, show_null_values)
text_add.extend(txt_res[0])
text_del.extend(txt_res[1])
#remove the old ones
for item in text_del:
item.remove()
#append the new ones
for item in text_add:
ax.text(item['x'], item['y'], item['text'], **item['kw'])
#titles and legends
ax.set_title(title)
ax.set_xlabel(xlbl)
ax.set_ylabel(ylbl)
plt.tight_layout() #set layout slim
plt.savefig(path, facecolor='w')
plt.show(block=False)
plt.close('all')
#
def plot_confusion_matrix_from_data(y_test, predictions, columns=None, annot=True, cmap="Oranges",
fmt='.2f', fz=11, lw=0.5, cbar=False, figsize=[8,8], show_null_values=0, pred_val_axis='lin'):
"""
plot confusion matrix function with y_test (actual values) and predictions (predic),
without a confusion matrix yet
"""
from sklearn.metrics import confusion_matrix
from pandas import DataFrame
#data
if(not columns):
#labels axis integer:
##columns = range(1, len(np.unique(y_test))+1)
#labels axis string:
from string import ascii_uppercase
columns = ['class %s' %(i) for i in list(ascii_uppercase)[0:len(np.unique(y_test))]]
confm = confusion_matrix(y_test, predictions)
cmap = 'Oranges';
fz = 11;
figsize=[9,9];
show_null_values = 2
df_cm = DataFrame(confm, index=columns, columns=columns)
pretty_plot_confusion_matrix(df_cm, fz=fz, cmap=cmap, figsize=figsize, show_null_values=show_null_values, pred_val_axis=pred_val_axis)
#
#
#TEST functions
#
def _test_cm():
#test function with confusion matrix done
array = np.array([[ 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 25, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 418, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 2, 0, 0, 0, 0, 0, 0, 2929, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 1, 0, 0, 0, 0, 0, 0, 165, 0, 0, 0],
[ 4, 0, 0, 0, 0, 0, 0, 873, 0, 6, 17]])
#get pandas dataframe
ind = ['B-Beschwerdeführer',
'B-Dritte',
'B-EGMR',
'B-Kommission/Kammer',
'B-Staat',
'I-Beschwerdeführer',
'I-Dritte',
'I-EGMR',
'I-Kommission/Kammer',
'I-Staat',
'O']
df_cm = DataFrame(array, index=ind, columns=ind)
#colormap: see this and choose the one you prefer
cmap = 'PuRd'
pretty_plot_confusion_matrix(df_cm, cmap=cmap,figsize=[11,11])
#
def _test_data_class():
""" test function with y_test (actual values) and predictions (predic) """
#data
y_test = np.array([1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5])
predic = np.array([1,2,4,3,5, 1,2,4,3,5, 1,2,3,4,4, 1,4,3,4,5, 1,2,4,4,5, 1,2,4,4,5, 1,2,4,4,5, 1,2,4,4,5, 1,2,3,3,5, 1,2,3,3,5, 1,2,3,4,4, 1,2,3,4,1, 1,2,3,4,1, 1,2,3,4,1, 1,2,4,4,5, 1,2,4,4,5, 1,2,4,4,5, 1,2,4,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5])
"""
Examples to validate output (confusion matrix plot)
actual: 5 and prediction 1 >> 3
actual: 2 and prediction 4 >> 1
actual: 3 and prediction 4 >> 10
"""
columns = []
annot = True;
cmap = 'Oranges';
fmt = '.2f'
lw = 0.5
cbar = False
show_null_values = 2
pred_val_axis = 'y'
#size::
fz = 12;
figsize = [9,9];
if(len(y_test) > 10):
fz=9; figsize=[14,14];
plot_confusion_matrix_from_data(y_test, predic, columns,
annot, cmap, fmt, fz, lw, cbar, figsize, show_null_values, pred_val_axis)
#
#
#MAIN function
#
if(__name__ == '__main__'):
print('__main__')
print('_test_cm: test function with confusion matrix done\nand pause')
_test_cm()
plt.pause(5)
print('_test_data_class: test function with y_test (actual values) and predictions (predic)')
_test_data_class()
| 11,369 | 35.796117 | 265 | py |
imbalanced-learn | imbalanced-learn-master/conftest.py | # This file is here so that when running from the root folder
# ./imblearn is added to sys.path by pytest.
# See https://docs.pytest.org/en/latest/pythonpath.html for more details.
# For example, this allows to build extensions in place and run pytest
# doc/modules/clustering.rst and use imblearn from the local folder
# rather than the one from site-packages.
import os
import pytest
def pytest_runtest_setup(item):
fname = item.fspath.strpath
if (
fname.endswith(os.path.join("keras", "_generator.py"))
or fname.endswith(os.path.join("tensorflow", "_generator.py"))
or fname.endswith("miscellaneous.rst")
):
try:
import tensorflow # noqa
except ImportError:
pytest.skip("The tensorflow package is not installed.")
| 798 | 32.291667 | 73 | py |
imbalanced-learn | imbalanced-learn-master/setup.py | #! /usr/bin/env python
"""Toolbox for imbalanced dataset in machine learning."""
import codecs
import os
from setuptools import find_packages, setup
try:
import builtins
except ImportError:
# Python 2 compat: just to be able to declare that Python >=3.7 is needed.
import __builtin__ as builtins
# This is a bit (!) hackish: we are setting a global variable so that the
# main imblearn __init__ can detect if it is being loaded by the setup
# routine, to avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by imbalanced-learn to
# recursively build the compiled extensions in sub-packages is based on the
# Python import machinery.
builtins.__IMBLEARN_SETUP__ = True
import imblearn._min_dependencies as min_deps # noqa
# get __version__ from _version.py
ver_file = os.path.join("imblearn", "_version.py")
with open(ver_file) as f:
exec(f.read())
DISTNAME = "imbalanced-learn"
DESCRIPTION = "Toolbox for imbalanced dataset in machine learning."
with codecs.open("README.rst", encoding="utf-8-sig") as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = "G. Lemaitre, C. Aridas"
MAINTAINER_EMAIL = "g.lemaitre58@gmail.com, ichkoar@gmail.com"
URL = "https://github.com/scikit-learn-contrib/imbalanced-learn"
LICENSE = "MIT"
DOWNLOAD_URL = "https://github.com/scikit-learn-contrib/imbalanced-learn"
VERSION = __version__ # noqa
CLASSIFIERS = [
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"License :: OSI Approved",
"Programming Language :: C",
"Programming Language :: Python",
"Topic :: Software Development",
"Topic :: Scientific/Engineering",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Operating System :: Unix",
"Operating System :: MacOS",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
]
PYTHON_REQUIRES = ">=3.8"
INSTALL_REQUIRES = (min_deps.tag_to_packages["install"],)
EXTRAS_REQUIRE = {
key: value for key, value in min_deps.tag_to_packages.items() if key != "install"
}
setup(
name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
zip_safe=False, # the package can run out of an .egg file
classifiers=CLASSIFIERS,
packages=find_packages(),
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS_REQUIRE,
)
| 2,646 | 32.0875 | 85 | py |
imbalanced-learn | imbalanced-learn-master/examples/pipeline/plot_pipeline_classification.py | """
====================================
Usage of pipeline embedding samplers
====================================
An example of the :class:`~imblearn.pipeline.Pipeline` object (or
:func:`~imblearn.pipeline.make_pipeline` helper function) working with
transformers and resamplers.
"""
# Authors: Christos Aridas
# Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
# %%
print(__doc__)
# %% [markdown]
# Let's first create an imbalanced dataset and split in to two sets.
# %%
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
X, y = make_classification(
n_classes=2,
class_sep=1.25,
weights=[0.3, 0.7],
n_informative=3,
n_redundant=1,
flip_y=0,
n_features=5,
n_clusters_per_class=1,
n_samples=5000,
random_state=10,
)
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42)
# %% [markdown]
# Now, we will create each individual step that we would like later to combine
# %%
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import EditedNearestNeighbours
pca = PCA(n_components=2)
enn = EditedNearestNeighbours()
smote = SMOTE(random_state=0)
knn = KNeighborsClassifier(n_neighbors=1)
# %% [markdown]
# Now, we can finally create a pipeline to specify in which order the different
# transformers and samplers should be executed before providing the data to
# the final classifier.
# %%
from imblearn.pipeline import make_pipeline
model = make_pipeline(pca, enn, smote, knn)
# %% [markdown]
# We can now use the created pipeline as a normal classifier where resampling
# will happen when calling `fit` and will be disabled when calling `decision_function`,
# `predict_proba`, or `predict`.
# %%
from sklearn.metrics import classification_report
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print(classification_report(y_test, y_pred))
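# %% [markdown]
# A minimal illustrative follow-up (not part of the original example): the
# fitted pipeline also exposes `predict_proba` from the final KNN classifier;
# as with `predict`, no resampling is applied at prediction time.
# %%
y_proba = model.predict_proba(X_test)
print(y_proba[:5])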
| 2,006 | 25.407895 | 86 | py |
imbalanced-learn | imbalanced-learn-master/examples/evaluation/plot_metrics.py | """
=======================================
Metrics specific to imbalanced learning
=======================================
Specific metrics have been developed to evaluate classifiers which
have been trained using imbalanced data. :mod:`imblearn` provides mainly
two additional metrics which are not implemented in :mod:`sklearn`: (i)
geometric mean and (ii) index balanced accuracy.
"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
# %%
print(__doc__)
RANDOM_STATE = 42
# %% [markdown]
# First, we will generate some imbalanced dataset.
# %%
from sklearn.datasets import make_classification
X, y = make_classification(
n_classes=3,
class_sep=2,
weights=[0.1, 0.9],
n_informative=10,
n_redundant=1,
flip_y=0,
n_features=20,
n_clusters_per_class=4,
n_samples=5000,
random_state=RANDOM_STATE,
)
# %% [markdown]
# We will split the data into a training and testing set.
# %%
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, stratify=y, random_state=RANDOM_STATE
)
# %% [markdown]
# We will create a pipeline made of a :class:`~imblearn.over_sampling.SMOTE`
# over-sampler followed by a :class:`~sklearn.linear_model.LogisticRegression`
# classifier.
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from imblearn.over_sampling import SMOTE
# %%
from imblearn.pipeline import make_pipeline
model = make_pipeline(
StandardScaler(),
SMOTE(random_state=RANDOM_STATE),
LogisticRegression(max_iter=10_000, random_state=RANDOM_STATE),
)
# %% [markdown]
# Now, we will train the model on the training set and get the prediction
# associated with the testing set. Be aware that the resampling will happen
# only when calling `fit`: the number of samples in `y_pred` is the same as
# in `y_test`.
# %%
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# %% [markdown]
# The geometric mean corresponds to the square root of the product of the
# sensitivity and specificity. Combining the two metrics should account for
# the balancing of the dataset.
# %%
from imblearn.metrics import geometric_mean_score
print(f"The geometric mean is {geometric_mean_score(y_test, y_pred):.3f}")
# %% [markdown]
# The index balanced accuracy can transform any metric to be used in
# imbalanced learning problems.
# %%
from imblearn.metrics import make_index_balanced_accuracy
alpha = 0.1
geo_mean = make_index_balanced_accuracy(alpha=alpha, squared=True)(geometric_mean_score)
print(
f"The IBA using alpha={alpha} and the geometric mean: "
f"{geo_mean(y_test, y_pred):.3f}"
)
# %%
alpha = 0.5
geo_mean = make_index_balanced_accuracy(alpha=alpha, squared=True)(geometric_mean_score)
print(
f"The IBA using alpha={alpha} and the geometric mean: "
f"{geo_mean(y_test, y_pred):.3f}"
)
| 2,900 | 25.135135 | 88 | py |
imbalanced-learn | imbalanced-learn-master/examples/evaluation/plot_classification_report.py | """
=============================================
Evaluate classification by compiling a report
=============================================
Specific metrics have been developed to evaluate classifiers which have been
trained using imbalanced data. :mod:`imblearn` provides a classification report
similar to :mod:`sklearn`, with additional metrics specific to imbalanced
learning problem.
"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
from sklearn import datasets
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from imblearn import over_sampling as os
from imblearn import pipeline as pl
from imblearn.metrics import classification_report_imbalanced
print(__doc__)
RANDOM_STATE = 42
# Generate a dataset
X, y = datasets.make_classification(
n_classes=2,
class_sep=2,
weights=[0.1, 0.9],
n_informative=10,
n_redundant=1,
flip_y=0,
n_features=20,
n_clusters_per_class=4,
n_samples=5000,
random_state=RANDOM_STATE,
)
pipeline = pl.make_pipeline(
StandardScaler(),
os.SMOTE(random_state=RANDOM_STATE),
LogisticRegression(max_iter=10_000),
)
# Split the data
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=RANDOM_STATE)
# Train the classifier with balancing
pipeline.fit(X_train, y_train)
# Test the classifier and get the prediction
y_pred_bal = pipeline.predict(X_test)
# Show the classification report
print(classification_report_imbalanced(y_test, y_pred_bal))
| 1,584 | 25.416667 | 84 | py |
imbalanced-learn | imbalanced-learn-master/examples/combine/plot_comparison_combine.py | """
==================================================
Compare sampler combining over- and under-sampling
==================================================
This example shows the effect of applying an under-sampling algorithm after
SMOTE over-sampling. In the literature, Tomek's link and edited nearest
neighbours are the two methods which have been used and are available in
imbalanced-learn.
"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
# %%
print(__doc__)
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context("poster")
# %% [markdown]
# Dataset generation
# ------------------
#
# We will create an imbalanced dataset with a couple of samples. We will use
# :func:`~sklearn.datasets.make_classification` to generate this dataset.
# %%
from sklearn.datasets import make_classification
X, y = make_classification(
n_samples=100,
n_features=2,
n_informative=2,
n_redundant=0,
n_repeated=0,
n_classes=3,
n_clusters_per_class=1,
weights=[0.1, 0.2, 0.7],
class_sep=0.8,
random_state=0,
)
# %%
_, ax = plt.subplots(figsize=(6, 6))
_ = ax.scatter(X[:, 0], X[:, 1], c=y, alpha=0.8, edgecolor="k")
# %% [markdown]
# The following function will be used to plot the sample space after resampling
# to illustrate the characteristic of an algorithm.
# %%
from collections import Counter
def plot_resampling(X, y, sampler, ax):
"""Plot the resampled dataset using the sampler."""
X_res, y_res = sampler.fit_resample(X, y)
ax.scatter(X_res[:, 0], X_res[:, 1], c=y_res, alpha=0.8, edgecolor="k")
sns.despine(ax=ax, offset=10)
ax.set_title(f"Decision function for {sampler.__class__.__name__}")
return Counter(y_res)
# %% [markdown]
# The following function will be used to plot the decision function of a
# classifier given some data.
# %%
import numpy as np
def plot_decision_function(X, y, clf, ax):
"""Plot the decision function of the classifier and the original data"""
plot_step = 0.02
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(
np.arange(x_min, x_max, plot_step), np.arange(y_min, y_max, plot_step)
)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, alpha=0.4)
ax.scatter(X[:, 0], X[:, 1], alpha=0.8, c=y, edgecolor="k")
ax.set_title(f"Resampling using {clf[0].__class__.__name__}")
# %% [markdown]
# :class:`~imblearn.over_sampling.SMOTE` allows to generate samples. However,
# this method of over-sampling does not have any knowledge regarding the
# underlying distribution. Therefore, some noisy samples can be generated, e.g.
# when the different classes cannot be well separated. Hence, it can be
# beneficial to apply an under-sampling algorithm to clean the noisy samples.
# Two methods are usually used in the literature: (i) Tomek's link and (ii)
# edited nearest neighbours cleaning methods. Imbalanced-learn provides two
# ready-to-use samplers :class:`~imblearn.combine.SMOTETomek` and
# :class:`~imblearn.combine.SMOTEENN`. In general,
# :class:`~imblearn.combine.SMOTEENN` cleans more noisy data than
# :class:`~imblearn.combine.SMOTETomek`.
from sklearn.linear_model import LogisticRegression
from imblearn.combine import SMOTEENN, SMOTETomek
# %%
from imblearn.over_sampling import SMOTE
from imblearn.pipeline import make_pipeline
samplers = [SMOTE(random_state=0), SMOTEENN(random_state=0), SMOTETomek(random_state=0)]
fig, axs = plt.subplots(3, 2, figsize=(15, 25))
for ax, sampler in zip(axs, samplers):
clf = make_pipeline(sampler, LogisticRegression()).fit(X, y)
plot_decision_function(X, y, clf, ax[0])
plot_resampling(X, y, sampler, ax[1])
fig.tight_layout()
plt.show()
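# %% [markdown]
# As a small illustrative check (not part of the original example), the class
# counts after resampling show how many samples each combined method keeps;
# SMOTEENN tends to clean more samples than SMOTETomek.
# %%
for sampler in (SMOTEENN(random_state=0), SMOTETomek(random_state=0)):
    _, y_res = sampler.fit_resample(X, y)
    print(f"{sampler.__class__.__name__}: {Counter(y_res)}")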
| 3,820 | 30.065041 | 88 | py |
imbalanced-learn | imbalanced-learn-master/examples/model_selection/plot_validation_curve.py | """
==========================
Plotting Validation Curves
==========================
In this example the impact of the :class:`~imblearn.over_sampling.SMOTE`'s
`k_neighbors` parameter is examined. In the plot you can see the validation
scores of a SMOTE-CART classifier for different values of the
:class:`~imblearn.over_sampling.SMOTE`'s `k_neighbors` parameter.
"""
# Authors: Christos Aridas
# Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
# %%
print(__doc__)
import seaborn as sns
sns.set_context("poster")
RANDOM_STATE = 42
# %% [markdown]
# Let's first generate a dataset with imbalanced class distribution.
# %%
from sklearn.datasets import make_classification
X, y = make_classification(
n_classes=2,
class_sep=2,
weights=[0.1, 0.9],
n_informative=10,
n_redundant=1,
flip_y=0,
n_features=20,
n_clusters_per_class=4,
n_samples=5000,
random_state=RANDOM_STATE,
)
# %% [markdown]
# We will use an over-sampler :class:`~imblearn.over_sampling.SMOTE` followed
# by a :class:`~sklearn.tree.DecisionTreeClassifier`. The aim will be to
# search which `k_neighbors` parameter is the most adequate with the dataset
# that we generated.
from sklearn.tree import DecisionTreeClassifier
# %%
from imblearn.over_sampling import SMOTE
from imblearn.pipeline import make_pipeline
model = make_pipeline(
SMOTE(random_state=RANDOM_STATE), DecisionTreeClassifier(random_state=RANDOM_STATE)
)
# %% [markdown]
# We can use the :class:`~sklearn.model_selection.validation_curve` to inspect
# the impact of varying the parameter `k_neighbors`. In this case, we need
# to define a scoring function to evaluate the generalization performance during the
# cross-validation.
# %%
from sklearn.metrics import cohen_kappa_score, make_scorer
from sklearn.model_selection import validation_curve
scorer = make_scorer(cohen_kappa_score)
param_range = range(1, 11)
train_scores, test_scores = validation_curve(
model,
X,
y,
param_name="smote__k_neighbors",
param_range=param_range,
cv=3,
scoring=scorer,
)
# %%
train_scores_mean = train_scores.mean(axis=1)
train_scores_std = train_scores.std(axis=1)
test_scores_mean = test_scores.mean(axis=1)
test_scores_std = test_scores.std(axis=1)
# %% [markdown]
# We can now plot the results of the cross-validation for the different
# parameter values that we tried.
# %%
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(7, 7))
ax.plot(param_range, test_scores_mean, label="SMOTE")
ax.fill_between(
param_range,
test_scores_mean + test_scores_std,
test_scores_mean - test_scores_std,
alpha=0.2,
)
idx_max = test_scores_mean.argmax()
ax.scatter(
param_range[idx_max],
test_scores_mean[idx_max],
label=r"Cohen Kappa: ${:.2f}\pm{:.2f}$".format(
test_scores_mean[idx_max], test_scores_std[idx_max]
),
)
fig.suptitle("Validation Curve with SMOTE-CART")
ax.set_xlabel("Number of neighbors")
ax.set_ylabel("Cohen's kappa")
# make nice plotting
sns.despine(ax=ax, offset=10)
ax.set_xlim([1, 10])
ax.set_ylim([0.4, 0.8])
ax.legend(loc="lower right", fontsize=16)
plt.tight_layout()
plt.show()
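# %% [markdown]
# As an illustrative follow-up (not part of the original example), we can also
# report the best `k_neighbors` value found on the validation curve.
# %%
print(
    f"Best k_neighbors: {param_range[idx_max]} "
    f"(Cohen's kappa = {test_scores_mean[idx_max]:.2f} +/- {test_scores_std[idx_max]:.2f})"
)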
| 3,153 | 24.435484 | 87 | py |
imbalanced-learn | imbalanced-learn-master/examples/api/plot_sampling_strategy_usage.py | """
====================================================
How to use ``sampling_strategy`` in imbalanced-learn
====================================================
This example shows the different usage of the parameter ``sampling_strategy``
for the different families of samplers (i.e. over-sampling, under-sampling, or
cleaning methods).
"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
# %%
print(__doc__)
import seaborn as sns
sns.set_context("poster")
# %% [markdown]
# Create an imbalanced dataset
# ----------------------------
#
# First, we will create an imbalanced data set from the iris data set.
# %%
from sklearn.datasets import load_iris
from imblearn.datasets import make_imbalance
iris = load_iris(as_frame=True)
sampling_strategy = {0: 10, 1: 20, 2: 47}
X, y = make_imbalance(iris.data, iris.target, sampling_strategy=sampling_strategy)
# %%
import matplotlib.pyplot as plt
fig, axs = plt.subplots(ncols=2, figsize=(10, 5))
autopct = "%.2f"
iris.target.value_counts().plot.pie(autopct=autopct, ax=axs[0])
axs[0].set_title("Original")
y.value_counts().plot.pie(autopct=autopct, ax=axs[1])
axs[1].set_title("Imbalanced")
fig.tight_layout()
# %% [markdown]
# Using ``sampling_strategy`` in resampling algorithms
# ====================================================
#
# `sampling_strategy` as a `float`
# --------------------------------
#
# `sampling_strategy` can be given a `float`. For **under-sampling
# methods**, it corresponds to the ratio :math:`\alpha_{us}` defined by
# :math:`\alpha_{us} = N_{m} / N_{rM}` where :math:`N_{rM}` and
# :math:`N_{m}` are the number of samples in the majority class after
# resampling and the number of samples in the minority class, respectively.
# %%
# select only 2 classes since the ratio make sense in this case
binary_mask = y.isin([0, 1])
binary_y = y[binary_mask]
binary_X = X[binary_mask]
# %%
from imblearn.under_sampling import RandomUnderSampler
sampling_strategy = 0.8
rus = RandomUnderSampler(sampling_strategy=sampling_strategy)
X_res, y_res = rus.fit_resample(binary_X, binary_y)
ax = y_res.value_counts().plot.pie(autopct=autopct)
_ = ax.set_title("Under-sampling")
# %% [markdown]
# For **over-sampling methods**, it corresponds to the ratio
# :math:`\alpha_{os}` defined by :math:`N_{rm} = \alpha_{os} \times N_{M}`
# where :math:`N_{rm}` and :math:`N_{M}` are the number of samples in the
# minority class after resampling and the number of samples in the majority
# class, respectively.
# %%
from imblearn.over_sampling import RandomOverSampler
ros = RandomOverSampler(sampling_strategy=sampling_strategy)
X_res, y_res = ros.fit_resample(binary_X, binary_y)
ax = y_res.value_counts().plot.pie(autopct=autopct)
_ = ax.set_title("Over-sampling")
# %% [markdown]
# `sampling_strategy` as a `str`
# -------------------------------
#
# `sampling_strategy` can be given as a string which specifies the classes
# targeted by the resampling. With under- and over-sampling, the number of
# samples will be equalized.
#
# Note that we are using multiple classes from now on.
# %%
sampling_strategy = "not minority"
fig, axs = plt.subplots(ncols=2, figsize=(10, 5))
rus = RandomUnderSampler(sampling_strategy=sampling_strategy)
X_res, y_res = rus.fit_resample(X, y)
y_res.value_counts().plot.pie(autopct=autopct, ax=axs[0])
axs[0].set_title("Under-sampling")
sampling_strategy = "not majority"
ros = RandomOverSampler(sampling_strategy=sampling_strategy)
X_res, y_res = ros.fit_resample(X, y)
y_res.value_counts().plot.pie(autopct=autopct, ax=axs[1])
_ = axs[1].set_title("Over-sampling")
# %% [markdown]
# With **cleaning method**, the number of samples in each class will not be
# equalized even if targeted.
# %%
from imblearn.under_sampling import TomekLinks
sampling_strategy = "not minority"
tl = TomekLinks(sampling_strategy=sampling_strategy)
X_res, y_res = tl.fit_resample(X, y)
ax = y_res.value_counts().plot.pie(autopct=autopct)
_ = ax.set_title("Cleaning")
# %% [markdown]
# `sampling_strategy` as a `dict`
# ------------------------------
#
# When `sampling_strategy` is a `dict`, the keys correspond to the targeted
# classes. The values correspond to the desired number of samples for each
# targeted class. This is working for both **under- and over-sampling**
# algorithms but not for the **cleaning algorithms**. Use a `list` instead.
# %%
fig, axs = plt.subplots(ncols=2, figsize=(10, 5))
sampling_strategy = {0: 10, 1: 15, 2: 20}
rus = RandomUnderSampler(sampling_strategy=sampling_strategy)
X_res, y_res = rus.fit_resample(X, y)
y_res.value_counts().plot.pie(autopct=autopct, ax=axs[0])
axs[0].set_title("Under-sampling")
sampling_strategy = {0: 25, 1: 35, 2: 47}
ros = RandomOverSampler(sampling_strategy=sampling_strategy)
X_res, y_res = ros.fit_resample(X, y)
y_res.value_counts().plot.pie(autopct=autopct, ax=axs[1])
_ = axs[1].set_title("Under-sampling")
# %% [markdown]
# `sampling_strategy` as a `list`
# -------------------------------
#
# When `sampling_strategy` is a `list`, the list contains the targeted
# classes. It is used only for **cleaning methods** and raises an error
# otherwise.
# %%
sampling_strategy = [0, 1, 2]
tl = TomekLinks(sampling_strategy=sampling_strategy)
X_res, y_res = tl.fit_resample(X, y)
ax = y_res.value_counts().plot.pie(autopct=autopct)
_ = ax.set_title("Cleaning")
# %% [markdown]
# `sampling_strategy` as a callable
# ---------------------------------
#
# When a callable is passed, it is a function taking `y` and returning a `dict`. The keys
# correspond to the targeted classes. The values correspond to the desired
# number of samples for each class.
# %%
def ratio_multiplier(y):
from collections import Counter
multiplier = {1: 0.7, 2: 0.95}
target_stats = Counter(y)
for key, value in target_stats.items():
if key in multiplier:
target_stats[key] = int(value * multiplier[key])
return target_stats
X_res, y_res = RandomUnderSampler(sampling_strategy=ratio_multiplier).fit_resample(X, y)
ax = y_res.value_counts().plot.pie(autopct=autopct)
ax.set_title("Under-sampling")
plt.show()
| 6,099 | 30.443299 | 88 | py |
imbalanced-learn | imbalanced-learn-master/examples/datasets/plot_make_imbalance.py | """
============================
Create an imbalanced dataset
============================
An illustration of the :func:`~imblearn.datasets.make_imbalance` function to
create an imbalanced dataset from a balanced dataset. We show the ability of
:func:`~imblearn.datasets.make_imbalance` to deal with a Pandas DataFrame.
"""
# Authors: Dayvid Oliveira
# Christos Aridas
# Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
# %%
print(__doc__)
import seaborn as sns
sns.set_context("poster")
# %% [markdown]
# Generate the dataset
# --------------------
#
# First, we will generate a dataset and convert it to a
# :class:`~pandas.DataFrame` with arbitrary column names. We will plot the
# original dataset.
# %%
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.datasets import make_moons
X, y = make_moons(n_samples=200, shuffle=True, noise=0.5, random_state=10)
X = pd.DataFrame(X, columns=["feature 1", "feature 2"])
ax = X.plot.scatter(
x="feature 1",
y="feature 2",
c=y,
colormap="viridis",
colorbar=False,
)
sns.despine(ax=ax, offset=10)
plt.tight_layout()
# %% [markdown]
# Make a dataset imbalanced
# -------------------------
#
# Now, we will show the helper :func:`~imblearn.datasets.make_imbalance`
# that is useful to randomly select a subset of samples. It will impact the
# class distribution as specified by the parameters.
# %%
from collections import Counter
def ratio_func(y, multiplier, minority_class):
target_stats = Counter(y)
return {minority_class: int(multiplier * target_stats[minority_class])}
# %%
from imblearn.datasets import make_imbalance
fig, axs = plt.subplots(nrows=2, ncols=3, figsize=(15, 10))
X.plot.scatter(
x="feature 1",
y="feature 2",
c=y,
ax=axs[0, 0],
colormap="viridis",
colorbar=False,
)
axs[0, 0].set_title("Original set")
sns.despine(ax=axs[0, 0], offset=10)
multipliers = [0.9, 0.75, 0.5, 0.25, 0.1]
for ax, multiplier in zip(axs.ravel()[1:], multipliers):
X_resampled, y_resampled = make_imbalance(
X,
y,
sampling_strategy=ratio_func,
**{"multiplier": multiplier, "minority_class": 1},
)
X_resampled.plot.scatter(
x="feature 1",
y="feature 2",
c=y_resampled,
ax=ax,
colormap="viridis",
colorbar=False,
)
ax.set_title(f"Sampling ratio = {multiplier}")
sns.despine(ax=ax, offset=10)
plt.tight_layout()
plt.show()
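# %% [markdown]
# A minimal illustrative variant (not part of the original example): the target
# class distribution can also be given explicitly as a dictionary mapping each
# class to the number of samples to keep.
# %%
X_res, y_res = make_imbalance(X, y, sampling_strategy={0: 80, 1: 30})
print(Counter(y_res))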
| 2,474 | 23.264706 | 76 | py |
imbalanced-learn | imbalanced-learn-master/examples/over-sampling/plot_shrinkage_effect.py | """
======================================================
Effect of the shrinkage factor in random over-sampling
======================================================
This example shows the effect of the shrinkage factor used to generate the
smoothed bootstrap using the
:class:`~imblearn.over_sampling.RandomOverSampler`.
"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
# %%
print(__doc__)
import seaborn as sns
sns.set_context("poster")
# %%
# First, we will generate a toy classification dataset with only a few samples.
# The ratio between the classes will be imbalanced.
from collections import Counter
from sklearn.datasets import make_classification
X, y = make_classification(
n_samples=100,
n_features=2,
n_redundant=0,
weights=[0.1, 0.9],
random_state=0,
)
Counter(y)
# %%
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(7, 7))
scatter = plt.scatter(X[:, 0], X[:, 1], c=y, alpha=0.4)
class_legend = ax.legend(*scatter.legend_elements(), loc="lower left", title="Classes")
ax.add_artist(class_legend)
ax.set_xlabel("Feature #1")
_ = ax.set_ylabel("Feature #2")
plt.tight_layout()
# %%
# Now, we will use a :class:`~imblearn.over_sampling.RandomOverSampler` to
# generate a bootstrap for the minority class with as many samples as in the
# majority class.
from imblearn.over_sampling import RandomOverSampler
sampler = RandomOverSampler(random_state=0)
X_res, y_res = sampler.fit_resample(X, y)
Counter(y_res)
# %%
fig, ax = plt.subplots(figsize=(7, 7))
scatter = plt.scatter(X_res[:, 0], X_res[:, 1], c=y_res, alpha=0.4)
class_legend = ax.legend(*scatter.legend_elements(), loc="lower left", title="Classes")
ax.add_artist(class_legend)
ax.set_xlabel("Feature #1")
_ = ax.set_ylabel("Feature #2")
plt.tight_layout()
# %%
# We observe that the minority samples are less transparent than the samples
# from the majority class. Indeed, it is due to the fact that these samples
# of the minority class are repeated during the bootstrap generation.
#
# We can set `shrinkage` to a floating value to add a small perturbation to the
# samples created and therefore create a smoothed bootstrap.
sampler = RandomOverSampler(shrinkage=1, random_state=0)
X_res, y_res = sampler.fit_resample(X, y)
Counter(y_res)
# %%
fig, ax = plt.subplots(figsize=(7, 7))
scatter = plt.scatter(X_res[:, 0], X_res[:, 1], c=y_res, alpha=0.4)
class_legend = ax.legend(*scatter.legend_elements(), loc="lower left", title="Classes")
ax.add_artist(class_legend)
ax.set_xlabel("Feature #1")
_ = ax.set_ylabel("Feature #2")
plt.tight_layout()
# %%
# In this case, we see that the samples in the minority class are not
# overlapping anymore due to the added noise.
#
# The parameter `shrinkage` allows us to add more or less perturbation. Let's
# add more perturbation when generating the smoothed bootstrap.
sampler = RandomOverSampler(shrinkage=3, random_state=0)
X_res, y_res = sampler.fit_resample(X, y)
Counter(y_res)
# %%
fig, ax = plt.subplots(figsize=(7, 7))
scatter = plt.scatter(X_res[:, 0], X_res[:, 1], c=y_res, alpha=0.4)
class_legend = ax.legend(*scatter.legend_elements(), loc="lower left", title="Classes")
ax.add_artist(class_legend)
ax.set_xlabel("Feature #1")
_ = ax.set_ylabel("Feature #2")
plt.tight_layout()
# %%
# Increasing the value of `shrinkage` will disperse the new samples. Forcing
# the shrinkage to 0 will be equivalent to generating a normal bootstrap.
sampler = RandomOverSampler(shrinkage=0, random_state=0)
X_res, y_res = sampler.fit_resample(X, y)
Counter(y_res)
# %%
fig, ax = plt.subplots(figsize=(7, 7))
scatter = plt.scatter(X_res[:, 0], X_res[:, 1], c=y_res, alpha=0.4)
class_legend = ax.legend(*scatter.legend_elements(), loc="lower left", title="Classes")
ax.add_artist(class_legend)
ax.set_xlabel("Feature #1")
_ = ax.set_ylabel("Feature #2")
plt.tight_layout()
# %%
# Therefore, the `shrinkage` is handy to manually tune the dispersion of the
# new samples.
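# %% [markdown]
# As a final illustrative check (not part of the original example), counting
# the unique resampled rows shows that the plain bootstrap duplicates existing
# minority samples, whereas the smoothed bootstrap creates distinct points.
# %%
import numpy as np
X_boot, _ = RandomOverSampler(random_state=0).fit_resample(X, y)
X_smooth, _ = RandomOverSampler(shrinkage=1, random_state=0).fit_resample(X, y)
print(f"Unique rows with a plain bootstrap: {np.unique(X_boot, axis=0).shape[0]}")
print(f"Unique rows with a smoothed bootstrap: {np.unique(X_smooth, axis=0).shape[0]}")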
| 3,956 | 30.656 | 87 | py |
imbalanced-learn | imbalanced-learn-master/examples/over-sampling/plot_illustration_generation_sample.py | """
============================================
Sample generator used in SMOTE-like samplers
============================================
This example illustrates how a new sample is generated taking into account the
neighbourhood of this sample. A new sample is generated by selecting the
randomly 2 samples of the same class and interpolating a point between these
samples.
"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
# %%
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
sns.set_context("poster")
rng = np.random.RandomState(18)
f, ax = plt.subplots(figsize=(8, 8))
# generate some data points
y = np.array([3.65284, 3.52623, 3.51468, 3.22199, 3.21])
z = np.array([0.43, 0.45, 0.6, 0.4, 0.211])
y_2 = np.array([3.3, 3.6])
z_2 = np.array([0.58, 0.34])
# plot the majority and minority samples
ax.scatter(z, y, label="Minority class", s=100)
ax.scatter(z_2, y_2, label="Majority class", s=100)
idx = rng.randint(len(y), size=2)
annotation = [r"$x_i$", r"$x_{zi}$"]
for a, i in zip(annotation, idx):
ax.annotate(a, (z[i], y[i]), xytext=tuple([z[i] + 0.01, y[i] + 0.005]), fontsize=15)
# draw the circle in which the new sample will generated
radius = np.sqrt((z[idx[0]] - z[idx[1]]) ** 2 + (y[idx[0]] - y[idx[1]]) ** 2)
circle = plt.Circle((z[idx[0]], y[idx[0]]), radius=radius, alpha=0.2)
ax.add_artist(circle)
# plot the line on which the sample will be generated
ax.plot(z[idx], y[idx], "--", alpha=0.5)
# create and plot the new sample
step = rng.uniform()
y_gen = y[idx[0]] + step * (y[idx[1]] - y[idx[0]])
z_gen = z[idx[0]] + step * (z[idx[1]] - z[idx[0]])
ax.scatter(z_gen, y_gen, s=100)
ax.annotate(
r"$x_{new}$",
(z_gen, y_gen),
xytext=tuple([z_gen + 0.01, y_gen + 0.005]),
fontsize=15,
)
# make the plot nicer with legend and label
sns.despine(ax=ax, offset=10)
ax.set_xlim([0.2, 0.7])
ax.set_ylim([3.2, 3.7])
plt.xlabel(r"$X_1$")
plt.ylabel(r"$X_2$")
plt.legend()
plt.tight_layout()
plt.show()
| 2,010 | 26.547945 | 88 | py |
imbalanced-learn | imbalanced-learn-master/examples/over-sampling/plot_comparison_over_sampling.py | """
==============================
Compare over-sampling samplers
==============================
The following example aims to make a qualitative comparison between the
different over-sampling algorithms available in the imbalanced-learn package.
"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
# %%
print(__doc__)
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context("poster")
# %% [markdown]
# The following function will be used to create a toy dataset. It uses the
# :func:`~sklearn.datasets.make_classification` from scikit-learn but fixing
# some parameters.
# %%
from sklearn.datasets import make_classification
def create_dataset(
n_samples=1000,
weights=(0.01, 0.01, 0.98),
n_classes=3,
class_sep=0.8,
n_clusters=1,
):
return make_classification(
n_samples=n_samples,
n_features=2,
n_informative=2,
n_redundant=0,
n_repeated=0,
n_classes=n_classes,
n_clusters_per_class=n_clusters,
weights=list(weights),
class_sep=class_sep,
random_state=0,
)
# %% [markdown]
# The following function will be used to plot the sample space after resampling
# to illustrate the specificities of an algorithm.
# %%
def plot_resampling(X, y, sampler, ax, title=None):
X_res, y_res = sampler.fit_resample(X, y)
ax.scatter(X_res[:, 0], X_res[:, 1], c=y_res, alpha=0.8, edgecolor="k")
if title is None:
title = f"Resampling with {sampler.__class__.__name__}"
ax.set_title(title)
sns.despine(ax=ax, offset=10)
# %% [markdown]
# The following function will be used to plot the decision function of a
# classifier given some data.
# %%
import numpy as np
def plot_decision_function(X, y, clf, ax, title=None):
plot_step = 0.02
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(
np.arange(x_min, x_max, plot_step), np.arange(y_min, y_max, plot_step)
)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, alpha=0.4)
ax.scatter(X[:, 0], X[:, 1], alpha=0.8, c=y, edgecolor="k")
if title is not None:
ax.set_title(title)
# %% [markdown]
# Illustration of the influence of the balancing ratio
# ----------------------------------------------------
#
# We will first illustrate the influence of the balancing ratio on some toy
# data using a logistic regression classifier which is a linear model.
# %%
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression()
# %% [markdown]
# We will fit and show the decision boundary model to illustrate the impact of
# dealing with imbalanced classes.
# %%
fig, axs = plt.subplots(nrows=2, ncols=2, figsize=(15, 12))
weights_arr = (
(0.01, 0.01, 0.98),
(0.01, 0.05, 0.94),
(0.2, 0.1, 0.7),
(0.33, 0.33, 0.33),
)
for ax, weights in zip(axs.ravel(), weights_arr):
X, y = create_dataset(n_samples=300, weights=weights)
clf.fit(X, y)
plot_decision_function(X, y, clf, ax, title=f"weight={weights}")
fig.suptitle(f"Decision function of {clf.__class__.__name__}")
fig.tight_layout()
# %% [markdown]
# The greater the difference between the number of samples in each class, the
# poorer the classification results.
#
# Random over-sampling to balance the data set
# --------------------------------------------
#
# Random over-sampling can be used to repeat some samples and balance the
# number of samples between the classes. It can be seen that with this trivial
# approach the decision boundary is already less biased toward the majority
# class. The class :class:`~imblearn.over_sampling.RandomOverSampler`
# implements such a strategy.
from imblearn.over_sampling import RandomOverSampler
# %%
from imblearn.pipeline import make_pipeline
X, y = create_dataset(n_samples=100, weights=(0.05, 0.25, 0.7))
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(15, 7))
clf.fit(X, y)
plot_decision_function(X, y, clf, axs[0], title="Without resampling")
sampler = RandomOverSampler(random_state=0)
model = make_pipeline(sampler, clf).fit(X, y)
plot_decision_function(X, y, model, axs[1], f"Using {model[0].__class__.__name__}")
fig.suptitle(f"Decision function of {clf.__class__.__name__}")
fig.tight_layout()
# %% [markdown]
# By default, random over-sampling generates a bootstrap. The parameter
# `shrinkage` allows adding a small perturbation to the generated data
# to generate a smoothed bootstrap instead. The plot below shows the difference
# between the two data generation strategies.
# %%
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(15, 7))
sampler.set_params(shrinkage=None)
plot_resampling(X, y, sampler, ax=axs[0], title="Normal bootstrap")
sampler.set_params(shrinkage=0.3)
plot_resampling(X, y, sampler, ax=axs[1], title="Smoothed bootstrap")
fig.suptitle(f"Resampling with {sampler.__class__.__name__}")
fig.tight_layout()
# %% [markdown]
# It looks like more samples are generated with the smoothed bootstrap. This is due
# to the fact that the generated samples are not superimposed on the
# original samples.
#
# More advanced over-sampling using ADASYN and SMOTE
# --------------------------------------------------
#
# Instead of repeating the same samples when over-sampling or perturbing the
# generated bootstrap samples, one can use some specific heuristic.
# :class:`~imblearn.over_sampling.ADASYN` and
# :class:`~imblearn.over_sampling.SMOTE` can be used in this case.
# %%
from imblearn import FunctionSampler  # to use an identity sampler
from imblearn.over_sampling import ADASYN, SMOTE
X, y = create_dataset(n_samples=150, weights=(0.1, 0.2, 0.7))
fig, axs = plt.subplots(nrows=2, ncols=2, figsize=(15, 15))
samplers = [
FunctionSampler(),
RandomOverSampler(random_state=0),
SMOTE(random_state=0),
ADASYN(random_state=0),
]
for ax, sampler in zip(axs.ravel(), samplers):
title = "Original dataset" if isinstance(sampler, FunctionSampler) else None
plot_resampling(X, y, sampler, ax, title=title)
fig.tight_layout()
# %% [markdown]
# The following plot illustrates the difference between
# :class:`~imblearn.over_sampling.ADASYN` and
# :class:`~imblearn.over_sampling.SMOTE`.
# :class:`~imblearn.over_sampling.ADASYN` will focus on the samples which are
# difficult to classify with a nearest-neighbors rule while regular
# :class:`~imblearn.over_sampling.SMOTE` will not make any distinction.
# Therefore, the decision function will differ depending on the algorithm.
X, y = create_dataset(n_samples=150, weights=(0.05, 0.25, 0.7))
fig, axs = plt.subplots(nrows=1, ncols=3, figsize=(20, 6))
models = {
"Without sampler": clf,
"ADASYN sampler": make_pipeline(ADASYN(random_state=0), clf),
"SMOTE sampler": make_pipeline(SMOTE(random_state=0), clf),
}
for ax, (title, model) in zip(axs, models.items()):
model.fit(X, y)
plot_decision_function(X, y, model, ax=ax, title=title)
fig.suptitle(f"Decision function using a {clf.__class__.__name__}")
fig.tight_layout()
# %% [markdown]
# These sampling particularities can give rise to some specific
# issues as illustrated below.
# %%
X, y = create_dataset(n_samples=5000, weights=(0.01, 0.05, 0.94), class_sep=0.8)
samplers = [SMOTE(random_state=0), ADASYN(random_state=0)]
fig, axs = plt.subplots(nrows=2, ncols=2, figsize=(15, 15))
for ax, sampler in zip(axs, samplers):
model = make_pipeline(sampler, clf).fit(X, y)
plot_decision_function(
X, y, clf, ax[0], title=f"Decision function with {sampler.__class__.__name__}"
)
plot_resampling(X, y, sampler, ax[1])
fig.suptitle("Particularities of over-sampling with SMOTE and ADASYN")
fig.tight_layout()
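# %% [markdown]
# As an illustrative check (not part of the original example), comparing the
# class counts after resampling shows that, contrary to SMOTE, ADASYN adapts
# the number of generated samples to the local data density and therefore does
# not necessarily return a perfectly balanced dataset.
# %%
from collections import Counter
for sampler in (SMOTE(random_state=0), ADASYN(random_state=0)):
    _, y_res = sampler.fit_resample(X, y)
    print(f"{sampler.__class__.__name__}: {sorted(Counter(y_res).items())}")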
# %% [markdown]
# SMOTE proposes several variants by identifying specific samples to consider
# during the resampling. The borderline version
# (:class:`~imblearn.over_sampling.BorderlineSMOTE`) will detect which points to
# select, i.e. those lying on the border between two classes. The SVM version
# (:class:`~imblearn.over_sampling.SVMSMOTE`) will use the support vectors
# found by an SVM algorithm to create new samples, while the KMeans version
# (:class:`~imblearn.over_sampling.KMeansSMOTE`) will apply a clustering before
# generating samples in each cluster independently, depending on each cluster
# density.
# %%
from sklearn.cluster import MiniBatchKMeans
from imblearn.over_sampling import SVMSMOTE, BorderlineSMOTE, KMeansSMOTE
X, y = create_dataset(n_samples=5000, weights=(0.01, 0.05, 0.94), class_sep=0.8)
fig, axs = plt.subplots(5, 2, figsize=(15, 30))
samplers = [
SMOTE(random_state=0),
BorderlineSMOTE(random_state=0, kind="borderline-1"),
BorderlineSMOTE(random_state=0, kind="borderline-2"),
KMeansSMOTE(
kmeans_estimator=MiniBatchKMeans(n_clusters=10, n_init=1, random_state=0),
random_state=0,
),
SVMSMOTE(random_state=0),
]
for ax, sampler in zip(axs, samplers):
model = make_pipeline(sampler, clf).fit(X, y)
plot_decision_function(
X, y, clf, ax[0], title=f"Decision function for {sampler.__class__.__name__}"
)
plot_resampling(X, y, sampler, ax[1])
fig.suptitle("Decision function and resampling using SMOTE variants")
fig.tight_layout()
# %% [markdown]
# When dealing with a mix of continuous and categorical features,
# :class:`~imblearn.over_sampling.SMOTENC` is the only method which can handle
# this case.
# %%
from collections import Counter
from imblearn.over_sampling import SMOTENC
rng = np.random.RandomState(42)
n_samples = 50
# Create a dataset of a mix of numerical and categorical data
X = np.empty((n_samples, 3), dtype=object)
X[:, 0] = rng.choice(["A", "B", "C"], size=n_samples).astype(object)
X[:, 1] = rng.randn(n_samples)
X[:, 2] = rng.randint(3, size=n_samples)
y = np.array([0] * 20 + [1] * 30)
print("The original imbalanced dataset")
print(sorted(Counter(y).items()))
print()
print("The first and last columns are containing categorical features:")
print(X[:5])
print()
smote_nc = SMOTENC(categorical_features=[0, 2], random_state=0)
X_resampled, y_resampled = smote_nc.fit_resample(X, y)
print("Dataset after resampling:")
print(sorted(Counter(y_resampled).items()))
print()
print("SMOTE-NC will generate categories for the categorical features:")
print(X_resampled[-5:])
print()
# %% [markdown]
# However, if the dataset is composed of only categorical features then one
# should use :class:`~imblearn.over_sampling.SMOTEN`.
# %%
from imblearn.over_sampling import SMOTEN
# Generate only categorical data
X = np.array(["A"] * 10 + ["B"] * 20 + ["C"] * 30, dtype=object).reshape(-1, 1)
y = np.array([0] * 20 + [1] * 40, dtype=np.int32)
print(f"Original class counts: {Counter(y)}")
print()
print(X[:5])
print()
sampler = SMOTEN(random_state=0)
X_res, y_res = sampler.fit_resample(X, y)
print(f"Class counts after resampling {Counter(y_res)}")
print()
print(X_res[-5:])
print()
| 10,987 | 30.304843 | 86 | py |
imbalanced-learn | imbalanced-learn-master/examples/under-sampling/plot_illustration_nearmiss.py | """
============================
Sample selection in NearMiss
============================
This example illustrates the different way of selecting example in
:class:`~imblearn.under_sampling.NearMiss`.
"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
# %%
print(__doc__)
import seaborn as sns
sns.set_context("poster")
# %% [markdown]
# We define a function allowing us to add some nice decoration to the plot.
# %%
def make_plot_despine(ax):
sns.despine(ax=ax, offset=10)
ax.set_xlim([0, 3.5])
ax.set_ylim([0, 3.5])
ax.set_xticks(np.arange(0, 3.6, 0.5))
ax.set_yticks(np.arange(0, 3.6, 0.5))
ax.set_xlabel(r"$X_1$")
ax.set_ylabel(r"$X_2$")
ax.legend(loc="upper left", fontsize=16)
# %% [markdown]
# We can start by generating some data to later illustrate the principle of
# each :class:`~imblearn.under_sampling.NearMiss` heuristic rule.
# %%
import numpy as np
rng = np.random.RandomState(18)
X_minority = np.transpose(
[[1.1, 1.3, 1.15, 0.8, 0.8, 0.6, 0.55], [1.0, 1.5, 1.7, 2.5, 2.0, 1.2, 0.55]]
)
X_majority = np.transpose(
[
[2.1, 2.12, 2.13, 2.14, 2.2, 2.3, 2.5, 2.45],
[1.5, 2.1, 2.7, 0.9, 1.0, 1.4, 2.4, 2.9],
]
)
# %% [markdown]
# NearMiss-1
# ----------
#
# NearMiss-1 selects samples from the majority class for which the average
# distance to some nearest neighbours is the smallest. In the following
# example, we use a 3-NN to compute the average distance on 2 specific samples
# of the majority class. Therefore, in this case the point linked by the
# green-dashed line will be selected since the average distance is smaller.
# %%
import matplotlib.pyplot as plt
from sklearn.neighbors import NearestNeighbors
fig, ax = plt.subplots(figsize=(8, 8))
ax.scatter(
X_minority[:, 0],
X_minority[:, 1],
label="Minority class",
s=200,
marker="_",
)
ax.scatter(
X_majority[:, 0],
X_majority[:, 1],
label="Majority class",
s=200,
marker="+",
)
nearest_neighbors = NearestNeighbors(n_neighbors=3)
nearest_neighbors.fit(X_minority)
dist, ind = nearest_neighbors.kneighbors(X_majority[:2, :])
dist_avg = dist.sum(axis=1) / 3
for positive_idx, (neighbors, distance, color) in enumerate(
zip(ind, dist_avg, ["g", "r"])
):
for make_plot, sample_idx in enumerate(neighbors):
ax.plot(
[X_majority[positive_idx, 0], X_minority[sample_idx, 0]],
[X_majority[positive_idx, 1], X_minority[sample_idx, 1]],
"--" + color,
alpha=0.3,
label=f"Avg. dist.={distance:.2f}" if make_plot == 0 else "",
)
ax.set_title("NearMiss-1")
make_plot_despine(ax)
plt.tight_layout()
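# %% [markdown]
# As a minimal illustrative usage (not part of the original example), the same
# heuristic is available through the `NearMiss` under-sampler; applied to a
# labelled version of the toy data above, it keeps as many majority samples as
# there are minority samples.
# %%
from imblearn.under_sampling import NearMiss
X_toy = np.vstack([X_minority, X_majority])
y_toy = np.array([0] * len(X_minority) + [1] * len(X_majority))
X_res, y_res = NearMiss(version=1, n_neighbors=3).fit_resample(X_toy, y_toy)
print(f"Majority samples kept: {sum(y_res == 1)} out of {len(X_majority)}")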
# %% [markdown]
# NearMiss-2
# ----------
#
# NearMiss-2 selects samples from the majority class for which the average
# distance to the farthest neighbors is the smallest. With the same
# configuration as previously presented, the sample linked to the green-dashed
# line will be selected since its distance the 3 farthest neighbors is the
# smallest.
# %%
fig, ax = plt.subplots(figsize=(8, 8))
ax.scatter(
X_minority[:, 0],
X_minority[:, 1],
label="Minority class",
s=200,
marker="_",
)
ax.scatter(
X_majority[:, 0],
X_majority[:, 1],
label="Majority class",
s=200,
marker="+",
)
nearest_neighbors = NearestNeighbors(n_neighbors=X_minority.shape[0])
nearest_neighbors.fit(X_minority)
dist, ind = nearest_neighbors.kneighbors(X_majority[:2, :])
dist = dist[:, -3::]
ind = ind[:, -3::]
dist_avg = dist.sum(axis=1) / 3
for positive_idx, (neighbors, distance, color) in enumerate(
zip(ind, dist_avg, ["g", "r"])
):
for make_plot, sample_idx in enumerate(neighbors):
ax.plot(
[X_majority[positive_idx, 0], X_minority[sample_idx, 0]],
[X_majority[positive_idx, 1], X_minority[sample_idx, 1]],
"--" + color,
alpha=0.3,
label=f"Avg. dist.={distance:.2f}" if make_plot == 0 else "",
)
ax.set_title("NearMiss-2")
make_plot_despine(ax)
plt.tight_layout()
# %% [markdown]
# NearMiss-3
# ----------
#
# NearMiss-3 can be divided into 2 steps. First, a nearest-neighbors is used to
# short-list samples from the majority class (i.e. they correspond to the
# highlighted samples in the following plot). Then, the samples with the largest
# average distance to the *k* nearest-neighbors are selected.
# %%
fig, ax = plt.subplots(figsize=(8.5, 8.5))
ax.scatter(
X_minority[:, 0],
X_minority[:, 1],
label="Minority class",
s=200,
marker="_",
)
ax.scatter(
X_majority[:, 0],
X_majority[:, 1],
label="Majority class",
s=200,
marker="+",
)
nearest_neighbors = NearestNeighbors(n_neighbors=3)
nearest_neighbors.fit(X_majority)
# select only the majority point of interest
selected_idx = nearest_neighbors.kneighbors(X_minority, return_distance=False)
X_majority = X_majority[np.unique(selected_idx), :]
ax.scatter(
X_majority[:, 0],
X_majority[:, 1],
label="Short-listed samples",
s=200,
alpha=0.3,
color="g",
)
nearest_neighbors = NearestNeighbors(n_neighbors=3)
nearest_neighbors.fit(X_minority)
dist, ind = nearest_neighbors.kneighbors(X_majority[:2, :])
dist_avg = dist.sum(axis=1) / 3
for positive_idx, (neighbors, distance, color) in enumerate(
zip(ind, dist_avg, ["r", "g"])
):
for make_plot, sample_idx in enumerate(neighbors):
ax.plot(
[X_majority[positive_idx, 0], X_minority[sample_idx, 0]],
[X_majority[positive_idx, 1], X_minority[sample_idx, 1]],
"--" + color,
alpha=0.3,
label=f"Avg. dist.={distance:.2f}" if make_plot == 0 else "",
)
ax.set_title("NearMiss-3")
make_plot_despine(ax)
plt.tight_layout()
plt.show()
| 5,767 | 25.827907 | 81 | py |
imbalanced-learn | imbalanced-learn-master/examples/under-sampling/plot_illustration_tomek_links.py | """
==============================================
Illustration of the definition of a Tomek link
==============================================
This example illustrates what is a Tomek link.
"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
# %%
print(__doc__)
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context("poster")
# %% [markdown]
# This function will be used to make nicer plots.
# %%
def make_plot_despine(ax):
sns.despine(ax=ax, offset=10)
ax.set_xlim([0, 3])
ax.set_ylim([0, 3])
ax.set_xlabel(r"$X_1$")
ax.set_ylabel(r"$X_2$")
ax.legend(loc="lower right")
# %% [markdown]
# We will generate some toy data that illustrates how
# :class:`~imblearn.under_sampling.TomekLinks` is used to clean a dataset.
# %%
import numpy as np
rng = np.random.RandomState(18)
X_minority = np.transpose(
[[1.1, 1.3, 1.15, 0.8, 0.55, 2.1], [1.0, 1.5, 1.7, 2.5, 0.55, 1.9]]
)
X_majority = np.transpose(
[
[2.1, 2.12, 2.13, 2.14, 2.2, 2.3, 2.5, 2.45],
[1.5, 2.1, 2.7, 0.9, 1.0, 1.4, 2.4, 2.9],
]
)
# %% [markdown]
# In the figure below, the samples highlighted in green form a Tomek link since
# they are of different classes and are nearest neighbors of each other.
fig, ax = plt.subplots(figsize=(8, 8))
ax.scatter(
X_minority[:, 0],
X_minority[:, 1],
label="Minority class",
s=200,
marker="_",
)
ax.scatter(
X_majority[:, 0],
X_majority[:, 1],
label="Majority class",
s=200,
marker="+",
)
# highlight the samples of interest
ax.scatter(
[X_minority[-1, 0], X_majority[1, 0]],
[X_minority[-1, 1], X_majority[1, 1]],
label="Tomek link",
s=200,
alpha=0.3,
)
make_plot_despine(ax)
fig.suptitle("Illustration of a Tomek link")
fig.tight_layout()
# %% [markdown]
# We can run the :class:`~imblearn.under_sampling.TomekLinks` sampling to
# remove the corresponding samples. If `sampling_strategy='auto'` only the
# sample from the majority class will be removed. If `sampling_strategy='all'`
# both samples will be removed.
# %%
from imblearn.under_sampling import TomekLinks
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(16, 8))
samplers = {
"Removing only majority samples": TomekLinks(sampling_strategy="auto"),
"Removing all samples": TomekLinks(sampling_strategy="all"),
}
for ax, (title, sampler) in zip(axs, samplers.items()):
X_res, y_res = sampler.fit_resample(
np.vstack((X_minority, X_majority)),
np.array([0] * X_minority.shape[0] + [1] * X_majority.shape[0]),
)
ax.scatter(
X_res[y_res == 0][:, 0],
X_res[y_res == 0][:, 1],
label="Minority class",
s=200,
marker="_",
)
ax.scatter(
X_res[y_res == 1][:, 0],
X_res[y_res == 1][:, 1],
label="Majority class",
s=200,
marker="+",
)
# highlight the samples of interest
ax.scatter(
[X_minority[-1, 0], X_majority[1, 0]],
[X_minority[-1, 1], X_majority[1, 1]],
label="Tomek link",
s=200,
alpha=0.3,
)
ax.set_title(title)
make_plot_despine(ax)
fig.tight_layout()
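# %% [markdown]
# As a quick numerical check (purely illustrative, not part of the original
# example), we can count how many samples remain after resampling with each
# strategy.
# %%
for title, sampler in samplers.items():
    X_res, y_res = sampler.fit_resample(
        np.vstack((X_minority, X_majority)),
        np.array([0] * X_minority.shape[0] + [1] * X_majority.shape[0]),
    )
    print(
        f"{title}: {X_res.shape[0]} samples remaining out of "
        f"{X_minority.shape[0] + X_majority.shape[0]}"
    )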
plt.show()
| 3,180 | 22.389706 | 79 | py |
imbalanced-learn | imbalanced-learn-master/examples/under-sampling/plot_comparison_under_sampling.py | """
===============================
Compare under-sampling samplers
===============================
The following example aims at making a qualitative comparison between the
different under-sampling algorithms available in the imbalanced-learn package.
"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
# %%
print(__doc__)
import seaborn as sns
sns.set_context("poster")
# %% [markdown]
# The following function will be used to create toy dataset. It uses the
# :func:`~sklearn.datasets.make_classification` from scikit-learn but fixing
# some parameters.
# %%
from sklearn.datasets import make_classification
def create_dataset(
n_samples=1000,
weights=(0.01, 0.01, 0.98),
n_classes=3,
class_sep=0.8,
n_clusters=1,
):
return make_classification(
n_samples=n_samples,
n_features=2,
n_informative=2,
n_redundant=0,
n_repeated=0,
n_classes=n_classes,
n_clusters_per_class=n_clusters,
weights=list(weights),
class_sep=class_sep,
random_state=0,
)
# %% [markdown]
# The following function will be used to plot the sample space after resampling
# to illustrate the specificities of an algorithm.
# %%
def plot_resampling(X, y, sampler, ax, title=None):
X_res, y_res = sampler.fit_resample(X, y)
ax.scatter(X_res[:, 0], X_res[:, 1], c=y_res, alpha=0.8, edgecolor="k")
if title is None:
title = f"Resampling with {sampler.__class__.__name__}"
ax.set_title(title)
sns.despine(ax=ax, offset=10)
# %% [markdown]
# The following function will be used to plot the decision function of a
# classifier given some data.
# %%
import numpy as np
def plot_decision_function(X, y, clf, ax, title=None):
plot_step = 0.02
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(
np.arange(x_min, x_max, plot_step), np.arange(y_min, y_max, plot_step)
)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, alpha=0.4)
ax.scatter(X[:, 0], X[:, 1], alpha=0.8, c=y, edgecolor="k")
if title is not None:
ax.set_title(title)
# %%
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression()
# %% [markdown]
# Prototype generation: under-sampling by generating new samples
# --------------------------------------------------------------
#
# :class:`~imblearn.under_sampling.ClusterCentroids` under-samples by replacing
# the original samples by the centroids of the clusters found.
# %%
import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans
from imblearn import FunctionSampler
from imblearn.pipeline import make_pipeline
from imblearn.under_sampling import ClusterCentroids
X, y = create_dataset(n_samples=400, weights=(0.05, 0.15, 0.8), class_sep=0.8)
samplers = {
FunctionSampler(), # identity resampler
ClusterCentroids(
estimator=MiniBatchKMeans(n_init=1, random_state=0), random_state=0
),
}
fig, axs = plt.subplots(nrows=2, ncols=2, figsize=(15, 15))
for ax, sampler in zip(axs, samplers):
model = make_pipeline(sampler, clf).fit(X, y)
plot_decision_function(
X, y, model, ax[0], title=f"Decision function with {sampler.__class__.__name__}"
)
plot_resampling(X, y, sampler, ax[1])
fig.tight_layout()
# %% [markdown]
# Prototype selection: under-sampling by selecting existing samples
# -----------------------------------------------------------------
#
# The algorithm performing prototype selection can be subdivided into two
# groups: (i) the controlled under-sampling methods and (ii) the cleaning
# under-sampling methods.
#
# With the controlled under-sampling methods, the number of samples to be
# selected can be specified.
# :class:`~imblearn.under_sampling.RandomUnderSampler` is the most naive way of
# performing such selection by randomly selecting a given number of samples
# from the targeted class.
# %%
from imblearn.under_sampling import RandomUnderSampler
X, y = create_dataset(n_samples=400, weights=(0.05, 0.15, 0.8), class_sep=0.8)
samplers = {
FunctionSampler(), # identity resampler
RandomUnderSampler(random_state=0),
}
fig, axs = plt.subplots(nrows=2, ncols=2, figsize=(15, 15))
for ax, sampler in zip(axs, samplers):
model = make_pipeline(sampler, clf).fit(X, y)
plot_decision_function(
X, y, model, ax[0], title=f"Decision function with {sampler.__class__.__name__}"
)
plot_resampling(X, y, sampler, ax[1])
fig.tight_layout()
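# %% [markdown]
# As a small numerical illustration (not shown in the original example), random
# under-sampling reduces, by default, every class to the size of the minority
# class.
# %%
from collections import Counter
print(f"Original class distribution: {Counter(y)}")
X_res, y_res = RandomUnderSampler(random_state=0).fit_resample(X, y)
print(f"Class distribution after random under-sampling: {Counter(y_res)}")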
# %% [markdown]
# :class:`~imblearn.under_sampling.NearMiss` algorithms implement some
# heuristic rules in order to select samples. NearMiss-1 selects samples from
# the majority class for which the average distance to the :math:`k` nearest
# samples of the minority class is the smallest. NearMiss-2 selects the samples
# from the majority class for which the average distance to the farthest
# samples of the minority class is the smallest. NearMiss-3 is a 2-step
# algorithm: first, for each minority sample, their :math:`m`
# nearest-neighbors will be kept; then, the majority samples selected are the
# ones for which the average distance to the :math:`k` nearest neighbors is
# the largest.
# %%
from imblearn.under_sampling import NearMiss
X, y = create_dataset(n_samples=1000, weights=(0.05, 0.15, 0.8), class_sep=1.5)
samplers = [NearMiss(version=1), NearMiss(version=2), NearMiss(version=3)]
fig, axs = plt.subplots(nrows=3, ncols=2, figsize=(15, 25))
for ax, sampler in zip(axs, samplers):
model = make_pipeline(sampler, clf).fit(X, y)
plot_decision_function(
X,
y,
model,
ax[0],
title=f"Decision function for {sampler.__class__.__name__}-{sampler.version}",
)
plot_resampling(
X,
y,
sampler,
ax[1],
title=f"Resampling using {sampler.__class__.__name__}-{sampler.version}",
)
fig.tight_layout()
# %% [markdown]
# :class:`~imblearn.under_sampling.EditedNearestNeighbours` removes samples of
# the majority class whose class differs from the one of their
# nearest-neighbors. This sieve can be repeated, which is the principle of the
# :class:`~imblearn.under_sampling.RepeatedEditedNearestNeighbours`.
# :class:`~imblearn.under_sampling.AllKNN` differs slightly from the
# :class:`~imblearn.under_sampling.RepeatedEditedNearestNeighbours` by changing
# the :math:`k` parameter of the internal nearest neighbors algorithm,
# increasing it at each iteration.
# %%
from imblearn.under_sampling import (
AllKNN,
EditedNearestNeighbours,
RepeatedEditedNearestNeighbours,
)
X, y = create_dataset(n_samples=500, weights=(0.2, 0.3, 0.5), class_sep=0.8)
samplers = [
EditedNearestNeighbours(),
RepeatedEditedNearestNeighbours(),
AllKNN(allow_minority=True),
]
fig, axs = plt.subplots(3, 2, figsize=(15, 25))
for ax, sampler in zip(axs, samplers):
model = make_pipeline(sampler, clf).fit(X, y)
plot_decision_function(
X, y, clf, ax[0], title=f"Decision function for \n{sampler.__class__.__name__}"
)
plot_resampling(
X, y, sampler, ax[1], title=f"Resampling using \n{sampler.__class__.__name__}"
)
fig.tight_layout()
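# %% [markdown]
# Contrary to the controlled under-sampling methods, these cleaning methods do
# not let the user specify the number of samples to keep. As a quick,
# illustrative check (not part of the original example), we can print the
# class distribution obtained with each of them.
# %%
from collections import Counter
for sampler in samplers:
    _, y_res = sampler.fit_resample(X, y)
    print(f"{sampler.__class__.__name__}: {dict(Counter(y_res))}")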
# %% [markdown]
# :class:`~imblearn.under_sampling.CondensedNearestNeighbour` makes use of a
# 1-NN to iteratively decide if a sample should be kept in a dataset or not.
# The issue is that :class:`~imblearn.under_sampling.CondensedNearestNeighbour`
# is sensitive to noise since it preserves the noisy samples.
# :class:`~imblearn.under_sampling.OneSidedSelection` also uses a 1-NN and
# relies on :class:`~imblearn.under_sampling.TomekLinks` to remove the samples
# considered noisy. The
# :class:`~imblearn.under_sampling.NeighbourhoodCleaningRule` uses a
# :class:`~imblearn.under_sampling.EditedNearestNeighbours` to remove some
# samples. Additionally, it uses a 3 nearest-neighbors rule to remove samples
# which do not agree with this rule.
# %%
from imblearn.under_sampling import (
CondensedNearestNeighbour,
NeighbourhoodCleaningRule,
OneSidedSelection,
)
X, y = create_dataset(n_samples=500, weights=(0.2, 0.3, 0.5), class_sep=0.8)
fig, axs = plt.subplots(nrows=3, ncols=2, figsize=(15, 25))
samplers = [
CondensedNearestNeighbour(random_state=0),
OneSidedSelection(random_state=0),
NeighbourhoodCleaningRule(n_neighbors=11),
]
for ax, sampler in zip(axs, samplers):
model = make_pipeline(sampler, clf).fit(X, y)
plot_decision_function(
X, y, clf, ax[0], title=f"Decision function for \n{sampler.__class__.__name__}"
)
plot_resampling(
X, y, sampler, ax[1], title=f"Resampling using \n{sampler.__class__.__name__}"
)
fig.tight_layout()
# %% [markdown]
# :class:`~imblearn.under_sampling.InstanceHardnessThreshold` uses the
# predictions of a classifier to exclude samples. All samples which are
# classified with a low probability will be removed.
# %%
from imblearn.under_sampling import InstanceHardnessThreshold
samplers = {
FunctionSampler(), # identity resampler
InstanceHardnessThreshold(
estimator=LogisticRegression(),
random_state=0,
),
}
fig, axs = plt.subplots(nrows=2, ncols=2, figsize=(15, 15))
for ax, sampler in zip(axs, samplers):
model = make_pipeline(sampler, clf).fit(X, y)
plot_decision_function(
X,
y,
model,
ax[0],
title=f"Decision function with \n{sampler.__class__.__name__}",
)
plot_resampling(
X, y, sampler, ax[1], title=f"Resampling using \n{sampler.__class__.__name__}"
)
fig.tight_layout()
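# %% [markdown]
# As a quick, illustrative check (not part of the original example),
# :class:`~imblearn.under_sampling.InstanceHardnessThreshold` does not
# necessarily produce perfectly balanced classes: it removes the samples of
# the over-represented classes which are classified with the lowest
# probabilities.
# %%
from collections import Counter
iht = InstanceHardnessThreshold(estimator=LogisticRegression(), random_state=0)
_, y_res = iht.fit_resample(X, y)
print(f"Class distribution after instance hardness thresholding: {dict(Counter(y_res))}")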
plt.show()
| 9,708 | 30.11859 | 88 | py |
imbalanced-learn | imbalanced-learn-master/examples/ensemble/plot_bagging_classifier.py | """
=================================
Bagging classifiers using sampler
=================================
In this example, we show how
:class:`~imblearn.ensemble.BalancedBaggingClassifier` can be used to create a
large variety of classifiers by giving different samplers.
We will give several examples that have been published over the past years.
"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
# %%
print(__doc__)
# %% [markdown]
# Generate an imbalanced dataset
# ------------------------------
#
# For this example, we will create a synthetic dataset using the function
# :func:`~sklearn.datasets.make_classification`. The problem will be a toy
# classification problem with a ratio of 1:9 between the two classes.
# %%
from sklearn.datasets import make_classification
X, y = make_classification(
n_samples=10_000,
n_features=10,
weights=[0.1, 0.9],
class_sep=0.5,
random_state=0,
)
# %%
import pandas as pd
pd.Series(y).value_counts(normalize=True)
# %% [markdown]
# In the following sections, we will show a couple of algorithms that have
# been proposed over the years. We intend to illustrate how one can reuse the
# :class:`~imblearn.ensemble.BalancedBaggingClassifier` by passing different
# samplers.
from sklearn.ensemble import BaggingClassifier
# %%
from sklearn.model_selection import cross_validate
ebb = BaggingClassifier()
cv_results = cross_validate(ebb, X, y, scoring="balanced_accuracy")
print(f"{cv_results['test_score'].mean():.3f} +/- {cv_results['test_score'].std():.3f}")
# %% [markdown]
# Exactly Balanced Bagging and Over-Bagging
# -----------------------------------------
#
# The :class:`~imblearn.ensemble.BalancedBaggingClassifier` can be used in
# conjunction with a :class:`~imblearn.under_sampling.RandomUnderSampler` or a
# :class:`~imblearn.over_sampling.RandomOverSampler`. These methods are
# referred to as Exactly Balanced Bagging and Over-Bagging, respectively, and
# were first proposed in [1]_.
# %%
from imblearn.ensemble import BalancedBaggingClassifier
from imblearn.under_sampling import RandomUnderSampler
# Exactly Balanced Bagging
ebb = BalancedBaggingClassifier(sampler=RandomUnderSampler())
cv_results = cross_validate(ebb, X, y, scoring="balanced_accuracy")
print(f"{cv_results['test_score'].mean():.3f} +/- {cv_results['test_score'].std():.3f}")
# %%
from imblearn.over_sampling import RandomOverSampler
# Over-bagging
over_bagging = BalancedBaggingClassifier(sampler=RandomOverSampler())
cv_results = cross_validate(over_bagging, X, y, scoring="balanced_accuracy")
print(f"{cv_results['test_score'].mean():.3f} +/- {cv_results['test_score'].std():.3f}")
# %% [markdown]
# SMOTE-Bagging
# -------------
#
# Instead of using a :class:`~imblearn.over_sampling.RandomOverSampler` that
# makes a bootstrap sample, an alternative is to use
# :class:`~imblearn.over_sampling.SMOTE` as an over-sampler. This is known as
# SMOTE-Bagging [2]_.
# %%
from imblearn.over_sampling import SMOTE
# SMOTE-Bagging
smote_bagging = BalancedBaggingClassifier(sampler=SMOTE())
cv_results = cross_validate(smote_bagging, X, y, scoring="balanced_accuracy")
print(f"{cv_results['test_score'].mean():.3f} +/- {cv_results['test_score'].std():.3f}")
# %% [markdown]
# Roughly Balanced Bagging
# ------------------------
# While using a :class:`~imblearn.under_sampling.RandomUnderSampler` or a
# :class:`~imblearn.over_sampling.RandomOverSampler` will create exactly the
# desired number of samples, it does not follow the statistical spirit wanted
# in the bagging framework. The authors in [3]_ propose to use a negative
# binomial distribution to compute the number of samples of the majority
# class to be selected and then perform a random under-sampling.
#
# Here, we illustrate this method by implementing a function in charge of
# resampling and use the :class:`~imblearn.FunctionSampler` to integrate it
# within a :class:`~imblearn.pipeline.Pipeline` and
# :class:`~sklearn.model_selection.cross_validate`.
# %%
from collections import Counter
import numpy as np
from imblearn import FunctionSampler
def roughly_balanced_bagging(X, y, replace=False):
"""Implementation of Roughly Balanced Bagging for binary problem."""
# find the minority and majority classes
class_counts = Counter(y)
majority_class = max(class_counts, key=class_counts.get)
minority_class = min(class_counts, key=class_counts.get)
# compute the number of sample to draw from the majority class using
# a negative binomial distribution
n_minority_class = class_counts[minority_class]
n_majority_resampled = np.random.negative_binomial(n=n_minority_class, p=0.5)
# draw randomly with or without replacement
majority_indices = np.random.choice(
np.flatnonzero(y == majority_class),
size=n_majority_resampled,
replace=replace,
)
minority_indices = np.random.choice(
np.flatnonzero(y == minority_class),
size=n_minority_class,
replace=replace,
)
indices = np.hstack([majority_indices, minority_indices])
return X[indices], y[indices]
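# %% [markdown]
# As a short, purely illustrative aside, the negative binomial draw used above
# has an expectation equal to ``n`` when ``p=0.5``: on average, as many
# majority samples as minority samples are drawn, but the exact number varies
# from one bootstrap to another.
# %%
rng = np.random.RandomState(0)
print(rng.negative_binomial(n=100, p=0.5, size=5))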
# Roughly Balanced Bagging
rbb = BalancedBaggingClassifier(
sampler=FunctionSampler(func=roughly_balanced_bagging, kw_args={"replace": True})
)
cv_results = cross_validate(rbb, X, y, scoring="balanced_accuracy")
print(f"{cv_results['test_score'].mean():.3f} +/- {cv_results['test_score'].std():.3f}")
# %% [markdown]
# .. topic:: References:
#
# .. [1] R. Maclin, and D. Opitz. "An empirical evaluation of bagging and
# boosting." AAAI/IAAI 1997 (1997): 546-551.
#
# .. [2] S. Wang, and X. Yao. "Diversity analysis on imbalanced data sets by
# using ensemble models." 2009 IEEE symposium on computational
# intelligence and data mining. IEEE, 2009.
#
# .. [3] S. Hido, H. Kashima, and Y. Takahashi. "Roughly balanced bagging
# for imbalanced data." Statistical Analysis and Data Mining: The ASA
# Data Science Journal 2.5‐6 (2009): 412-426.
| 6,020 | 32.636872 | 88 | py |
imbalanced-learn | imbalanced-learn-master/examples/ensemble/plot_comparison_ensemble_classifier.py | """
=============================================
Compare ensemble classifiers using resampling
=============================================
Ensemble classifiers have been shown to improve classification performance
compared to a single learner. However, they will be affected by class
imbalance. This example shows the benefit of balancing the training set
before training the base learners. We are making the comparison with
non-balanced ensemble methods.
We make a comparison using the balanced accuracy and geometric mean which are
metrics widely used in the literature to evaluate models learned on
imbalanced datasets.
"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
# %%
print(__doc__)
# %% [markdown]
# Load an imbalanced dataset
# --------------------------
#
# We will load the UCI SatImage dataset which has an imbalanced ratio of 9.3:1
# (number of majority samples for one minority sample). The data are then
# split into training and testing sets.
from sklearn.model_selection import train_test_split
# %%
from imblearn.datasets import fetch_datasets
satimage = fetch_datasets()["satimage"]
X, y = satimage.data, satimage.target
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=0)
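# %% [markdown]
# As a quick check (illustrative only, not part of the original example), we
# can look at the class distribution of the training set to confirm the
# imbalance mentioned above.
# %%
from collections import Counter
print(f"Training class distribution: {Counter(y_train)}")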
# %% [markdown]
# Classification using a single decision tree
# -------------------------------------------
#
# We train a decision tree classifier which will be used as a baseline for the
# rest of this example.
#
# The results are reported in terms of balanced accuracy and geometric mean
# which are metrics widely used in the literature to validate models trained
# on imbalanced datasets.
# %%
from sklearn.tree import DecisionTreeClassifier
tree = DecisionTreeClassifier()
tree.fit(X_train, y_train)
y_pred_tree = tree.predict(X_test)
# %%
from sklearn.metrics import balanced_accuracy_score
from imblearn.metrics import geometric_mean_score
print("Decision tree classifier performance:")
print(
f"Balanced accuracy: {balanced_accuracy_score(y_test, y_pred_tree):.2f} - "
f"Geometric mean {geometric_mean_score(y_test, y_pred_tree):.2f}"
)
# %%
import seaborn as sns
from sklearn.metrics import ConfusionMatrixDisplay
sns.set_context("poster")
disp = ConfusionMatrixDisplay.from_estimator(tree, X_test, y_test, colorbar=False)
_ = disp.ax_.set_title("Decision tree")
# %% [markdown]
# Classification using bagging classifier with and without sampling
# -----------------------------------------------------------------
#
# Instead of using a single tree, we will check if an ensemble of decision
# trees can actually alleviate the issue induced by the class imbalance.
# First, we will use a bagging classifier and its counterpart which internally
# uses a random under-sampler to balance each bootstrap sample.
# %%
from sklearn.ensemble import BaggingClassifier
from imblearn.ensemble import BalancedBaggingClassifier
bagging = BaggingClassifier(n_estimators=50, random_state=0)
balanced_bagging = BalancedBaggingClassifier(n_estimators=50, random_state=0)
bagging.fit(X_train, y_train)
balanced_bagging.fit(X_train, y_train)
y_pred_bc = bagging.predict(X_test)
y_pred_bbc = balanced_bagging.predict(X_test)
# %% [markdown]
# Balancing each bootstrap sample significantly increases the balanced
# accuracy and the geometric mean.
# %%
print("Bagging classifier performance:")
print(
f"Balanced accuracy: {balanced_accuracy_score(y_test, y_pred_bc):.2f} - "
f"Geometric mean {geometric_mean_score(y_test, y_pred_bc):.2f}"
)
print("Balanced Bagging classifier performance:")
print(
f"Balanced accuracy: {balanced_accuracy_score(y_test, y_pred_bbc):.2f} - "
f"Geometric mean {geometric_mean_score(y_test, y_pred_bbc):.2f}"
)
# %%
import matplotlib.pyplot as plt
fig, axs = plt.subplots(ncols=2, figsize=(10, 5))
ConfusionMatrixDisplay.from_estimator(
bagging, X_test, y_test, ax=axs[0], colorbar=False
)
axs[0].set_title("Bagging")
ConfusionMatrixDisplay.from_estimator(
balanced_bagging, X_test, y_test, ax=axs[1], colorbar=False
)
axs[1].set_title("Balanced Bagging")
fig.tight_layout()
# %% [markdown]
# Classification using random forest classifier with and without sampling
# -----------------------------------------------------------------------
#
# Random forest is another popular ensemble method and it usually
# outperforms bagging. Here, we use a vanilla random forest and its balanced
# counterpart in which each bootstrap sample is balanced.
# %%
from sklearn.ensemble import RandomForestClassifier
from imblearn.ensemble import BalancedRandomForestClassifier
rf = RandomForestClassifier(n_estimators=50, random_state=0)
brf = BalancedRandomForestClassifier(
n_estimators=50,
sampling_strategy="all",
replacement=True,
bootstrap=False,
random_state=0,
)
rf.fit(X_train, y_train)
brf.fit(X_train, y_train)
y_pred_rf = rf.predict(X_test)
y_pred_brf = brf.predict(X_test)
# %% [markdown]
# Similarly to the previous experiment, the balanced classifier outperforms the
# classifier which learns from imbalanced bootstrap samples. In addition,
# random forest outperforms the bagging classifier.
# %%
print("Random Forest classifier performance:")
print(
f"Balanced accuracy: {balanced_accuracy_score(y_test, y_pred_rf):.2f} - "
f"Geometric mean {geometric_mean_score(y_test, y_pred_rf):.2f}"
)
print("Balanced Random Forest classifier performance:")
print(
f"Balanced accuracy: {balanced_accuracy_score(y_test, y_pred_brf):.2f} - "
f"Geometric mean {geometric_mean_score(y_test, y_pred_brf):.2f}"
)
# %%
fig, axs = plt.subplots(ncols=2, figsize=(10, 5))
ConfusionMatrixDisplay.from_estimator(rf, X_test, y_test, ax=axs[0], colorbar=False)
axs[0].set_title("Random forest")
ConfusionMatrixDisplay.from_estimator(brf, X_test, y_test, ax=axs[1], colorbar=False)
axs[1].set_title("Balanced random forest")
fig.tight_layout()
# %% [markdown]
# Boosting classifier
# -------------------
#
# In the same manner, the easy ensemble classifier is a bag of balanced
# AdaBoost classifiers. However, it will be slower to train than the random
# forest and will achieve a worse performance.
# %%
from sklearn.ensemble import AdaBoostClassifier
from imblearn.ensemble import EasyEnsembleClassifier, RUSBoostClassifier
estimator = AdaBoostClassifier(n_estimators=10)
eec = EasyEnsembleClassifier(n_estimators=10, estimator=estimator)
eec.fit(X_train, y_train)
y_pred_eec = eec.predict(X_test)
rusboost = RUSBoostClassifier(n_estimators=10, estimator=estimator)
rusboost.fit(X_train, y_train)
y_pred_rusboost = rusboost.predict(X_test)
# %%
print("Easy ensemble classifier performance:")
print(
f"Balanced accuracy: {balanced_accuracy_score(y_test, y_pred_eec):.2f} - "
f"Geometric mean {geometric_mean_score(y_test, y_pred_eec):.2f}"
)
print("RUSBoost classifier performance:")
print(
f"Balanced accuracy: {balanced_accuracy_score(y_test, y_pred_rusboost):.2f} - "
f"Geometric mean {geometric_mean_score(y_test, y_pred_rusboost):.2f}"
)
# %%
fig, axs = plt.subplots(ncols=2, figsize=(10, 5))
ConfusionMatrixDisplay.from_estimator(eec, X_test, y_test, ax=axs[0], colorbar=False)
axs[0].set_title("Easy Ensemble")
ConfusionMatrixDisplay.from_estimator(
rusboost, X_test, y_test, ax=axs[1], colorbar=False
)
axs[1].set_title("RUSBoost classifier")
fig.tight_layout()
plt.show()
| 7,338 | 30.497854 | 85 | py |
imbalanced-learn | imbalanced-learn-master/examples/applications/plot_impact_imbalanced_classes.py | """
==========================================================
Fitting model on imbalanced datasets and how to fight bias
==========================================================
This example illustrates the problem induced by learning on datasets having
imbalanced classes. Subsequently, we compare different approaches alleviating
these negative effects.
"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
# %%
print(__doc__)
# %% [markdown]
# Problem definition
# ------------------
#
# We are dropping the following features:
#
# - "fnlwgt": this feature was created while studying the "adult" dataset.
# Thus, we will not use this feature which is not acquired during the survey.
# - "education-num": it is encoding the same information than "education".
# Thus, we are removing one of these 2 features.
# %%
from sklearn.datasets import fetch_openml
df, y = fetch_openml("adult", version=2, as_frame=True, return_X_y=True)
df = df.drop(columns=["fnlwgt", "education-num"])
# %% [markdown]
# The "adult" dataset as a class ratio of about 3:1
# %%
classes_count = y.value_counts()
classes_count
# %% [markdown]
# This dataset is only slightly imbalanced. To better highlight the effect of
# learning from an imbalanced dataset, we will increase its ratio to 30:1
# %%
from imblearn.datasets import make_imbalance
ratio = 30
df_res, y_res = make_imbalance(
df,
y,
sampling_strategy={classes_count.idxmin(): classes_count.max() // ratio},
)
y_res.value_counts()
# %% [markdown]
# We will perform a cross-validation evaluation to get an estimate of the test
# score.
#
# As a baseline, we could use a classifier which will always predict the
# majority class independently of the features provided.
from sklearn.dummy import DummyClassifier
# %%
from sklearn.model_selection import cross_validate
dummy_clf = DummyClassifier(strategy="most_frequent")
scoring = ["accuracy", "balanced_accuracy"]
cv_result = cross_validate(dummy_clf, df_res, y_res, scoring=scoring)
print(f"Accuracy score of a dummy classifier: {cv_result['test_accuracy'].mean():.3f}")
# %% [markdown]
# Instead of using the accuracy, we can use the balanced accuracy which will
# take into account the balancing issue.
# %%
print(
f"Balanced accuracy score of a dummy classifier: "
f"{cv_result['test_balanced_accuracy'].mean():.3f}"
)
# %% [markdown]
# Strategies to learn from an imbalanced dataset
# ----------------------------------------------
# We will use a dictionary and a list to continuously store the results of
# our experiments and show them as a pandas dataframe.
# %%
index = []
scores = {"Accuracy": [], "Balanced accuracy": []}
# %% [markdown]
# Dummy baseline
# ..............
#
# Before training a real machine learning model, we can store the results
# obtained with our :class:`~sklearn.dummy.DummyClassifier`.
# %%
import pandas as pd
index += ["Dummy classifier"]
cv_result = cross_validate(dummy_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# Linear classifier baseline
# ..........................
#
# We will create a machine learning pipeline using a
# :class:`~sklearn.linear_model.LogisticRegression` classifier. In this regard,
# we will need to one-hot encode the categorical columns and standardize the
# numerical columns before injecting the data into the
# :class:`~sklearn.linear_model.LogisticRegression` classifier.
#
# First, we define our numerical and categorical pipelines.
# %%
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler
num_pipe = make_pipeline(
StandardScaler(), SimpleImputer(strategy="mean", add_indicator=True)
)
cat_pipe = make_pipeline(
SimpleImputer(strategy="constant", fill_value="missing"),
OneHotEncoder(handle_unknown="ignore"),
)
# %% [markdown]
# Then, we can create a preprocessor which will dispatch the categorical
# columns to the categorical pipeline and the numerical columns to the
# numerical pipeline
# %%
from sklearn.compose import make_column_selector as selector
from sklearn.compose import make_column_transformer
preprocessor_linear = make_column_transformer(
(num_pipe, selector(dtype_include="number")),
(cat_pipe, selector(dtype_include="category")),
n_jobs=2,
)
# %% [markdown]
# Finally, we connect our preprocessor with our
# :class:`~sklearn.linear_model.LogisticRegression`. We can then evaluate our
# model.
# %%
from sklearn.linear_model import LogisticRegression
lr_clf = make_pipeline(preprocessor_linear, LogisticRegression(max_iter=1000))
# %%
index += ["Logistic regression"]
cv_result = cross_validate(lr_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# We can see that our linear model is learning slightly better than our dummy
# baseline. However, it is impacted by the class imbalance.
#
# We can verify that something similar is happening with a tree-based model
# such as :class:`~sklearn.ensemble.RandomForestClassifier`. With this type of
# classifier, we will not need to scale the numerical data, and we will only
# need to ordinal encode the categorical data.
from sklearn.ensemble import RandomForestClassifier
# %%
from sklearn.preprocessing import OrdinalEncoder
num_pipe = SimpleImputer(strategy="mean", add_indicator=True)
cat_pipe = make_pipeline(
SimpleImputer(strategy="constant", fill_value="missing"),
OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1),
)
preprocessor_tree = make_column_transformer(
(num_pipe, selector(dtype_include="number")),
(cat_pipe, selector(dtype_include="category")),
n_jobs=2,
)
rf_clf = make_pipeline(
preprocessor_tree, RandomForestClassifier(random_state=42, n_jobs=2)
)
# %%
index += ["Random forest"]
cv_result = cross_validate(rf_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# The :class:`~sklearn.ensemble.RandomForestClassifier` is affected by the
# class imbalance as well, though slightly less than the linear model. Now, we
# will present different approaches to improve the performance of these 2
# models.
#
# Use `class_weight`
# ..................
#
# Most of the models in `scikit-learn` have a parameter `class_weight`. This
# parameter affects the computation of the loss in linear models or the
# criterion in tree-based models to penalize differently a misclassification
# of the minority class and of the majority class. We can set
# `class_weight="balanced"` such that the weight applied is inversely
# proportional to the class frequency. We test this parametrization in both
# the linear model and the tree-based model.
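# %% [markdown]
# As a short, illustrative aside (not part of the original study), the
# "balanced" heuristic gives each class a weight inversely proportional to its
# frequency, i.e. ``n_samples / (n_classes * n_samples_per_class)``. The names
# below exist only for this sketch.
# %%
class_counts = y_res.value_counts()
balanced_weights = y_res.size / (class_counts.size * class_counts)
balanced_weights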
# %%
lr_clf.set_params(logisticregression__class_weight="balanced")
index += ["Logistic regression with balanced class weights"]
cv_result = cross_validate(lr_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %%
rf_clf.set_params(randomforestclassifier__class_weight="balanced")
index += ["Random forest with balanced class weights"]
cv_result = cross_validate(rf_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# We can see that using `class_weight` was really effective for the linear
# model, alleviating the issue of learning from imbalanced classes. However,
# the :class:`~sklearn.ensemble.RandomForestClassifier` is still biased toward
# the majority class, mainly due to the criterion which is not well suited to
# fight the class imbalance.
#
# Resample the training set during learning
# .........................................
#
# Another way is to resample the training set by under-sampling or
# over-sampling some of the samples. `imbalanced-learn` provides some samplers
# to do such processing.
# %%
from imblearn.pipeline import make_pipeline as make_pipeline_with_sampler
from imblearn.under_sampling import RandomUnderSampler
lr_clf = make_pipeline_with_sampler(
preprocessor_linear,
RandomUnderSampler(random_state=42),
LogisticRegression(max_iter=1000),
)
# %%
index += ["Under-sampling + Logistic regression"]
cv_result = cross_validate(lr_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %%
rf_clf = make_pipeline_with_sampler(
preprocessor_tree,
RandomUnderSampler(random_state=42),
RandomForestClassifier(random_state=42, n_jobs=2),
)
# %%
index += ["Under-sampling + Random forest"]
cv_result = cross_validate(rf_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# Applying a random under-sampler before training the linear model or the
# random forest allows the model not to focus on the majority class, at the
# cost of making more mistakes for samples in the majority class (i.e. a
# decreased accuracy).
#
# We could apply any type of samplers and find which sampler is working best
# on the current dataset.
#
# Instead, we will present another way by using classifiers which will apply
# sampling internally.
#
# Use of specific balanced algorithms from imbalanced-learn
# .........................................................
#
# We already showed that random under-sampling can be effective on decision
# trees. However, instead of under-sampling the dataset once, one could
# under-sample the original dataset before taking each bootstrap sample. This
# is the basis of the
# :class:`~imblearn.ensemble.BalancedRandomForestClassifier` and of the
# :class:`~imblearn.ensemble.BalancedBaggingClassifier`.
# %%
from imblearn.ensemble import BalancedRandomForestClassifier
rf_clf = make_pipeline(
preprocessor_tree,
BalancedRandomForestClassifier(
sampling_strategy="all",
replacement=True,
bootstrap=False,
random_state=42,
n_jobs=2,
),
)
# %%
index += ["Balanced random forest"]
cv_result = cross_validate(rf_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# The performance with the
# :class:`~imblearn.ensemble.BalancedRandomForestClassifier` is better than
# applying a single random under-sampling. We will use a gradient-boosting
# classifier within a :class:`~imblearn.ensemble.BalancedBaggingClassifier`.
from sklearn.ensemble import HistGradientBoostingClassifier
from imblearn.ensemble import BalancedBaggingClassifier
bag_clf = make_pipeline(
preprocessor_tree,
BalancedBaggingClassifier(
estimator=HistGradientBoostingClassifier(random_state=42),
n_estimators=10,
random_state=42,
n_jobs=2,
),
)
index += ["Balanced bag of histogram gradient boosting"]
cv_result = cross_validate(bag_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# This last approach is the most effective. The different under-samplings
# bring some diversity for the different GBDTs to learn from, without focusing
# on a portion of the majority class only.
| 12,377 | 32.274194 | 87 | py |
imbalanced-learn | imbalanced-learn-master/examples/applications/porto_seguro_keras_under_sampling.py | """
==========================================================
Porto Seguro: balancing samples in mini-batches with Keras
==========================================================
This example compares two strategies to train a neural-network on the Porto
Seguro Kaggle data set [1]_. The data set is imbalanced and we show that
balancing each mini-batch improves the performance and reduces the training
time.
References
----------
.. [1] https://www.kaggle.com/c/porto-seguro-safe-driver-prediction/data
"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
print(__doc__)
###############################################################################
# Data loading
###############################################################################
from collections import Counter
import numpy as np
import pandas as pd
###############################################################################
# First, you should download the Porto Seguro data set from Kaggle. See the
# link in the introduction.
training_data = pd.read_csv("./input/train.csv")
testing_data = pd.read_csv("./input/test.csv")
y_train = training_data[["id", "target"]].set_index("id")
X_train = training_data.drop(["target"], axis=1).set_index("id")
X_test = testing_data.set_index("id")
###############################################################################
# The data set is imbalanced and it will have an effect on the fitting.
print(f"The data set is imbalanced: {Counter(y_train['target'])}")
###############################################################################
# Define the pre-processing pipeline
###############################################################################
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer, OneHotEncoder, StandardScaler
def convert_float64(X):
return X.astype(np.float64)
###############################################################################
# We want to standard scale the numerical features while we want to one-hot
# encode the categorical features. In this regard, we make use of the
# :class:`~sklearn.compose.ColumnTransformer`.
numerical_columns = [
name for name in X_train.columns if "_calc_" in name and "_bin" not in name
]
numerical_pipeline = make_pipeline(
FunctionTransformer(func=convert_float64, validate=False), StandardScaler()
)
categorical_columns = [name for name in X_train.columns if "_cat" in name]
categorical_pipeline = make_pipeline(
SimpleImputer(missing_values=-1, strategy="most_frequent"),
OneHotEncoder(categories="auto"),
)
preprocessor = ColumnTransformer(
[
("numerical_preprocessing", numerical_pipeline, numerical_columns),
(
"categorical_preprocessing",
categorical_pipeline,
categorical_columns,
),
],
remainder="drop",
)
# Create an environment variable to avoid using the GPU. This can be changed.
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
from tensorflow.keras.layers import Activation, BatchNormalization, Dense, Dropout
###############################################################################
# Create a neural-network
###############################################################################
from tensorflow.keras.models import Sequential
def make_model(n_features):
model = Sequential()
model.add(Dense(200, input_shape=(n_features,), kernel_initializer="glorot_normal"))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(0.5))
model.add(Dense(100, kernel_initializer="glorot_normal", use_bias=False))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(0.25))
model.add(Dense(50, kernel_initializer="glorot_normal", use_bias=False))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(0.15))
model.add(Dense(25, kernel_initializer="glorot_normal", use_bias=False))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(0.1))
model.add(Dense(1, activation="sigmoid"))
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
return model
###############################################################################
# We create a decorator to report the computation time
import time
from functools import wraps
def timeit(f):
@wraps(f)
def wrapper(*args, **kwds):
start_time = time.time()
result = f(*args, **kwds)
elapsed_time = time.time() - start_time
print(f"Elapsed computation time: {elapsed_time:.3f} secs")
return (elapsed_time, result)
return wrapper
###############################################################################
# The first model will be trained using the ``fit`` method and with imbalanced
# mini-batches.
import tensorflow
from sklearn.metrics import roc_auc_score
from sklearn.utils import parse_version
tf_version = parse_version(tensorflow.__version__)
@timeit
def fit_predict_imbalanced_model(X_train, y_train, X_test, y_test):
model = make_model(X_train.shape[1])
model.fit(X_train, y_train, epochs=2, verbose=1, batch_size=1000)
if tf_version < parse_version("2.6"):
# predict_proba was removed in tensorflow 2.6
predict_method = "predict_proba"
else:
predict_method = "predict"
y_pred = getattr(model, predict_method)(X_test, batch_size=1000)
return roc_auc_score(y_test, y_pred)
###############################################################################
# On the contrary, we will use imbalanced-learn to create a generator of
# mini-batches which will yield balanced mini-batches.
from imblearn.keras import BalancedBatchGenerator
@timeit
def fit_predict_balanced_model(X_train, y_train, X_test, y_test):
model = make_model(X_train.shape[1])
training_generator = BalancedBatchGenerator(
X_train, y_train, batch_size=1000, random_state=42
)
model.fit(training_generator, epochs=5, verbose=1)
y_pred = model.predict(X_test, batch_size=1000)
return roc_auc_score(y_test, y_pred)
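###############################################################################
# As a small, self-contained illustration (using a synthetic dataset and
# assuming the generator shuffles the balanced indices, as documented), each
# mini-batch produced by ``BalancedBatchGenerator`` contains roughly as many
# samples of each class. The ``*_toy`` names below exist only for this sketch.
from sklearn.datasets import make_classification
X_toy, y_toy = make_classification(
    n_samples=5_000, weights=[0.95, 0.05], random_state=42
)
toy_generator = BalancedBatchGenerator(X_toy, y_toy, batch_size=100, random_state=42)
X_batch, y_batch = toy_generator[0]
print(f"Class distribution in one balanced mini-batch: {Counter(y_batch)}")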
###############################################################################
# Classification loop
###############################################################################
###############################################################################
# We will perform a 10-fold cross-validation and train the neural-network with
# the two different strategies previously presented.
from sklearn.model_selection import StratifiedKFold
skf = StratifiedKFold(n_splits=10)
cv_results_imbalanced = []
cv_time_imbalanced = []
cv_results_balanced = []
cv_time_balanced = []
for train_idx, valid_idx in skf.split(X_train, y_train):
X_local_train = preprocessor.fit_transform(X_train.iloc[train_idx])
y_local_train = y_train.iloc[train_idx].values.ravel()
X_local_test = preprocessor.transform(X_train.iloc[valid_idx])
y_local_test = y_train.iloc[valid_idx].values.ravel()
elapsed_time, roc_auc = fit_predict_imbalanced_model(
X_local_train, y_local_train, X_local_test, y_local_test
)
cv_time_imbalanced.append(elapsed_time)
cv_results_imbalanced.append(roc_auc)
elapsed_time, roc_auc = fit_predict_balanced_model(
X_local_train, y_local_train, X_local_test, y_local_test
)
cv_time_balanced.append(elapsed_time)
cv_results_balanced.append(roc_auc)
###############################################################################
# Plot of the results and computation time
###############################################################################
df_results = pd.DataFrame(
{
"Balanced model": cv_results_balanced,
"Imbalanced model": cv_results_imbalanced,
}
)
df_results = df_results.unstack().reset_index()
df_time = pd.DataFrame(
{"Balanced model": cv_time_balanced, "Imbalanced model": cv_time_imbalanced}
)
df_time = df_time.unstack().reset_index()
import matplotlib.pyplot as plt
import seaborn as sns
plt.figure()
sns.boxplot(y="level_0", x=0, data=df_time)
sns.despine(top=True, right=True, left=True)
plt.xlabel("time [s]")
plt.ylabel("")
plt.title("Computation time difference using a random under-sampling")
plt.figure()
sns.boxplot(y="level_0", x=0, data=df_results, whis=10.0)
sns.despine(top=True, right=True, left=True)
ax = plt.gca()
ax.xaxis.set_major_formatter(plt.FuncFormatter(lambda x, pos: "%i%%" % (100 * x)))
plt.xlabel("ROC-AUC")
plt.ylabel("")
plt.title("Difference in terms of ROC-AUC using a random under-sampling")
| 8,747 | 32.776062 | 88 | py |
imbalanced-learn | imbalanced-learn-master/examples/applications/plot_topic_classication.py | """
=================================================
Example of topic classification in text documents
=================================================
This example shows how to balance the text data before training a classifier.
Note that for this example, the data are slightly imbalanced but it can happen
that for some data sets, the imbalance ratio is more significant.
"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
# %%
print(__doc__)
# %% [markdown]
# Setting the data set
# --------------------
#
# We use a part of the 20 newsgroups data set by loading 4 topics. Using the
# scikit-learn loader, the data are split into a training and a testing set.
#
# Note that the class \#3 is the minority class and has almost half as many
# samples as the majority class.
# %%
from sklearn.datasets import fetch_20newsgroups
categories = [
"alt.atheism",
"talk.religion.misc",
"comp.graphics",
"sci.space",
]
newsgroups_train = fetch_20newsgroups(subset="train", categories=categories)
newsgroups_test = fetch_20newsgroups(subset="test", categories=categories)
X_train = newsgroups_train.data
X_test = newsgroups_test.data
y_train = newsgroups_train.target
y_test = newsgroups_test.target
# %%
from collections import Counter
print(f"Training class distributions summary: {Counter(y_train)}")
print(f"Test class distributions summary: {Counter(y_test)}")
# %% [markdown]
# The usual scikit-learn pipeline
# -------------------------------
#
# You might usually build a scikit-learn pipeline by combining the TF-IDF
# vectorizer with a multinomial naive Bayes classifier. A classification
# report summarizes the results on the testing set.
#
# As expected, the recall of the class \#3 is low, mainly due to the class
# imbalance.
# %%
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import make_pipeline
model = make_pipeline(TfidfVectorizer(), MultinomialNB())
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# %%
from imblearn.metrics import classification_report_imbalanced
print(classification_report_imbalanced(y_test, y_pred))
# %% [markdown]
# Balancing the class before classification
# -----------------------------------------
#
# To improve the prediction of the class \#3, it could be interesting to apply
# a balancing before training the naive Bayes classifier. Therefore, we will
# use a :class:`~imblearn.under_sampling.RandomUnderSampler` to equalize the
# number of samples in all the classes before the training.
#
# It is also important to note that we are using the
# :class:`~imblearn.pipeline.make_pipeline` function implemented in
# imbalanced-learn to properly handle the samplers.
from imblearn.pipeline import make_pipeline as make_pipeline_imb
# %%
from imblearn.under_sampling import RandomUnderSampler
model = make_pipeline_imb(TfidfVectorizer(), RandomUnderSampler(), MultinomialNB())
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# %% [markdown]
# Although the results are almost identical, it can be seen that the resampling
# allowed correcting the poor recall of the class \#3 at the cost of reducing
# the other metrics for the other classes. However, the overall results are
# slightly better.
# %%
print(classification_report_imbalanced(y_test, y_pred))
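# %% [markdown]
# Note that the sampler must live in an imbalanced-learn pipeline: a vanilla
# scikit-learn :class:`~sklearn.pipeline.Pipeline` rejects samplers because
# they do not implement ``transform``. The short sketch below, reusing the
# scikit-learn ``make_pipeline`` imported earlier, only illustrates this
# behaviour and is not part of the original example.
# %%
try:
    make_pipeline(TfidfVectorizer(), RandomUnderSampler(), MultinomialNB()).fit(
        X_train, y_train
    )
except TypeError as exc:
    print(f"The scikit-learn pipeline rejected the sampler: {exc}")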
| 3,358 | 30.101852 | 83 | py |
imbalanced-learn | imbalanced-learn-master/examples/applications/plot_multi_class_under_sampling.py | """
=============================================
Multiclass classification with under-sampling
=============================================
Some balancing methods allow for balancing datasets with multiple classes.
We provide an example to illustrate the use of those methods, which does
not differ from the binary case.
"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
from collections import Counter
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from imblearn.datasets import make_imbalance
from imblearn.metrics import classification_report_imbalanced
from imblearn.pipeline import make_pipeline
from imblearn.under_sampling import NearMiss
print(__doc__)
RANDOM_STATE = 42
# Load the iris dataset and create an imbalanced version of it
iris = load_iris()
X, y = make_imbalance(
iris.data,
iris.target,
sampling_strategy={0: 25, 1: 50, 2: 50},
random_state=RANDOM_STATE,
)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=RANDOM_STATE)
print(f"Training target statistics: {Counter(y_train)}")
print(f"Testing target statistics: {Counter(y_test)}")
# Create a pipeline
pipeline = make_pipeline(NearMiss(version=2), StandardScaler(), LogisticRegression())
pipeline.fit(X_train, y_train)
# Classify and report the results
print(classification_report_imbalanced(y_test, pipeline.predict(X_test)))
| 1,494 | 28.313725 | 85 | py |
imbalanced-learn | imbalanced-learn-master/examples/applications/plot_over_sampling_benchmark_lfw.py | """
==========================================================
Benchmark over-sampling methods in a face recognition task
==========================================================
In this face recognition example two faces are used from the LFW
(Labeled Faces in the Wild) dataset. Several implemented over-sampling
methods are used in conjunction with a 3NN classifier in order
to examine the improvement of the classifier's output quality
by using an over-sampler.
"""
# Authors: Christos Aridas
# Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
# %%
print(__doc__)
import seaborn as sns
sns.set_context("poster")
# %% [markdown]
# Load the dataset
# ----------------
#
# We will use a dataset containing images of known people, from which we will
# build a model to recognize the person in the image. We will make this
# problem binary by taking pictures of only George W. Bush and Bill Clinton.
# %%
import numpy as np
from sklearn.datasets import fetch_lfw_people
data = fetch_lfw_people()
george_bush_id = 1871 # Photos of George W. Bush
bill_clinton_id = 531 # Photos of Bill Clinton
classes = [george_bush_id, bill_clinton_id]
classes_name = np.array(["B. Clinton", "G.W. Bush"], dtype=object)
# %%
mask_photos = np.isin(data.target, classes)
X, y = data.data[mask_photos], data.target[mask_photos]
y = (y == george_bush_id).astype(np.int8)
y = classes_name[y]
# %% [markdown]
# We can check the ratio between the two classes.
# %%
import matplotlib.pyplot as plt
import pandas as pd
class_distribution = pd.Series(y).value_counts(normalize=True)
ax = class_distribution.plot.barh()
ax.set_title("Class distribution")
pos_label = class_distribution.idxmin()
plt.tight_layout()
print(f"The positive label considered as the minority class is {pos_label}")
# %% [markdown]
# We see that we have an imbalanced classification problem with ~95% of the
# data belonging to the class G.W. Bush.
#
# Compare over-sampling approaches
# --------------------------------
#
# We will use different over-sampling approaches and use a kNN classifier
# to check if we can recognize the 2 presidents. The evaluation will be
# performed through cross-validation and we will plot the mean ROC curve.
#
# We will create different pipelines and evaluate them.
from sklearn.neighbors import KNeighborsClassifier
from imblearn import FunctionSampler
from imblearn.over_sampling import ADASYN, SMOTE, RandomOverSampler
from imblearn.pipeline import make_pipeline
classifier = KNeighborsClassifier(n_neighbors=3)
pipeline = [
make_pipeline(FunctionSampler(), classifier),
make_pipeline(RandomOverSampler(random_state=42), classifier),
make_pipeline(ADASYN(random_state=42), classifier),
make_pipeline(SMOTE(random_state=42), classifier),
]
# %%
from sklearn.model_selection import StratifiedKFold
cv = StratifiedKFold(n_splits=3)
# %% [markdown]
# We will compute the mean ROC curve for each pipeline using the different
# splits provided by the :class:`~sklearn.model_selection.StratifiedKFold`
# cross-validation.
# %%
from sklearn.metrics import RocCurveDisplay, auc, roc_curve
disp = []
for model in pipeline:
# compute the mean fpr/tpr to get the mean ROC curve
mean_tpr, mean_fpr = 0.0, np.linspace(0, 1, 100)
for train, test in cv.split(X, y):
model.fit(X[train], y[train])
y_proba = model.predict_proba(X[test])
pos_label_idx = np.flatnonzero(model.classes_ == pos_label)[0]
fpr, tpr, thresholds = roc_curve(
y[test], y_proba[:, pos_label_idx], pos_label=pos_label
)
mean_tpr += np.interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
mean_tpr /= cv.get_n_splits(X, y)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
# Create a display that we will reuse to make the aggregated plots for
# all methods
disp.append(
RocCurveDisplay(
fpr=mean_fpr,
tpr=mean_tpr,
roc_auc=mean_auc,
estimator_name=f"{model[0].__class__.__name__}",
)
)
# %% [markdown]
# In the previous cell, we created the different mean ROC curves and we can
# plot them on the same figure.
# %%
fig, ax = plt.subplots(figsize=(9, 9))
for d in disp:
d.plot(ax=ax, linestyle="--")
ax.plot([0, 1], [0, 1], linestyle="--", color="k")
ax.axis("square")
fig.suptitle("Comparison of over-sampling methods \nwith a 3NN classifier")
ax.set_xlim([0, 1])
ax.set_ylim([0, 1])
sns.despine(offset=10, ax=ax)
plt.legend(loc="lower right", fontsize=16)
plt.tight_layout()
plt.show()
# %% [markdown]
# We see that, for this task, methods that generate new samples by
# interpolation (i.e. ADASYN and SMOTE) perform better than random
# over-sampling or no resampling at all.
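# %% [markdown]
# As a quick illustrative check (not part of the original benchmark), we can
# look at how SMOTE balances the class counts when resampling the full
# dataset.
# %%
from collections import Counter
X_res, y_res = SMOTE(random_state=42).fit_resample(X, y)
print(f"Class counts after SMOTE: {sorted(Counter(y_res).items())}")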
| 4,750 | 29.651613 | 79 | py |
imbalanced-learn | imbalanced-learn-master/examples/applications/plot_outlier_rejections.py | """
===============================================================
Customized sampler to implement an outlier rejection estimator
===============================================================
This example illustrates the use of a custom sampler to implement an outlier
rejection estimator. It can easily be used within a pipeline in which the
number of samples can vary during training, which usually is a limitation of
the current scikit-learn pipeline.
"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs, make_moons
from sklearn.ensemble import IsolationForest
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from imblearn import FunctionSampler
from imblearn.pipeline import make_pipeline
print(__doc__)
rng = np.random.RandomState(42)
def plot_scatter(X, y, title):
"""Function to plot some data as a scatter plot."""
plt.figure()
plt.scatter(X[y == 1, 0], X[y == 1, 1], label="Class #1")
plt.scatter(X[y == 0, 0], X[y == 0, 1], label="Class #0")
plt.legend()
plt.title(title)
##############################################################################
# Toy data generation
##############################################################################
##############################################################################
# We are generating a non-Gaussian dataset contaminated with some uniform
# noise.
moons, _ = make_moons(n_samples=500, noise=0.05)
blobs, _ = make_blobs(
n_samples=500, centers=[(-0.75, 2.25), (1.0, 2.0)], cluster_std=0.25
)
outliers = rng.uniform(low=-3, high=3, size=(500, 2))
X_train = np.vstack([moons, blobs, outliers])
y_train = np.hstack(
[
np.ones(moons.shape[0], dtype=np.int8),
np.zeros(blobs.shape[0], dtype=np.int8),
rng.randint(0, 2, size=outliers.shape[0], dtype=np.int8),
]
)
plot_scatter(X_train, y_train, "Training dataset")
##############################################################################
# We will generate some cleaned test data without outliers.
moons, _ = make_moons(n_samples=50, noise=0.05)
blobs, _ = make_blobs(
n_samples=50, centers=[(-0.75, 2.25), (1.0, 2.0)], cluster_std=0.25
)
X_test = np.vstack([moons, blobs])
y_test = np.hstack(
[np.ones(moons.shape[0], dtype=np.int8), np.zeros(blobs.shape[0], dtype=np.int8)]
)
plot_scatter(X_test, y_test, "Testing dataset")
##############################################################################
# How to use the :class:`~imblearn.FunctionSampler`
##############################################################################
##############################################################################
# We first define a function which will use
# :class:`~sklearn.ensemble.IsolationForest` to eliminate some outliers from
# our dataset during training. The function passed to the
# :class:`~imblearn.FunctionSampler` will be called when using the method
# ``fit_resample``.
def outlier_rejection(X, y):
"""This will be our function used to resample our dataset."""
model = IsolationForest(max_samples=100, contamination=0.4, random_state=rng)
model.fit(X)
y_pred = model.predict(X)
return X[y_pred == 1], y[y_pred == 1]
reject_sampler = FunctionSampler(func=outlier_rejection)
X_inliers, y_inliers = reject_sampler.fit_resample(X_train, y_train)
plot_scatter(X_inliers, y_inliers, "Training data without outliers")
##############################################################################
# Integrate it within a pipeline
##############################################################################
##############################################################################
# By eliminating outliers before training, the classifier will be less
# affected at prediction time.
pipe = make_pipeline(
FunctionSampler(func=outlier_rejection),
LogisticRegression(solver="lbfgs", multi_class="auto", random_state=rng),
)
y_pred = pipe.fit(X_train, y_train).predict(X_test)
print(classification_report(y_test, y_pred))
clf = LogisticRegression(solver="lbfgs", multi_class="auto", random_state=rng)
y_pred = clf.fit(X_train, y_train).predict(X_test)
print(classification_report(y_test, y_pred))
plt.show()
| 4,353 | 34.688525 | 85 | py |
imbalanced-learn | imbalanced-learn-master/maint_tools/test_docstring.py | import importlib
import inspect
import pkgutil
import re
from inspect import signature
from typing import Optional
import pytest
import imblearn
from imblearn.utils.testing import all_estimators
numpydoc_validation = pytest.importorskip("numpydoc.validate")
# List of whitelisted modules and methods; regexps are supported.
# Docstrings that do not match this whitelist are expected to fail the
# numpydoc validation (e.g. because they are inherited from scikit-learn) and
# are marked as xfail.
DOCSTRING_WHITELIST = [
"ADASYN$",
"ADASYN.",
"AllKNN$",
"AllKNN.",
"BalancedBaggingClassifier$",
"BalancedBaggingClassifier.",
"BalancedRandomForestClassifier$",
"BalancedRandomForestClassifier.",
"ClusterCentroids$",
"ClusterCentroids.",
"CondensedNearestNeighbour$",
"CondensedNearestNeighbour.",
"EasyEnsembleClassifier$",
"EasyEnsembleClassifier.",
"EditedNearestNeighbours$",
"EditedNearestNeighbours.",
"FunctionSampler$",
"FunctionSampler.",
"InstanceHardnessThreshold$",
"InstanceHardnessThreshold.",
"SMOTE$",
"SMOTE.",
"NearMiss$",
"NearMiss.",
"NeighbourhoodCleaningRule$",
"NeighbourhoodCleaningRule.",
"OneSidedSelection$",
"OneSidedSelection.",
"Pipeline$",
"Pipeline.",
"RUSBoostClassifier$",
"RUSBoostClassifier.",
"RandomOverSampler$",
"RandomOverSampler.",
"RandomUnderSampler$",
"RandomUnderSampler.",
"TomekLinks$",
"TomekLinks",
"ValueDifferenceMetric$",
"ValueDifferenceMetric.",
]
FUNCTION_DOCSTRING_IGNORE_LIST = [
"imblearn.tensorflow._generator.balanced_batch_generator",
]
FUNCTION_DOCSTRING_IGNORE_LIST = set(FUNCTION_DOCSTRING_IGNORE_LIST)
def get_all_methods():
estimators = all_estimators()
for name, Estimator in estimators:
if name.startswith("_"):
# skip private classes
continue
methods = []
for name in dir(Estimator):
if name.startswith("_"):
continue
method_obj = getattr(Estimator, name)
if hasattr(method_obj, "__call__") or isinstance(method_obj, property):
methods.append(name)
methods.append(None)
for method in sorted(methods, key=lambda x: str(x)):
yield Estimator, method
def _is_checked_function(item):
if not inspect.isfunction(item):
return False
if item.__name__.startswith("_"):
return False
mod = item.__module__
if not mod.startswith("imblearn.") or mod.endswith("estimator_checks"):
return False
return True
def get_all_functions_names():
"""Get all public functions define in the imblearn module"""
modules_to_ignore = {
"tests",
"estimator_checks",
}
all_functions_names = set()
for module_finder, module_name, ispkg in pkgutil.walk_packages(
path=imblearn.__path__, prefix="imblearn."
):
module_parts = module_name.split(".")
if (
any(part in modules_to_ignore for part in module_parts)
or "._" in module_name
):
continue
module = importlib.import_module(module_name)
functions = inspect.getmembers(module, _is_checked_function)
for name, func in functions:
full_name = f"{func.__module__}.{func.__name__}"
all_functions_names.add(full_name)
return sorted(all_functions_names)
def filter_errors(errors, method, Estimator=None):
"""
Ignore some errors based on the method type.
    These rules are specific to scikit-learn."""
for code, message in errors:
# We ignore following error code,
# - RT02: The first line of the Returns section
# should contain only the type, ..
# (as we may need refer to the name of the returned
# object)
# - GL01: Docstring text (summary) should start in the line
# immediately after the opening quotes (not in the same line,
# or leaving a blank line in between)
# - GL02: If there's a blank line, it should be before the
# first line of the Returns section, not after (it allows to have
# short docstrings for properties).
if code in ["RT02", "GL01", "GL02"]:
continue
# Ignore PR02: Unknown parameters for properties. We sometimes use
# properties for ducktyping, i.e. SGDClassifier.predict_proba
if code == "PR02" and Estimator is not None and method is not None:
method_obj = getattr(Estimator, method)
if isinstance(method_obj, property):
continue
# Following codes are only taken into account for the
# top level class docstrings:
# - ES01: No extended summary found
# - SA01: See Also section not found
# - EX01: No examples section found
if method is not None and code in ["EX01", "SA01", "ES01"]:
continue
yield code, message
def repr_errors(res, estimator=None, method: Optional[str] = None) -> str:
"""Pretty print original docstring and the obtained errors
Parameters
----------
res : dict
result of numpydoc.validate.validate
estimator : {estimator, None}
estimator object or None
method : str
if estimator is not None, either the method name or None.
Returns
-------
str
String representation of the error.
"""
if method is None:
if hasattr(estimator, "__init__"):
method = "__init__"
elif estimator is None:
raise ValueError("At least one of estimator, method should be provided")
else:
raise NotImplementedError
if estimator is not None:
obj = getattr(estimator, method)
try:
obj_signature = signature(obj)
except TypeError:
# In particular we can't parse the signature of properties
obj_signature = (
"\nParsing of the method signature failed, "
"possibly because this is a property."
)
obj_name = estimator.__name__ + "." + method
else:
obj_signature = ""
obj_name = method
msg = "\n\n" + "\n\n".join(
[
str(res["file"]),
obj_name + str(obj_signature),
res["docstring"],
"# Errors",
"\n".join(
" - {}: {}".format(code, message) for code, message in res["errors"]
),
]
)
return msg
@pytest.mark.parametrize("function_name", get_all_functions_names())
def test_function_docstring(function_name, request):
"""Check function docstrings using numpydoc."""
if function_name in FUNCTION_DOCSTRING_IGNORE_LIST:
request.applymarker(
pytest.mark.xfail(run=False, reason="TODO pass numpydoc validation")
)
res = numpydoc_validation.validate(function_name)
res["errors"] = list(filter_errors(res["errors"], method="function"))
if res["errors"]:
msg = repr_errors(res, method=f"Tested function: {function_name}")
raise ValueError(msg)
@pytest.mark.parametrize("Estimator, method", get_all_methods())
def test_docstring(Estimator, method, request):
base_import_path = Estimator.__module__
import_path = [base_import_path, Estimator.__name__]
if method is not None:
import_path.append(method)
import_path = ".".join(import_path)
if not any(re.search(regex, import_path) for regex in DOCSTRING_WHITELIST):
request.applymarker(
pytest.mark.xfail(run=False, reason="TODO pass numpydoc validation")
)
res = numpydoc_validation.validate(import_path)
res["errors"] = list(filter_errors(res["errors"], method))
if res["errors"]:
msg = repr_errors(res, Estimator, method)
raise ValueError(msg)
if __name__ == "__main__":
import argparse
import sys
parser = argparse.ArgumentParser(description="Validate docstring with numpydoc.")
parser.add_argument("import_path", help="Import path to validate")
args = parser.parse_args()
res = numpydoc_validation.validate(args.import_path)
import_path_sections = args.import_path.split(".")
# When applied to classes, detect class method. For functions
# method = None.
# TODO: this detection can be improved. Currently we assume that we have
    # class methods if the second path element before last is in camel case.
if len(import_path_sections) >= 2 and re.match(
r"(?:[A-Z][a-z]*)+", import_path_sections[-2]
):
method = import_path_sections[-1]
else:
method = None
res["errors"] = list(filter_errors(res["errors"], method))
if res["errors"]:
msg = repr_errors(res, method=args.import_path)
print(msg)
sys.exit(1)
else:
print("All docstring checks passed for {}!".format(args.import_path))
| 8,925 | 29.360544 | 85 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/base.py | """Base class for sampling"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
from abc import ABCMeta, abstractmethod
import numpy as np
import sklearn
from sklearn.base import BaseEstimator
try:
# scikit-learn >= 1.2
from sklearn.base import OneToOneFeatureMixin
except ImportError:
from sklearn.base import _OneToOneFeatureMixin as OneToOneFeatureMixin
from sklearn.preprocessing import label_binarize
from sklearn.utils import parse_version
from sklearn.utils.multiclass import check_classification_targets
from .utils import check_sampling_strategy, check_target_type
from .utils._param_validation import validate_parameter_constraints
from .utils._validation import ArraysTransformer
sklearn_version = parse_version(sklearn.__version__)
class _ParamsValidationMixin:
"""Mixin class to validate parameters."""
def _validate_params(self):
"""Validate types and values of constructor parameters.
The expected type and values must be defined in the `_parameter_constraints`
class attribute, which is a dictionary `param_name: list of constraints`. See
the docstring of `validate_parameter_constraints` for a description of the
accepted constraints.
"""
if hasattr(self, "_parameter_constraints"):
validate_parameter_constraints(
self._parameter_constraints,
self.get_params(deep=False),
caller_name=self.__class__.__name__,
)
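# For a concrete instance of such a ``_parameter_constraints`` mapping, see
# ``FunctionSampler._parameter_constraints`` defined further below in this
# module.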
class SamplerMixin(_ParamsValidationMixin, BaseEstimator, metaclass=ABCMeta):
"""Mixin class for samplers with abstract method.
    Warning: This class should not be used directly. Use the derived classes
instead.
"""
_estimator_type = "sampler"
def fit(self, X, y):
"""Check inputs and statistics of the sampler.
You should use ``fit_resample`` in all cases.
Parameters
----------
X : {array-like, dataframe, sparse matrix} of shape \
(n_samples, n_features)
Data array.
y : array-like of shape (n_samples,)
Target array.
Returns
-------
self : object
Return the instance itself.
"""
X, y, _ = self._check_X_y(X, y)
self.sampling_strategy_ = check_sampling_strategy(
self.sampling_strategy, y, self._sampling_type
)
return self
def fit_resample(self, X, y):
"""Resample the dataset.
Parameters
----------
X : {array-like, dataframe, sparse matrix} of shape \
(n_samples, n_features)
Matrix containing the data which have to be sampled.
y : array-like of shape (n_samples,)
Corresponding label for each sample in X.
Returns
-------
X_resampled : {array-like, dataframe, sparse matrix} of shape \
(n_samples_new, n_features)
The array containing the resampled data.
y_resampled : array-like of shape (n_samples_new,)
The corresponding label of `X_resampled`.
"""
check_classification_targets(y)
arrays_transformer = ArraysTransformer(X, y)
X, y, binarize_y = self._check_X_y(X, y)
self.sampling_strategy_ = check_sampling_strategy(
self.sampling_strategy, y, self._sampling_type
)
output = self._fit_resample(X, y)
y_ = (
label_binarize(output[1], classes=np.unique(y)) if binarize_y else output[1]
)
X_, y_ = arrays_transformer.transform(output[0], y_)
return (X_, y_) if len(output) == 2 else (X_, y_, output[2])
@abstractmethod
def _fit_resample(self, X, y):
"""Base method defined in each sampler to defined the sampling
strategy.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Matrix containing the data which have to be sampled.
y : array-like of shape (n_samples,)
Corresponding label for each sample in X.
Returns
-------
X_resampled : {ndarray, sparse matrix} of shape \
(n_samples_new, n_features)
The array containing the resampled data.
y_resampled : ndarray of shape (n_samples_new,)
The corresponding label of `X_resampled`.
"""
pass
class BaseSampler(SamplerMixin, OneToOneFeatureMixin):
"""Base class for sampling algorithms.
    Warning: This class should not be used directly. Use the derived classes
instead.
"""
def __init__(self, sampling_strategy="auto"):
self.sampling_strategy = sampling_strategy
def _check_X_y(self, X, y, accept_sparse=None):
if accept_sparse is None:
accept_sparse = ["csr", "csc"]
y, binarize_y = check_target_type(y, indicate_one_vs_all=True)
X, y = self._validate_data(X, y, reset=True, accept_sparse=accept_sparse)
return X, y, binarize_y
def fit(self, X, y):
"""Check inputs and statistics of the sampler.
You should use ``fit_resample`` in all cases.
Parameters
----------
X : {array-like, dataframe, sparse matrix} of shape \
(n_samples, n_features)
Data array.
y : array-like of shape (n_samples,)
Target array.
Returns
-------
self : object
Return the instance itself.
"""
self._validate_params()
return super().fit(X, y)
def fit_resample(self, X, y):
"""Resample the dataset.
Parameters
----------
X : {array-like, dataframe, sparse matrix} of shape \
(n_samples, n_features)
Matrix containing the data which have to be sampled.
y : array-like of shape (n_samples,)
Corresponding label for each sample in X.
Returns
-------
X_resampled : {array-like, dataframe, sparse matrix} of shape \
(n_samples_new, n_features)
The array containing the resampled data.
y_resampled : array-like of shape (n_samples_new,)
The corresponding label of `X_resampled`.
"""
self._validate_params()
return super().fit_resample(X, y)
def _more_tags(self):
return {"X_types": ["2darray", "sparse", "dataframe"]}
def _identity(X, y):
return X, y
def is_sampler(estimator):
"""Return True if the given estimator is a sampler, False otherwise.
Parameters
----------
estimator : object
Estimator to test.
Returns
-------
is_sampler : bool
True if estimator is a sampler, otherwise False.
"""
if estimator._estimator_type == "sampler":
return True
return False
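# A minimal illustrative sketch (assuming the usual import locations):
#   from imblearn.under_sampling import RandomUnderSampler
#   from sklearn.linear_model import LogisticRegression
#   is_sampler(RandomUnderSampler())  # True, samplers expose ``fit_resample``
#   is_sampler(LogisticRegression())  # False, classifiers are not samplers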
class FunctionSampler(BaseSampler):
"""Construct a sampler from calling an arbitrary callable.
Read more in the :ref:`User Guide <function_sampler>`.
Parameters
----------
func : callable, default=None
The callable to use for the transformation. This will be passed the
same arguments as transform, with args and kwargs forwarded. If func is
None, then func will be the identity function.
accept_sparse : bool, default=True
        Whether sparse inputs are supported. By default, sparse inputs are
supported.
kw_args : dict, default=None
The keyword argument expected by ``func``.
validate : bool, default=True
        Whether or not to bypass the validation of ``X`` and ``y``. Turning off
        validation allows using the ``FunctionSampler`` with any type of
data.
.. versionadded:: 0.6
Attributes
----------
sampling_strategy_ : dict
Dictionary containing the information to sample the dataset. The keys
        correspond to the class labels from which to sample and the values
are the number of samples to sample.
n_features_in_ : int
Number of features in the input dataset.
.. versionadded:: 0.9
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during `fit`. Defined only when `X` has feature
names that are all strings.
.. versionadded:: 0.10
See Also
--------
    sklearn.preprocessing.FunctionTransformer : Stateless transformer.
Notes
-----
See
:ref:`sphx_glr_auto_examples_applications_plot_outlier_rejections.py`
Examples
--------
>>> import numpy as np
>>> from sklearn.datasets import make_classification
>>> from imblearn import FunctionSampler
>>> X, y = make_classification(n_classes=2, class_sep=2,
... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
    We can create a sampler that selects only the first ten samples, for instance.
>>> def func(X, y):
... return X[:10], y[:10]
>>> sampler = FunctionSampler(func=func)
>>> X_res, y_res = sampler.fit_resample(X, y)
>>> np.all(X_res == X[:10])
True
>>> np.all(y_res == y[:10])
True
    We can also create a specific function which takes some arguments.
>>> from collections import Counter
>>> from imblearn.under_sampling import RandomUnderSampler
>>> def func(X, y, sampling_strategy, random_state):
... return RandomUnderSampler(
... sampling_strategy=sampling_strategy,
... random_state=random_state).fit_resample(X, y)
>>> sampler = FunctionSampler(func=func,
... kw_args={'sampling_strategy': 'auto',
... 'random_state': 0})
>>> X_res, y_res = sampler.fit_resample(X, y)
>>> print(f'Resampled dataset shape {sorted(Counter(y_res).items())}')
Resampled dataset shape [(0, 100), (1, 100)]
"""
_sampling_type = "bypass"
_parameter_constraints: dict = {
"func": [callable, None],
"accept_sparse": ["boolean"],
"kw_args": [dict, None],
"validate": ["boolean"],
}
def __init__(self, *, func=None, accept_sparse=True, kw_args=None, validate=True):
super().__init__()
self.func = func
self.accept_sparse = accept_sparse
self.kw_args = kw_args
self.validate = validate
def fit(self, X, y):
"""Check inputs and statistics of the sampler.
You should use ``fit_resample`` in all cases.
Parameters
----------
X : {array-like, dataframe, sparse matrix} of shape \
(n_samples, n_features)
Data array.
y : array-like of shape (n_samples,)
Target array.
Returns
-------
self : object
Return the instance itself.
"""
self._validate_params()
# we need to overwrite SamplerMixin.fit to bypass the validation
if self.validate:
check_classification_targets(y)
X, y, _ = self._check_X_y(X, y, accept_sparse=self.accept_sparse)
self.sampling_strategy_ = check_sampling_strategy(
self.sampling_strategy, y, self._sampling_type
)
return self
def fit_resample(self, X, y):
"""Resample the dataset.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Matrix containing the data which have to be sampled.
y : array-like of shape (n_samples,)
Corresponding label for each sample in X.
Returns
-------
X_resampled : {array-like, sparse matrix} of shape \
(n_samples_new, n_features)
The array containing the resampled data.
y_resampled : array-like of shape (n_samples_new,)
The corresponding label of `X_resampled`.
"""
self._validate_params()
arrays_transformer = ArraysTransformer(X, y)
if self.validate:
check_classification_targets(y)
X, y, binarize_y = self._check_X_y(X, y, accept_sparse=self.accept_sparse)
self.sampling_strategy_ = check_sampling_strategy(
self.sampling_strategy, y, self._sampling_type
)
output = self._fit_resample(X, y)
if self.validate:
y_ = (
label_binarize(output[1], classes=np.unique(y))
if binarize_y
else output[1]
)
X_, y_ = arrays_transformer.transform(output[0], y_)
return (X_, y_) if len(output) == 2 else (X_, y_, output[2])
return output
def _fit_resample(self, X, y):
func = _identity if self.func is None else self.func
output = func(X, y, **(self.kw_args if self.kw_args else {}))
return output
| 12,954 | 29.845238 | 88 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/exceptions.py | """
The :mod:`imblearn.exceptions` module includes all custom warnings and error
classes and functions used across imbalanced-learn.
"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
def raise_isinstance_error(variable_name, possible_type, variable):
"""Raise consistent error message for isinstance() function.
Parameters
----------
variable_name : str
The name of the variable.
possible_type : type
The possible type of the variable.
variable : object
The variable to check.
Raises
------
ValueError
If the instance is not of the possible type.
"""
raise ValueError(
f"{variable_name} has to be one of {possible_type}. "
f"Got {type(variable)} instead."
)
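# A minimal illustrative sketch: calling
#   raise_isinstance_error("n_neighbors", [int], 3.0)
# raises a ValueError whose message reads
# "n_neighbors has to be one of [<class 'int'>]. Got <class 'float'> instead."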
| 785 | 22.818182 | 76 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/_config.py | """This is copy of sklearn/_config.py
# TODO: remove this file when scikit-learn minimum version is 1.3
We remove the array_api_dispatch for the moment.
"""
import os
import threading
from contextlib import contextmanager as contextmanager
import sklearn
from sklearn.utils import parse_version
sklearn_version = parse_version(sklearn.__version__)
if sklearn_version < parse_version("1.3"):
_global_config = {
"assume_finite": bool(os.environ.get("SKLEARN_ASSUME_FINITE", False)),
"working_memory": int(os.environ.get("SKLEARN_WORKING_MEMORY", 1024)),
"print_changed_only": True,
"display": "diagram",
"pairwise_dist_chunk_size": int(
os.environ.get("SKLEARN_PAIRWISE_DIST_CHUNK_SIZE", 256)
),
"enable_cython_pairwise_dist": True,
"transform_output": "default",
"enable_metadata_routing": False,
"skip_parameter_validation": False,
}
_threadlocal = threading.local()
def _get_threadlocal_config():
"""Get a threadlocal **mutable** configuration. If the configuration
does not exist, copy the default global configuration."""
if not hasattr(_threadlocal, "global_config"):
_threadlocal.global_config = _global_config.copy()
return _threadlocal.global_config
def get_config():
"""Retrieve current values for configuration set by :func:`set_config`.
Returns
-------
config : dict
Keys are parameter names that can be passed to :func:`set_config`.
See Also
--------
config_context : Context manager for global scikit-learn configuration.
set_config : Set global scikit-learn configuration.
"""
# Return a copy of the threadlocal configuration so that users will
# not be able to modify the configuration with the returned dict.
return _get_threadlocal_config().copy()
def set_config(
assume_finite=None,
working_memory=None,
print_changed_only=None,
display=None,
pairwise_dist_chunk_size=None,
enable_cython_pairwise_dist=None,
transform_output=None,
enable_metadata_routing=None,
skip_parameter_validation=None,
):
"""Set global scikit-learn configuration
.. versionadded:: 0.19
Parameters
----------
assume_finite : bool, default=None
If True, validation for finiteness will be skipped,
saving time, but leading to potential crashes. If
False, validation for finiteness will be performed,
avoiding error. Global default: False.
.. versionadded:: 0.19
working_memory : int, default=None
If set, scikit-learn will attempt to limit the size of temporary arrays
to this number of MiB (per job when parallelised), often saving both
computation time and memory on expensive operations that can be
performed in chunks. Global default: 1024.
.. versionadded:: 0.20
print_changed_only : bool, default=None
If True, only the parameters that were set to non-default
values will be printed when printing an estimator. For example,
``print(SVC())`` while True will only print 'SVC()' while the default
behaviour would be to print 'SVC(C=1.0, cache_size=200, ...)' with
all the non-changed parameters.
.. versionadded:: 0.21
display : {'text', 'diagram'}, default=None
If 'diagram', estimators will be displayed as a diagram in a Jupyter
lab or notebook context. If 'text', estimators will be displayed as
text. Default is 'diagram'.
.. versionadded:: 0.23
pairwise_dist_chunk_size : int, default=None
The number of row vectors per chunk for the accelerated pairwise-
distances reduction backend. Default is 256 (suitable for most of
modern laptops' caches and architectures).
Intended for easier benchmarking and testing of scikit-learn internals.
End users are not expected to benefit from customizing this configuration
setting.
.. versionadded:: 1.1
enable_cython_pairwise_dist : bool, default=None
Use the accelerated pairwise-distances reduction backend when
possible. Global default: True.
Intended for easier benchmarking and testing of scikit-learn internals.
End users are not expected to benefit from customizing this configuration
setting.
.. versionadded:: 1.1
transform_output : str, default=None
Configure output of `transform` and `fit_transform`.
See :ref:`sphx_glr_auto_examples_miscellaneous_plot_set_output.py`
for an example on how to use the API.
- `"default"`: Default output format of a transformer
- `"pandas"`: DataFrame output
- `None`: Transform configuration is unchanged
.. versionadded:: 1.2
enable_metadata_routing : bool, default=None
Enable metadata routing. By default this feature is disabled.
Refer to :ref:`metadata routing user guide <metadata_routing>` for more
details.
- `True`: Metadata routing is enabled
- `False`: Metadata routing is disabled, use the old syntax.
- `None`: Configuration is unchanged
.. versionadded:: 1.3
skip_parameter_validation : bool, default=None
If `True`, disable the validation of the hyper-parameters' types
and values in the fit method of estimators and for arguments passed
to public helper functions. It can save time in some situations but
can lead to low level crashes and exceptions with confusing error
messages.
Note that for data parameters, such as `X` and `y`, only type validation is
skipped but validation with `check_array` will continue to run.
.. versionadded:: 1.3
See Also
--------
config_context : Context manager for global scikit-learn configuration.
get_config : Retrieve current values of the global configuration.
"""
local_config = _get_threadlocal_config()
if assume_finite is not None:
local_config["assume_finite"] = assume_finite
if working_memory is not None:
local_config["working_memory"] = working_memory
if print_changed_only is not None:
local_config["print_changed_only"] = print_changed_only
if display is not None:
local_config["display"] = display
if pairwise_dist_chunk_size is not None:
local_config["pairwise_dist_chunk_size"] = pairwise_dist_chunk_size
if enable_cython_pairwise_dist is not None:
local_config["enable_cython_pairwise_dist"] = enable_cython_pairwise_dist
if transform_output is not None:
local_config["transform_output"] = transform_output
if enable_metadata_routing is not None:
local_config["enable_metadata_routing"] = enable_metadata_routing
if skip_parameter_validation is not None:
local_config["skip_parameter_validation"] = skip_parameter_validation
@contextmanager
def config_context(
*,
assume_finite=None,
working_memory=None,
print_changed_only=None,
display=None,
pairwise_dist_chunk_size=None,
enable_cython_pairwise_dist=None,
transform_output=None,
enable_metadata_routing=None,
skip_parameter_validation=None,
):
"""Context manager for global scikit-learn configuration.
Parameters
----------
assume_finite : bool, default=None
If True, validation for finiteness will be skipped,
saving time, but leading to potential crashes. If
False, validation for finiteness will be performed,
avoiding error. If None, the existing value won't change.
The default value is False.
working_memory : int, default=None
If set, scikit-learn will attempt to limit the size of temporary arrays
to this number of MiB (per job when parallelised), often saving both
computation time and memory on expensive operations that can be
performed in chunks. If None, the existing value won't change.
The default value is 1024.
print_changed_only : bool, default=None
If True, only the parameters that were set to non-default
values will be printed when printing an estimator. For example,
``print(SVC())`` while True will only print 'SVC()', but would print
'SVC(C=1.0, cache_size=200, ...)' with all the non-changed parameters
when False. If None, the existing value won't change.
The default value is True.
.. versionchanged:: 0.23
Default changed from False to True.
display : {'text', 'diagram'}, default=None
If 'diagram', estimators will be displayed as a diagram in a Jupyter
lab or notebook context. If 'text', estimators will be displayed as
text. If None, the existing value won't change.
The default value is 'diagram'.
.. versionadded:: 0.23
pairwise_dist_chunk_size : int, default=None
The number of row vectors per chunk for the accelerated pairwise-
distances reduction backend. Default is 256 (suitable for most of
modern laptops' caches and architectures).
Intended for easier benchmarking and testing of scikit-learn internals.
End users are not expected to benefit from customizing this configuration
setting.
.. versionadded:: 1.1
enable_cython_pairwise_dist : bool, default=None
Use the accelerated pairwise-distances reduction backend when
possible. Global default: True.
Intended for easier benchmarking and testing of scikit-learn internals.
End users are not expected to benefit from customizing this configuration
setting.
.. versionadded:: 1.1
transform_output : str, default=None
Configure output of `transform` and `fit_transform`.
See :ref:`sphx_glr_auto_examples_miscellaneous_plot_set_output.py`
for an example on how to use the API.
- `"default"`: Default output format of a transformer
- `"pandas"`: DataFrame output
- `None`: Transform configuration is unchanged
.. versionadded:: 1.2
enable_metadata_routing : bool, default=None
Enable metadata routing. By default this feature is disabled.
Refer to :ref:`metadata routing user guide <metadata_routing>` for more
details.
- `True`: Metadata routing is enabled
- `False`: Metadata routing is disabled, use the old syntax.
- `None`: Configuration is unchanged
.. versionadded:: 1.3
skip_parameter_validation : bool, default=None
If `True`, disable the validation of the hyper-parameters' types
and values in the fit method of estimators and for arguments passed
to public helper functions. It can save time in some situations but
can lead to low level crashes and exceptions with confusing error
messages.
Note that for data parameters, such as `X` and `y`, only type validation is
skipped but validation with `check_array` will continue to run.
.. versionadded:: 1.3
Yields
------
None.
See Also
--------
set_config : Set global scikit-learn configuration.
get_config : Retrieve current values of the global configuration.
Notes
-----
All settings, not just those presently modified, will be returned to
their previous values when the context manager is exited.
Examples
--------
>>> import sklearn
>>> from sklearn.utils.validation import assert_all_finite
>>> with sklearn.config_context(assume_finite=True):
... assert_all_finite([float('nan')])
>>> with sklearn.config_context(assume_finite=True):
... with sklearn.config_context(assume_finite=False):
... assert_all_finite([float('nan')])
Traceback (most recent call last):
...
ValueError: Input contains NaN...
"""
old_config = get_config()
set_config(
assume_finite=assume_finite,
working_memory=working_memory,
print_changed_only=print_changed_only,
display=display,
pairwise_dist_chunk_size=pairwise_dist_chunk_size,
enable_cython_pairwise_dist=enable_cython_pairwise_dist,
transform_output=transform_output,
enable_metadata_routing=enable_metadata_routing,
skip_parameter_validation=skip_parameter_validation,
)
try:
yield
finally:
set_config(**old_config)
else:
from sklearn._config import ( # type: ignore[no-redef]
_get_threadlocal_config,
_global_config,
config_context, # noqa
get_config,
)
| 13,655 | 38.582609 | 87 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/_version.py | """
``imbalanced-learn`` is a set of Python methods to deal with imbalanced
datasets in machine learning and pattern recognition.
"""
# Based on NiLearn package
# License: simplified BSD
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = "0.12.0.dev0"
| 629 | 23.230769 | 71 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/_min_dependencies.py | """All minimum dependencies for imbalanced-learn."""
import argparse
NUMPY_MIN_VERSION = "1.17.3"
SCIPY_MIN_VERSION = "1.5.0"
PANDAS_MIN_VERSION = "1.0.5"
SKLEARN_MIN_VERSION = "1.0.2"
TENSORFLOW_MIN_VERSION = "2.4.3"
KERAS_MIN_VERSION = "2.4.3"
JOBLIB_MIN_VERSION = "1.1.1"
THREADPOOLCTL_MIN_VERSION = "2.0.0"
PYTEST_MIN_VERSION = "5.0.1"
# 'build' and 'install' is included to have structured metadata for CI.
# It will NOT be included in setup's extras_require
# The values are (version_spec, comma separated tags)
dependent_packages = {
"numpy": (NUMPY_MIN_VERSION, "install"),
"scipy": (SCIPY_MIN_VERSION, "install"),
"scikit-learn": (SKLEARN_MIN_VERSION, "install"),
"joblib": (JOBLIB_MIN_VERSION, "install"),
"threadpoolctl": (THREADPOOLCTL_MIN_VERSION, "install"),
"pandas": (PANDAS_MIN_VERSION, "optional, docs, examples, tests"),
"tensorflow": (TENSORFLOW_MIN_VERSION, "optional, docs, examples, tests"),
"keras": (KERAS_MIN_VERSION, "optional, docs, examples, tests"),
"matplotlib": ("3.1.2", "docs, examples"),
"seaborn": ("0.9.0", "docs, examples"),
"memory_profiler": ("0.57.0", "docs"),
"pytest": (PYTEST_MIN_VERSION, "tests"),
"pytest-cov": ("2.9.0", "tests"),
"flake8": ("3.8.2", "tests"),
"black": ("23.3.0", "tests"),
"mypy": ("1.3.0", "tests"),
"sphinx": ("6.0.0", "docs"),
"sphinx-gallery": ("0.13.0", "docs"),
"sphinx-copybutton": ("0.5.2", "docs"),
"numpydoc": ("1.5.0", "docs"),
"sphinxcontrib-bibtex": ("2.4.1", "docs"),
"pydata-sphinx-theme": ("0.13.3", "docs"),
}
# create inverse mapping for setuptools
tag_to_packages: dict = {
extra: [] for extra in ["install", "optional", "docs", "examples", "tests"]
}
for package, (min_version, extras) in dependent_packages.items():
for extra in extras.split(", "):
tag_to_packages[extra].append("{}>={}".format(package, min_version))
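# For illustration, after this loop ``tag_to_packages["install"]`` contains
# entries such as "numpy>=1.17.3" and "scikit-learn>=1.0.2", while
# ``tag_to_packages["docs"]`` contains e.g. "sphinx>=6.0.0".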
# Used by CI to get the min dependencies
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Get min dependencies for a package")
parser.add_argument("package", choices=dependent_packages)
args = parser.parse_args()
min_version = dependent_packages[args.package][0]
print(min_version)
| 2,240 | 36.35 | 86 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/__init__.py | """Toolbox for imbalanced dataset in machine learning.
``imbalanced-learn`` is a set of Python methods to deal with imbalanced
datasets in machine learning and pattern recognition.
Subpackages
-----------
combine
Module which provides methods based on over-sampling and under-sampling.
ensemble
Module which provides methods generating an ensemble of
under-sampled subsets.
exceptions
    Module including custom warnings and error classes used across
imbalanced-learn.
keras
Module which provides custom generator, layers for deep learning using
keras.
metrics
Module which provides metrics to quantified the classification performance
with imbalanced dataset.
over_sampling
Module which provides methods to over-sample a dataset.
tensorflow
Module which provides custom generator, layers for deep learning using
tensorflow.
under-sampling
Module which provides methods to under-sample a dataset.
utils
Module including various utilities.
pipeline
Module which allowing to create pipeline with scikit-learn estimators.
"""
import importlib
import sys
import types
try:
# This variable is injected in the __builtins__ by the build
# process. It is used to enable importing subpackages of sklearn when
# the binaries are not built
# mypy error: Cannot determine type of '__SKLEARN_SETUP__'
__IMBLEARN_SETUP__ # type: ignore
except NameError:
__IMBLEARN_SETUP__ = False
if __IMBLEARN_SETUP__:
sys.stderr.write("Partial import of imblearn during the build process.\n")
# We are not importing the rest of scikit-learn during the build
# process, as it may not be compiled yet
else:
from . import (
combine,
ensemble,
exceptions,
metrics,
over_sampling,
pipeline,
tensorflow,
under_sampling,
utils,
)
from ._version import __version__
from .base import FunctionSampler
from .utils._show_versions import show_versions # noqa: F401
# FIXME: When we get Python 3.7 as minimal version, we will need to switch to
# the following solution:
# https://snarky.ca/lazy-importing-in-python-3-7/
class LazyLoader(types.ModuleType):
"""Lazily import a module, mainly to avoid pulling in large dependencies.
Adapted from TensorFlow:
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/
python/util/lazy_loader.py
"""
def __init__(self, local_name, parent_module_globals, name, warning=None):
self._local_name = local_name
self._parent_module_globals = parent_module_globals
self._warning = warning
super(LazyLoader, self).__init__(name)
def _load(self):
"""Load the module and insert it into the parent's globals."""
# Import the target module and insert it into the parent's namespace
module = importlib.import_module(self.__name__)
self._parent_module_globals[self._local_name] = module
# Update this object's dict so that if someone keeps a reference to the
# LazyLoader, lookups are efficient (__getattr__ is only called on
# lookups that fail).
self.__dict__.update(module.__dict__)
return module
def __getattr__(self, item):
module = self._load()
return getattr(module, item)
def __dir__(self):
module = self._load()
return dir(module)
# delay the import of keras since we are going to import either tensorflow
# or keras
keras = LazyLoader("keras", globals(), "imblearn.keras")
__all__ = [
"combine",
"ensemble",
"exceptions",
"keras",
"metrics",
"over_sampling",
"tensorflow",
"under_sampling",
"utils",
"pipeline",
"FunctionSampler",
"__version__",
]
| 3,963 | 30.967742 | 83 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/pipeline.py | """
The :mod:`imblearn.pipeline` module implements utilities to build a
composite estimator, as a chain of transforms, samples and estimators.
"""
# Adapted from scikit-learn
# Author: Edouard Duchesnay
# Gael Varoquaux
# Virgile Fritsch
# Alexandre Gramfort
# Lars Buitinck
# Christos Aridas
# Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: BSD
import joblib
from sklearn import pipeline
from sklearn.base import clone
from sklearn.utils import _print_elapsed_time
from sklearn.utils.metaestimators import available_if
from .base import _ParamsValidationMixin
from .utils._param_validation import HasMethods, validate_params
__all__ = ["Pipeline", "make_pipeline"]
class Pipeline(_ParamsValidationMixin, pipeline.Pipeline):
"""Pipeline of transforms and resamples with a final estimator.
    Sequentially apply a list of transforms, samplers, and a final estimator.
    Intermediate steps of the pipeline must be transformers or resamplers,
    that is, they must implement fit and transform methods, or a fit_resample
    method.
The samplers are only applied during fit.
The final estimator only needs to implement fit.
The transformers and samplers in the pipeline can be cached using
``memory`` argument.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters.
For this, it enables setting parameters of the various steps using their
names and the parameter name separated by a '__', as in the example below.
A step's estimator may be replaced entirely by setting the parameter
with its name to another estimator, or a transformer removed by setting
it to 'passthrough' or ``None``.
Parameters
----------
steps : list
List of (name, transform) tuples (implementing
fit/transform/fit_resample) that are chained, in the order in which
they are chained, with the last object an estimator.
memory : Instance of joblib.Memory or str, default=None
Used to cache the fitted transformers of the pipeline. By default,
no caching is performed. If a string is given, it is the path to
the caching directory. Enabling caching triggers a clone of
the transformers before fitting. Therefore, the transformer
instance given to the pipeline cannot be inspected
directly. Use the attribute ``named_steps`` or ``steps`` to
inspect estimators within the pipeline. Caching the
transformers is advantageous when fitting is time consuming.
verbose : bool, default=False
If True, the time elapsed while fitting each step will be printed as it
is completed.
Attributes
----------
named_steps : :class:`~sklearn.utils.Bunch`
Read-only attribute to access any step parameter by user given name.
Keys are step names and values are steps parameters.
classes_ : ndarray of shape (n_classes,)
The classes labels.
n_features_in_ : int
        Number of features seen during the first step's `fit` method.
See Also
--------
make_pipeline : Helper function to make pipeline.
Notes
-----
See :ref:`sphx_glr_auto_examples_pipeline_plot_pipeline_classification.py`
.. warning::
A surprising behaviour of the `imbalanced-learn` pipeline is that it
breaks the `scikit-learn` contract where one expects
        `estimator.fit_transform(X, y)` to be equivalent to
`estimator.fit(X, y).transform(X)`.
The semantic of `fit_resample` is to be applied only during the fit
stage. Therefore, resampling will happen when calling `fit_transform`
while it will only happen on the `fit` stage when calling `fit` and
`transform` separately. Practically, `fit_transform` will lead to a
resampled dataset while `fit` and `transform` will not.
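        A minimal sketch of the difference (assuming ``pipe`` is such a
        pipeline made of a sampler followed by a scaler)::
            Xt = pipe.fit_transform(X, y)     # X is resampled, then scaled
            Xt = pipe.fit(X, y).transform(X)  # X is only scaled, not resampled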
Examples
--------
>>> from collections import Counter
>>> from sklearn.datasets import make_classification
>>> from sklearn.model_selection import train_test_split as tts
>>> from sklearn.decomposition import PCA
>>> from sklearn.neighbors import KNeighborsClassifier as KNN
>>> from sklearn.metrics import classification_report
>>> from imblearn.over_sampling import SMOTE
>>> from imblearn.pipeline import Pipeline
>>> X, y = make_classification(n_classes=2, class_sep=2,
... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
>>> print(f'Original dataset shape {Counter(y)}')
Original dataset shape Counter({1: 900, 0: 100})
>>> pca = PCA()
>>> smt = SMOTE(random_state=42)
>>> knn = KNN()
>>> pipeline = Pipeline([('smt', smt), ('pca', pca), ('knn', knn)])
>>> X_train, X_test, y_train, y_test = tts(X, y, random_state=42)
>>> pipeline.fit(X_train, y_train)
Pipeline(...)
>>> y_hat = pipeline.predict(X_test)
>>> print(classification_report(y_test, y_hat))
precision recall f1-score support
<BLANKLINE>
0 0.87 1.00 0.93 26
1 1.00 0.98 0.99 224
<BLANKLINE>
accuracy 0.98 250
macro avg 0.93 0.99 0.96 250
weighted avg 0.99 0.98 0.98 250
<BLANKLINE>
"""
_parameter_constraints: dict = {
"steps": "no_validation", # validated in `_validate_steps`
"memory": [None, str, HasMethods(["cache"])],
"verbose": ["boolean"],
}
# BaseEstimator interface
def _validate_steps(self):
names, estimators = zip(*self.steps)
# validate names
self._validate_names(names)
# validate estimators
transformers = estimators[:-1]
estimator = estimators[-1]
for t in transformers:
if t is None or t == "passthrough":
continue
if not (
hasattr(t, "fit")
or hasattr(t, "fit_transform")
or hasattr(t, "fit_resample")
) or not (hasattr(t, "transform") or hasattr(t, "fit_resample")):
raise TypeError(
"All intermediate steps of the chain should "
"be estimators that implement fit and transform or "
"fit_resample (but not both) or be a string 'passthrough' "
"'%s' (type %s) doesn't)" % (t, type(t))
)
if hasattr(t, "fit_resample") and (
hasattr(t, "fit_transform") or hasattr(t, "transform")
):
raise TypeError(
"All intermediate steps of the chain should "
"be estimators that implement fit and transform or "
"fit_resample."
" '%s' implements both)" % (t)
)
if isinstance(t, pipeline.Pipeline):
raise TypeError(
"All intermediate steps of the chain should not be Pipelines"
)
# We allow last estimator to be None as an identity transformation
if (
estimator is not None
and estimator != "passthrough"
and not hasattr(estimator, "fit")
):
raise TypeError(
"Last step of Pipeline should implement fit or be "
"the string 'passthrough'. '%s' (type %s) doesn't"
% (estimator, type(estimator))
)
def _iter(self, with_final=True, filter_passthrough=True, filter_resample=True):
"""Generate (idx, (name, trans)) tuples from self.steps.
When `filter_passthrough` is `True`, 'passthrough' and None
transformers are filtered out. When `filter_resample` is `True`,
estimator with a method `fit_resample` are filtered out.
"""
it = super()._iter(with_final, filter_passthrough)
if filter_resample:
return filter(lambda x: not hasattr(x[-1], "fit_resample"), it)
else:
return it
# Estimator interface
def _fit(self, X, y=None, **fit_params_steps):
self.steps = list(self.steps)
self._validate_steps()
# Setup the memory
if self.memory is None or isinstance(self.memory, str):
memory = joblib.Memory(location=self.memory, verbose=0)
else:
memory = self.memory
fit_transform_one_cached = memory.cache(pipeline._fit_transform_one)
fit_resample_one_cached = memory.cache(_fit_resample_one)
for step_idx, name, transformer in self._iter(
with_final=False, filter_passthrough=False, filter_resample=False
):
if transformer is None or transformer == "passthrough":
with _print_elapsed_time("Pipeline", self._log_message(step_idx)):
continue
try:
# joblib >= 0.12
mem = memory.location
except AttributeError:
mem = memory.cachedir
finally:
cloned_transformer = clone(transformer) if mem else transformer
# Fit or load from cache the current transformer
if hasattr(cloned_transformer, "transform") or hasattr(
cloned_transformer, "fit_transform"
):
X, fitted_transformer = fit_transform_one_cached(
cloned_transformer,
X,
y,
None,
message_clsname="Pipeline",
message=self._log_message(step_idx),
**fit_params_steps[name],
)
elif hasattr(cloned_transformer, "fit_resample"):
X, y, fitted_transformer = fit_resample_one_cached(
cloned_transformer,
X,
y,
message_clsname="Pipeline",
message=self._log_message(step_idx),
**fit_params_steps[name],
)
# Replace the transformer of the step with the fitted
# transformer. This is necessary when loading the transformer
# from the cache.
self.steps[step_idx] = (name, fitted_transformer)
return X, y
def fit(self, X, y=None, **fit_params):
"""Fit the model.
Fit all the transforms/samplers one after the other and
transform/sample the data, then fit the transformed/sampled
data using the final estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
**fit_params : dict of str -> object
Parameters passed to the ``fit`` method of each step, where
each parameter name is prefixed such that parameter ``p`` for step
``s`` has key ``s__p``.
Returns
-------
self : Pipeline
This estimator.
"""
self._validate_params()
fit_params_steps = self._check_fit_params(**fit_params)
Xt, yt = self._fit(X, y, **fit_params_steps)
with _print_elapsed_time("Pipeline", self._log_message(len(self.steps) - 1)):
if self._final_estimator != "passthrough":
fit_params_last_step = fit_params_steps[self.steps[-1][0]]
self._final_estimator.fit(Xt, yt, **fit_params_last_step)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit the model and transform with the final estimator.
Fits all the transformers/samplers one after the other and
transform/sample the data, then uses fit_transform on
transformed data with the final estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of each step, where
each parameter name is prefixed such that parameter ``p`` for step
``s`` has key ``s__p``.
Returns
-------
Xt : array-like of shape (n_samples, n_transformed_features)
Transformed samples.
"""
self._validate_params()
fit_params_steps = self._check_fit_params(**fit_params)
Xt, yt = self._fit(X, y, **fit_params_steps)
last_step = self._final_estimator
with _print_elapsed_time("Pipeline", self._log_message(len(self.steps) - 1)):
if last_step == "passthrough":
return Xt
fit_params_last_step = fit_params_steps[self.steps[-1][0]]
if hasattr(last_step, "fit_transform"):
return last_step.fit_transform(Xt, yt, **fit_params_last_step)
else:
return last_step.fit(Xt, yt, **fit_params_last_step).transform(Xt)
def fit_resample(self, X, y=None, **fit_params):
"""Fit the model and sample with the final estimator.
Fits all the transformers/samplers one after the other and
transform/sample the data, then uses fit_resample on transformed
data with the final estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of each step, where
each parameter name is prefixed such that parameter ``p`` for step
``s`` has key ``s__p``.
Returns
-------
Xt : array-like of shape (n_samples, n_transformed_features)
Transformed samples.
yt : array-like of shape (n_samples, n_transformed_features)
Transformed target.
"""
self._validate_params()
fit_params_steps = self._check_fit_params(**fit_params)
Xt, yt = self._fit(X, y, **fit_params_steps)
last_step = self._final_estimator
with _print_elapsed_time("Pipeline", self._log_message(len(self.steps) - 1)):
if last_step == "passthrough":
return Xt
fit_params_last_step = fit_params_steps[self.steps[-1][0]]
if hasattr(last_step, "fit_resample"):
return last_step.fit_resample(Xt, yt, **fit_params_last_step)
@available_if(pipeline._final_estimator_has("fit_predict"))
def fit_predict(self, X, y=None, **fit_params):
"""Apply `fit_predict` of last step in pipeline after transforms.
Applies fit_transforms of a pipeline to the data, followed by the
fit_predict method of the final estimator in the pipeline. Valid
only if the final estimator implements fit_predict.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of
the pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps
of the pipeline.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of each step, where
each parameter name is prefixed such that parameter ``p`` for step
``s`` has key ``s__p``.
Returns
-------
y_pred : ndarray of shape (n_samples,)
The predicted target.
"""
self._validate_params()
fit_params_steps = self._check_fit_params(**fit_params)
Xt, yt = self._fit(X, y, **fit_params_steps)
fit_params_last_step = fit_params_steps[self.steps[-1][0]]
with _print_elapsed_time("Pipeline", self._log_message(len(self.steps) - 1)):
y_pred = self.steps[-1][-1].fit_predict(Xt, yt, **fit_params_last_step)
return y_pred
def _fit_resample_one(sampler, X, y, message_clsname="", message=None, **fit_params):
with _print_elapsed_time(message_clsname, message):
X_res, y_res = sampler.fit_resample(X, y, **fit_params)
return X_res, y_res, sampler
@validate_params(
{"memory": [None, str, HasMethods(["cache"])], "verbose": ["boolean"]},
prefer_skip_nested_validation=True,
)
def make_pipeline(*steps, memory=None, verbose=False):
"""Construct a Pipeline from the given estimators.
This is a shorthand for the Pipeline constructor; it does not require, and
does not permit, naming the estimators. Instead, their names will be set
to the lowercase of their types automatically.
Parameters
----------
*steps : list of estimators
A list of estimators.
memory : None, str or object with the joblib.Memory interface, default=None
Used to cache the fitted transformers of the pipeline. By default,
no caching is performed. If a string is given, it is the path to
the caching directory. Enabling caching triggers a clone of
the transformers before fitting. Therefore, the transformer
instance given to the pipeline cannot be inspected
directly. Use the attribute ``named_steps`` or ``steps`` to
inspect estimators within the pipeline. Caching the
transformers is advantageous when fitting is time consuming.
verbose : bool, default=False
If True, the time elapsed while fitting each step will be printed as it
is completed.
Returns
-------
p : Pipeline
Returns an imbalanced-learn `Pipeline` instance that handles samplers.
See Also
--------
imblearn.pipeline.Pipeline : Class for creating a pipeline of
transforms with a final estimator.
Examples
--------
    >>> from imblearn.pipeline import make_pipeline
    >>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.preprocessing import StandardScaler
>>> make_pipeline(StandardScaler(), GaussianNB(priors=None))
Pipeline(steps=[('standardscaler', StandardScaler()),
('gaussiannb', GaussianNB())])
"""
return Pipeline(pipeline._name_estimators(steps), memory=memory, verbose=verbose)
| 18,767 | 38.428571 | 85 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/under_sampling/base.py | """
Base class for the under-sampling method.
"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
import numbers
from collections.abc import Mapping
from ..base import BaseSampler
from ..utils._param_validation import Interval, StrOptions
class BaseUnderSampler(BaseSampler):
"""Base class for under-sampling algorithms.
    Warning: This class should not be used directly. Use the derived classes
instead.
"""
_sampling_type = "under-sampling"
_sampling_strategy_docstring = """sampling_strategy : float, str, dict, callable, default='auto'
Sampling information to sample the data set.
- When ``float``, it corresponds to the desired ratio of the number of
samples in the minority class over the number of samples in the
majority class after resampling. Therefore, the ratio is expressed as
:math:`\\alpha_{us} = N_{m} / N_{rM}` where :math:`N_{m}` is the
number of samples in the minority class and
:math:`N_{rM}` is the number of samples in the majority class
after resampling.
.. warning::
``float`` is only available for **binary** classification. An
error is raised for multi-class classification.
- When ``str``, specify the class targeted by the resampling. The
number of samples in the different classes will be equalized.
Possible choices are:
``'majority'``: resample only the majority class;
``'not minority'``: resample all classes but the minority class;
``'not majority'``: resample all classes but the majority class;
``'all'``: resample all classes;
``'auto'``: equivalent to ``'not minority'``.
- When ``dict``, the keys correspond to the targeted classes. The
values correspond to the desired number of samples for each targeted
class.
- When callable, function taking ``y`` and returns a ``dict``. The keys
correspond to the targeted classes. The values correspond to the
desired number of samples for each class.
""".rstrip() # noqa: E501
_parameter_constraints: dict = {
"sampling_strategy": [
Interval(numbers.Real, 0, 1, closed="right"),
StrOptions({"auto", "majority", "not minority", "not majority", "all"}),
Mapping,
callable,
],
}
class BaseCleaningSampler(BaseSampler):
"""Base class for under-sampling algorithms.
Warning: This class should not be used directly. Use the derive classes
instead.
"""
_sampling_type = "clean-sampling"
_sampling_strategy_docstring = """sampling_strategy : str, list or callable
Sampling information to sample the data set.
- When ``str``, specify the class targeted by the resampling. Note that
the number of samples will not be equal in each class. Possible choices
are:
``'majority'``: resample only the majority class;
``'not minority'``: resample all classes but the minority class;
``'not majority'``: resample all classes but the majority class;
``'all'``: resample all classes;
``'auto'``: equivalent to ``'not minority'``.
- When ``list``, the list contains the classes targeted by the
resampling.
- When callable, function taking ``y`` and returns a ``dict``. The keys
correspond to the targeted classes. The values correspond to the
desired number of samples for each class.
""".rstrip()
_parameter_constraints: dict = {
"sampling_strategy": [
Interval(numbers.Real, 0, 1, closed="right"),
StrOptions({"auto", "majority", "not minority", "not majority", "all"}),
list,
callable,
],
}
| 3,904 | 33.557522 | 100 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/under_sampling/__init__.py | """
The :mod:`imblearn.under_sampling` provides methods to under-sample
a dataset.
"""
from ._prototype_generation import ClusterCentroids
from ._prototype_selection import (
AllKNN,
CondensedNearestNeighbour,
EditedNearestNeighbours,
InstanceHardnessThreshold,
NearMiss,
NeighbourhoodCleaningRule,
OneSidedSelection,
RandomUnderSampler,
RepeatedEditedNearestNeighbours,
TomekLinks,
)
__all__ = [
"ClusterCentroids",
"RandomUnderSampler",
"InstanceHardnessThreshold",
"NearMiss",
"TomekLinks",
"EditedNearestNeighbours",
"RepeatedEditedNearestNeighbours",
"AllKNN",
"OneSidedSelection",
"CondensedNearestNeighbour",
"NeighbourhoodCleaningRule",
]
| 733 | 21.242424 | 67 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/under_sampling/_prototype_selection/_edited_nearest_neighbours.py | """Classes to perform under-sampling based on the edited nearest neighbour
method."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Dayvid Oliveira
# Christos Aridas
# License: MIT
import numbers
from collections import Counter
import numpy as np
from sklearn.utils import _safe_indexing
from ...utils import Substitution, check_neighbors_object
from ...utils._docstring import _n_jobs_docstring
from ...utils._param_validation import HasMethods, Interval, StrOptions
from ...utils.fixes import _mode
from ..base import BaseCleaningSampler
SEL_KIND = ("all", "mode")
@Substitution(
sampling_strategy=BaseCleaningSampler._sampling_strategy_docstring,
n_jobs=_n_jobs_docstring,
)
class EditedNearestNeighbours(BaseCleaningSampler):
"""Undersample based on the edited nearest neighbour method.
This method cleans the dataset by removing samples close to the
decision boundary. It removes observations from the majority class or
classes when any or most of their closest neighbours are from a different class.
Read more in the :ref:`User Guide <edited_nearest_neighbors>`.
Parameters
----------
{sampling_strategy}
n_neighbors : int or object, default=3
If ``int``, size of the neighbourhood to consider for the undersampling, i.e.,
if `n_neighbors=3`, a sample will be removed when any or most of its 3 closest
neighbours are from a different class. If object, an estimator that inherits
from :class:`~sklearn.neighbors.base.KNeighborsMixin` that will be used to
find the nearest-neighbors. Note that if you want to examine the 3 closest
neighbours of a sample for the undersampling, you need to pass a 4-KNN.
kind_sel : {{'all', 'mode'}}, default='all'
Strategy to use to exclude samples.
- If ``'all'``, all neighbours should be of the same class as the examined
sample for it not to be excluded.
- If ``'mode'``, most neighbours should be of the same class as the examined
sample for it not to be excluded.
The strategy `"all"` will be less conservative than `'mode'`. Thus,
more samples will be removed when `kind_sel="all"`, generally.
{n_jobs}
Attributes
----------
sampling_strategy_ : dict
Dictionary containing the information to sample the dataset. The keys
correspond to the class labels from which to sample and the values
are the number of samples to sample.
nn_ : estimator object
Validated K-nearest Neighbours instance created from `n_neighbors` parameter.
sample_indices_ : ndarray of shape (n_new_samples,)
Indices of the samples selected.
.. versionadded:: 0.4
n_features_in_ : int
Number of features in the input dataset.
.. versionadded:: 0.9
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during `fit`. Defined only when `X` has feature
names that are all strings.
.. versionadded:: 0.10
See Also
--------
CondensedNearestNeighbour : Undersample by condensing samples.
RepeatedEditedNearestNeighbours : Undersample by repeating the ENN algorithm.
AllKNN : Undersample using ENN with varying neighbours.
Notes
-----
The method is based on [1]_.
Supports multi-class resampling. A one-vs.-rest scheme is used when
sampling a class as proposed in [1]_.
References
----------
.. [1] D. Wilson, "Asymptotic Properties of Nearest Neighbor Rules Using
Edited Data," In IEEE Transactions on Systems, Man, and Cybernetics,
vol. 2(3), pp. 408-421, 1972.
Examples
--------
>>> from collections import Counter
>>> from sklearn.datasets import make_classification
>>> from imblearn.under_sampling import EditedNearestNeighbours
>>> X, y = make_classification(n_classes=2, class_sep=2,
... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
>>> print('Original dataset shape %s' % Counter(y))
Original dataset shape Counter({{1: 900, 0: 100}})
>>> enn = EditedNearestNeighbours()
>>> X_res, y_res = enn.fit_resample(X, y)
>>> print('Resampled dataset shape %s' % Counter(y_res))
Resampled dataset shape Counter({{1: 887, 0: 100}})
"""
_parameter_constraints: dict = {
**BaseCleaningSampler._parameter_constraints,
"n_neighbors": [
Interval(numbers.Integral, 1, None, closed="left"),
HasMethods(["kneighbors", "kneighbors_graph"]),
],
"kind_sel": [StrOptions({"all", "mode"})],
"n_jobs": [numbers.Integral, None],
}
def __init__(
self,
*,
sampling_strategy="auto",
n_neighbors=3,
kind_sel="all",
n_jobs=None,
):
super().__init__(sampling_strategy=sampling_strategy)
self.n_neighbors = n_neighbors
self.kind_sel = kind_sel
self.n_jobs = n_jobs
def _validate_estimator(self):
"""Validate the estimator created in the ENN."""
self.nn_ = check_neighbors_object(
"n_neighbors", self.n_neighbors, additional_neighbor=1
)
self.nn_.set_params(**{"n_jobs": self.n_jobs})
def _fit_resample(self, X, y):
self._validate_estimator()
idx_under = np.empty((0,), dtype=int)
self.nn_.fit(X)
for target_class in np.unique(y):
if target_class in self.sampling_strategy_.keys():
target_class_indices = np.flatnonzero(y == target_class)
X_class = _safe_indexing(X, target_class_indices)
y_class = _safe_indexing(y, target_class_indices)
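# ``self.nn_`` was fitted on the full ``X``, so the first neighbour returned
# for each queried sample is, in general, the sample itself; drop it with the
# ``[:, 1:]`` slice below.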
nnhood_idx = self.nn_.kneighbors(X_class, return_distance=False)[:, 1:]
nnhood_label = y[nnhood_idx]
if self.kind_sel == "mode":
nnhood_label, _ = _mode(nnhood_label, axis=1)
nnhood_bool = np.ravel(nnhood_label) == y_class
elif self.kind_sel == "all":
nnhood_label = nnhood_label == target_class
nnhood_bool = np.all(nnhood_label, axis=1)
index_target_class = np.flatnonzero(nnhood_bool)
else:
index_target_class = slice(None)
idx_under = np.concatenate(
(
idx_under,
np.flatnonzero(y == target_class)[index_target_class],
),
axis=0,
)
self.sample_indices_ = idx_under
return _safe_indexing(X, idx_under), _safe_indexing(y, idx_under)
def _more_tags(self):
return {"sample_indices": True}
@Substitution(
sampling_strategy=BaseCleaningSampler._sampling_strategy_docstring,
n_jobs=_n_jobs_docstring,
)
class RepeatedEditedNearestNeighbours(BaseCleaningSampler):
"""Undersample based on the repeated edited nearest neighbour method.
This method repeats the :class:`EditedNearestNeighbours` algorithm several times.
The repetitions will stop when i) the maximum number of iterations is reached,
or ii) no more observations are being removed, or iii) one of the majority classes
becomes a minority class or iv) one of the majority classes disappears
during undersampling.
Read more in the :ref:`User Guide <edited_nearest_neighbors>`.
Parameters
----------
{sampling_strategy}
n_neighbors : int or object, default=3
If ``int``, size of the neighbourhood to consider for the undersampling, i.e.,
if `n_neighbors=3`, a sample will be removed when any or most of its 3 closest
neighbours are from a different class. If object, an estimator that inherits
from :class:`~sklearn.neighbors.base.KNeighborsMixin` that will be used to
find the nearest-neighbors. Note that if you want to examine the 3 closest
neighbours of a sample for the undersampling, you need to pass a 4-KNN.
max_iter : int, default=100
Maximum number of iterations of the edited nearest neighbours.
kind_sel : {{'all', 'mode'}}, default='all'
Strategy to use to exclude samples.
- If ``'all'``, all neighbours should be of the same class as the examined
sample for it not to be excluded.
- If ``'mode'``, most neighbours should be of the same class as the examined
sample for it not to be excluded.
The strategy `"all"` will be less conservative than `'mode'`. Thus,
more samples will be removed when `kind_sel="all"`, generally.
{n_jobs}
Attributes
----------
sampling_strategy_ : dict
Dictionary containing the information to sample the dataset. The keys
correspond to the class labels from which to sample and the values
are the number of samples to sample.
nn_ : estimator object
Validated K-nearest Neighbours estimator linked to the parameter `n_neighbors`.
enn_ : sampler object
The validated :class:`~imblearn.under_sampling.EditedNearestNeighbours`
instance.
sample_indices_ : ndarray of shape (n_new_samples,)
Indices of the samples selected.
.. versionadded:: 0.4
n_iter_ : int
Number of iterations run.
.. versionadded:: 0.6
n_features_in_ : int
Number of features in the input dataset.
.. versionadded:: 0.9
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during `fit`. Defined only when `X` has feature
names that are all strings.
.. versionadded:: 0.10
See Also
--------
CondensedNearestNeighbour : Undersample by condensing samples.
EditedNearestNeighbours : Undersample by editing samples.
AllKNN : Undersample using ENN with varying neighbours.
Notes
-----
The method is based on [1]_. A one-vs.-rest scheme is used when
sampling a class as proposed in [1]_.
Supports multi-class resampling.
References
----------
.. [1] I. Tomek, "An Experiment with the Edited Nearest-Neighbor
Rule," IEEE Transactions on Systems, Man, and Cybernetics, vol. 6(6),
pp. 448-452, June 1976.
Examples
--------
>>> from collections import Counter
>>> from sklearn.datasets import make_classification
>>> from imblearn.under_sampling import RepeatedEditedNearestNeighbours
>>> X, y = make_classification(n_classes=2, class_sep=2,
... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
>>> print('Original dataset shape %s' % Counter(y))
Original dataset shape Counter({{1: 900, 0: 100}})
>>> renn = RepeatedEditedNearestNeighbours()
>>> X_res, y_res = renn.fit_resample(X, y)
>>> print('Resampled dataset shape %s' % Counter(y_res))
Resampled dataset shape Counter({{1: 887, 0: 100}})
"""
_parameter_constraints: dict = {
**BaseCleaningSampler._parameter_constraints,
"n_neighbors": [
Interval(numbers.Integral, 1, None, closed="left"),
HasMethods(["kneighbors", "kneighbors_graph"]),
],
"max_iter": [Interval(numbers.Integral, 1, None, closed="left")],
"kind_sel": [StrOptions({"all", "mode"})],
"n_jobs": [numbers.Integral, None],
}
def __init__(
self,
*,
sampling_strategy="auto",
n_neighbors=3,
max_iter=100,
kind_sel="all",
n_jobs=None,
):
super().__init__(sampling_strategy=sampling_strategy)
self.n_neighbors = n_neighbors
self.kind_sel = kind_sel
self.n_jobs = n_jobs
self.max_iter = max_iter
def _validate_estimator(self):
"""Private function to create the NN estimator"""
self.nn_ = check_neighbors_object(
"n_neighbors", self.n_neighbors, additional_neighbor=1
)
self.enn_ = EditedNearestNeighbours(
sampling_strategy=self.sampling_strategy,
n_neighbors=self.nn_,
kind_sel=self.kind_sel,
n_jobs=self.n_jobs,
)
def _fit_resample(self, X, y):
self._validate_estimator()
X_, y_ = X, y
self.sample_indices_ = np.arange(X.shape[0], dtype=int)
target_stats = Counter(y)
class_minority = min(target_stats, key=target_stats.get)
for n_iter in range(self.max_iter):
prev_len = y_.shape[0]
X_enn, y_enn = self.enn_.fit_resample(X_, y_)
# Check the stopping criteria:
# 1. the number of samples in y did not change over the iteration;
# 2. one of the majority classes now has fewer samples than the
#    original minority class;
# 3. one of the classes disappeared.
# Case 1
b_conv = prev_len == y_enn.shape[0]
# Case 2
stats_enn = Counter(y_enn)
count_non_min = np.array(
[
val
for val, key in zip(stats_enn.values(), stats_enn.keys())
if key != class_minority
]
)
b_min_bec_maj = np.any(count_non_min < target_stats[class_minority])
# Case 3
b_remove_maj_class = len(stats_enn) < len(target_stats)
X_, y_ = X_enn, y_enn
self.sample_indices_ = self.sample_indices_[self.enn_.sample_indices_]
if b_conv or b_min_bec_maj or b_remove_maj_class:
if b_conv:
X_, y_ = X_enn, y_enn
self.sample_indices_ = self.sample_indices_[self.enn_.sample_indices_]
break
self.n_iter_ = n_iter + 1
X_resampled, y_resampled = X_, y_
return X_resampled, y_resampled
def _more_tags(self):
return {"sample_indices": True}
@Substitution(
sampling_strategy=BaseCleaningSampler._sampling_strategy_docstring,
n_jobs=_n_jobs_docstring,
)
class AllKNN(BaseCleaningSampler):
"""Undersample based on the AllKNN method.
This method will apply :class:`EditedNearestNeighbours` several times varying the
number of nearest neighbours at each round. It begins by examining 1 closest
neighbour, and it increases the neighbourhood by 1 at each round.
The algorithm stops when the maximum number of neighbours is examined or
when the majority class becomes the minority class, whichever comes first.
Read more in the :ref:`User Guide <edited_nearest_neighbors>`.
Parameters
----------
{sampling_strategy}
n_neighbors : int or estimator object, default=3
If ``int``, size of the maximum neighbourhood to examine for the undersampling.
If `n_neighbors=3`, in the first iteration the algorithm will examine 1 closest
neighbour, in the second round 2, and in the final round 3. If object, an
estimator that inherits from :class:`~sklearn.neighbors.base.KNeighborsMixin`
that will be used to find the nearest-neighbors. Note that if you want to
examine the 3 closest neighbours of a sample, you need to pass a 4-KNN.
kind_sel : {{'all', 'mode'}}, default='all'
Strategy to use to exclude samples.
- If ``'all'``, all neighbours should be of the same class as the examined
sample for it not to be excluded.
- If ``'mode'``, most neighbours should be of the same class as the examined
sample for it not to be excluded.
The strategy `"all"` will be less conservative than `'mode'`. Thus,
more samples will be removed when `kind_sel="all"`, generally.
allow_minority : bool, default=False
If ``True``, it allows the majority classes to become the minority
class without early stopping.
.. versionadded:: 0.3
{n_jobs}
Attributes
----------
sampling_strategy_ : dict
Dictionary containing the information to sample the dataset. The keys
correspond to the class labels from which to sample and the values
are the number of samples to sample.
nn_ : estimator object
Validated K-nearest Neighbours estimator linked to the parameter `n_neighbors`.
enn_ : sampler object
The validated :class:`~imblearn.under_sampling.EditedNearestNeighbours`
instance.
sample_indices_ : ndarray of shape (n_new_samples,)
Indices of the samples selected.
.. versionadded:: 0.4
n_features_in_ : int
Number of features in the input dataset.
.. versionadded:: 0.9
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during `fit`. Defined only when `X` has feature
names that are all strings.
.. versionadded:: 0.10
See Also
--------
CondensedNearestNeighbour: Under-sampling by condensing samples.
EditedNearestNeighbours: Under-sampling by editing samples.
RepeatedEditedNearestNeighbours: Under-sampling by repeating ENN.
Notes
-----
The method is based on [1]_.
Supports multi-class resampling. A one-vs.-rest scheme is used when
sampling a class as proposed in [1]_.
References
----------
.. [1] I. Tomek, "An Experiment with the Edited Nearest-Neighbor
Rule," IEEE Transactions on Systems, Man, and Cybernetics, vol. 6(6),
pp. 448-452, June 1976.
Examples
--------
>>> from collections import Counter
>>> from sklearn.datasets import make_classification
>>> from imblearn.under_sampling import AllKNN
>>> X, y = make_classification(n_classes=2, class_sep=2,
... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
>>> print('Original dataset shape %s' % Counter(y))
Original dataset shape Counter({{1: 900, 0: 100}})
>>> allknn = AllKNN()
>>> X_res, y_res = allknn.fit_resample(X, y)
>>> print('Resampled dataset shape %s' % Counter(y_res))
Resampled dataset shape Counter({{1: 887, 0: 100}})
"""
_parameter_constraints: dict = {
**BaseCleaningSampler._parameter_constraints,
"n_neighbors": [
Interval(numbers.Integral, 1, None, closed="left"),
HasMethods(["kneighbors", "kneighbors_graph"]),
],
"kind_sel": [StrOptions({"all", "mode"})],
"allow_minority": ["boolean"],
"n_jobs": [numbers.Integral, None],
}
def __init__(
self,
*,
sampling_strategy="auto",
n_neighbors=3,
kind_sel="all",
allow_minority=False,
n_jobs=None,
):
super().__init__(sampling_strategy=sampling_strategy)
self.n_neighbors = n_neighbors
self.kind_sel = kind_sel
self.allow_minority = allow_minority
self.n_jobs = n_jobs
def _validate_estimator(self):
"""Create objects required by AllKNN"""
self.nn_ = check_neighbors_object(
"n_neighbors", self.n_neighbors, additional_neighbor=1
)
self.enn_ = EditedNearestNeighbours(
sampling_strategy=self.sampling_strategy,
n_neighbors=self.nn_,
kind_sel=self.kind_sel,
n_jobs=self.n_jobs,
)
def _fit_resample(self, X, y):
self._validate_estimator()
X_, y_ = X, y
target_stats = Counter(y)
class_minority = min(target_stats, key=target_stats.get)
self.sample_indices_ = np.arange(X.shape[0], dtype=int)
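# When ``n_neighbors`` is an int, ``self.nn_`` was built with one extra
# neighbour, so this loop applies the inner ENN with 1, 2, ...,
# ``n_neighbors`` neighbours, one value per round.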
for curr_size_ngh in range(1, self.nn_.n_neighbors):
self.enn_.n_neighbors = curr_size_ngh
X_enn, y_enn = self.enn_.fit_resample(X_, y_)
# Check the stopping criteria:
# 1. one of the majority classes now has fewer samples than the
#    original minority class;
# 2. one of the classes disappeared.
# Case 1
stats_enn = Counter(y_enn)
count_non_min = np.array(
[
val
for val, key in zip(stats_enn.values(), stats_enn.keys())
if key != class_minority
]
)
b_min_bec_maj = np.any(count_non_min < target_stats[class_minority])
if self.allow_minority:
# overwrite b_min_bec_maj
b_min_bec_maj = False
# Case 2
b_remove_maj_class = len(stats_enn) < len(target_stats)
X_, y_ = X_enn, y_enn
self.sample_indices_ = self.sample_indices_[self.enn_.sample_indices_]
if b_min_bec_maj or b_remove_maj_class:
break
X_resampled, y_resampled = X_, y_
return X_resampled, y_resampled
def _more_tags(self):
return {"sample_indices": True}
| 21,490 | 33.440705 | 87 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/under_sampling/_prototype_selection/_nearmiss.py | """Class to perform under-sampling based on nearmiss methods."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
import numbers
import warnings
from collections import Counter
import numpy as np
from sklearn.utils import _safe_indexing
from ...utils import Substitution, check_neighbors_object
from ...utils._docstring import _n_jobs_docstring
from ...utils._param_validation import HasMethods, Interval
from ..base import BaseUnderSampler
@Substitution(
sampling_strategy=BaseUnderSampler._sampling_strategy_docstring,
n_jobs=_n_jobs_docstring,
)
class NearMiss(BaseUnderSampler):
"""Class to perform under-sampling based on NearMiss methods.
Read more in the :ref:`User Guide <controlled_under_sampling>`.
Parameters
----------
{sampling_strategy}
version : int, default=1
Version of the NearMiss to use. Possible values are 1, 2 or 3.
n_neighbors : int or estimator object, default=3
If ``int``, size of the neighbourhood to consider to compute the
average distance to the minority point samples. If object, an
estimator that inherits from
:class:`~sklearn.neighbors.base.KNeighborsMixin` that will be used to
find the k_neighbors.
By default, it will be a 3-NN.
n_neighbors_ver3 : int or estimator object, default=3
If ``int``, the NearMiss-3 algorithm starts with a phase of re-sampling. This
parameter corresponds to the number of neighbours selected to create the
subset in which the selection will be performed. If object, an
estimator that inherits from
:class:`~sklearn.neighbors.base.KNeighborsMixin` that will be used to
find the k_neighbors.
By default, it will be a 3-NN.
{n_jobs}
Attributes
----------
sampling_strategy_ : dict
Dictionary containing the information to sample the dataset. The keys
correspond to the class labels from which to sample and the values
are the number of samples to sample.
nn_ : estimator object
Validated K-nearest Neighbours object created from `n_neighbors` parameter.
sample_indices_ : ndarray of shape (n_new_samples,)
Indices of the samples selected.
.. versionadded:: 0.4
n_features_in_ : int
Number of features in the input dataset.
.. versionadded:: 0.9
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during `fit`. Defined only when `X` has feature
names that are all strings.
.. versionadded:: 0.10
See Also
--------
RandomUnderSampler : Random undersample the dataset.
InstanceHardnessThreshold : Use of classifier to undersample a dataset.
Notes
-----
The methods are based on [1]_.
Supports multi-class resampling.
References
----------
.. [1] I. Mani, I. Zhang. "kNN approach to unbalanced data distributions:
a case study involving information extraction," In Proceedings of
workshop on learning from imbalanced datasets, 2003.
Examples
--------
>>> from collections import Counter
>>> from sklearn.datasets import make_classification
>>> from imblearn.under_sampling import NearMiss
>>> X, y = make_classification(n_classes=2, class_sep=2,
... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
>>> print('Original dataset shape %s' % Counter(y))
Original dataset shape Counter({{1: 900, 0: 100}})
>>> nm = NearMiss()
>>> X_res, y_res = nm.fit_resample(X, y)
>>> print('Resampled dataset shape %s' % Counter(y_res))
Resampled dataset shape Counter({{0: 100, 1: 100}})
"""
_parameter_constraints: dict = {
**BaseUnderSampler._parameter_constraints,
"version": [Interval(numbers.Integral, 1, 3, closed="both")],
"n_neighbors": [
Interval(numbers.Integral, 1, None, closed="left"),
HasMethods(["kneighbors", "kneighbors_graph"]),
],
"n_neighbors_ver3": [
Interval(numbers.Integral, 1, None, closed="left"),
HasMethods(["kneighbors", "kneighbors_graph"]),
],
"n_jobs": [numbers.Integral, None],
}
def __init__(
self,
*,
sampling_strategy="auto",
version=1,
n_neighbors=3,
n_neighbors_ver3=3,
n_jobs=None,
):
super().__init__(sampling_strategy=sampling_strategy)
self.version = version
self.n_neighbors = n_neighbors
self.n_neighbors_ver3 = n_neighbors_ver3
self.n_jobs = n_jobs
def _selection_dist_based(
self, X, y, dist_vec, num_samples, key, sel_strategy="nearest"
):
"""Select the appropriate samples depending of the strategy selected.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Original samples.
y : array-like, shape (n_samples,)
Associated label to X.
dist_vec : ndarray of shape (n_samples, n_neighbors)
The distance matrix to the nearest neighbours.
num_samples : int
The desired number of samples to select.
key : str or int
The target class.
sel_strategy : str, optional (default='nearest')
Strategy to select the samples. Either 'nearest' or 'farthest'.
Returns
-------
idx_sel : ndarray, shape (num_samples,)
The list of the indices of the selected samples.
"""
# Aggregate the distances over the last ``n_neighbors`` columns, i.e. over
# the farthest of the queried neighbours
dist_avg_vec = np.sum(dist_vec[:, -self.nn_.n_neighbors :], axis=1)
target_class_indices = np.flatnonzero(y == key)
if dist_vec.shape[0] != _safe_indexing(X, target_class_indices).shape[0]:
raise RuntimeError(
"The samples to be selected do not correspond"
" to the distance matrix given. Ensure that"
" both `X[y == key]` and `dist_vec` are"
" related."
)
# Sort the list of distance and get the index
if sel_strategy == "nearest":
sort_way = False
else: # sel_strategy == "farthest":
sort_way = True
sorted_idx = sorted(
range(len(dist_avg_vec)),
key=dist_avg_vec.__getitem__,
reverse=sort_way,
)
# Throw a warning to tell the user that we did not have enough samples
# to select and that we just select everything
if len(sorted_idx) < num_samples:
warnings.warn(
"The number of the samples to be selected is larger"
" than the number of samples available. The"
" balancing ratio cannot be ensure and all samples"
" will be returned."
)
# Select the desired number of samples
return sorted_idx[:num_samples]
def _validate_estimator(self):
"""Private function to create the NN estimator"""
self.nn_ = check_neighbors_object("n_neighbors", self.n_neighbors)
self.nn_.set_params(**{"n_jobs": self.n_jobs})
if self.version == 3:
self.nn_ver3_ = check_neighbors_object(
"n_neighbors_ver3", self.n_neighbors_ver3
)
self.nn_ver3_.set_params(**{"n_jobs": self.n_jobs})
def _fit_resample(self, X, y):
self._validate_estimator()
idx_under = np.empty((0,), dtype=int)
target_stats = Counter(y)
class_minority = min(target_stats, key=target_stats.get)
minority_class_indices = np.flatnonzero(y == class_minority)
self.nn_.fit(_safe_indexing(X, minority_class_indices))
for target_class in np.unique(y):
if target_class in self.sampling_strategy_.keys():
n_samples = self.sampling_strategy_[target_class]
target_class_indices = np.flatnonzero(y == target_class)
X_class = _safe_indexing(X, target_class_indices)
y_class = _safe_indexing(y, target_class_indices)
if self.version == 1:
dist_vec, idx_vec = self.nn_.kneighbors(
X_class, n_neighbors=self.nn_.n_neighbors
)
index_target_class = self._selection_dist_based(
X,
y,
dist_vec,
n_samples,
target_class,
sel_strategy="nearest",
)
elif self.version == 2:
dist_vec, idx_vec = self.nn_.kneighbors(
X_class, n_neighbors=target_stats[class_minority]
)
index_target_class = self._selection_dist_based(
X,
y,
dist_vec,
n_samples,
target_class,
sel_strategy="nearest",
)
elif self.version == 3:
self.nn_ver3_.fit(X_class)
dist_vec, idx_vec = self.nn_ver3_.kneighbors(
_safe_indexing(X, minority_class_indices)
)
idx_vec_farthest = np.unique(idx_vec.reshape(-1))
X_class_selected = _safe_indexing(X_class, idx_vec_farthest)
y_class_selected = _safe_indexing(y_class, idx_vec_farthest)
dist_vec, idx_vec = self.nn_.kneighbors(
X_class_selected, n_neighbors=self.nn_.n_neighbors
)
index_target_class = self._selection_dist_based(
X_class_selected,
y_class_selected,
dist_vec,
n_samples,
target_class,
sel_strategy="farthest",
)
# index_target_class is relative to the subset selected in the
# previous step, so map it back to indices within the target class
index_target_class = idx_vec_farthest[index_target_class]
else:
index_target_class = slice(None)
idx_under = np.concatenate(
(
idx_under,
np.flatnonzero(y == target_class)[index_target_class],
),
axis=0,
)
self.sample_indices_ = idx_under
return _safe_indexing(X, idx_under), _safe_indexing(y, idx_under)
# fmt: off
def _more_tags(self):
return {
"sample_indices": True,
"_xfail_checks": {
"check_samplers_fit_resample":
"Fails for NearMiss-3 with less samples than expected"
}
}
# fmt: on
| 11,121 | 34.307937 | 83 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/under_sampling/_prototype_selection/_one_sided_selection.py | """Class to perform under-sampling based on one-sided selection method."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
import numbers
import warnings
from collections import Counter
import numpy as np
from sklearn.base import clone
from sklearn.neighbors import KNeighborsClassifier
from sklearn.utils import _safe_indexing, check_random_state
from ...utils import Substitution
from ...utils._docstring import _n_jobs_docstring, _random_state_docstring
from ...utils._param_validation import HasMethods, Interval
from ..base import BaseCleaningSampler
from ._tomek_links import TomekLinks
@Substitution(
sampling_strategy=BaseCleaningSampler._sampling_strategy_docstring,
n_jobs=_n_jobs_docstring,
random_state=_random_state_docstring,
)
class OneSidedSelection(BaseCleaningSampler):
"""Class to perform under-sampling based on one-sided selection method.
Read more in the :ref:`User Guide <condensed_nearest_neighbors>`.
Parameters
----------
{sampling_strategy}
{random_state}
n_neighbors : int or estimator object, default=None
If ``int``, size of the neighbourhood to consider to compute the
nearest neighbors. If object, an estimator that inherits from
:class:`~sklearn.neighbors.base.KNeighborsMixin` that will be used to
find the nearest-neighbors. If `None`, a
:class:`~sklearn.neighbors.KNeighborsClassifier` with a 1-NN rule will
be used.
n_seeds_S : int, default=1
Number of samples to extract in order to build the set S.
{n_jobs}
Attributes
----------
sampling_strategy_ : dict
Dictionary containing the information to sample the dataset. The keys
correspond to the class labels from which to sample and the values
are the number of samples to sample.
estimator_ : estimator object
Validated K-nearest neighbors estimator created from parameter `n_neighbors`.
.. deprecated:: 0.12
`estimator_` is deprecated in 0.12 and will be removed in 0.14. Use
`estimators_` instead that contains the list of all K-nearest
neighbors estimator used for each pair of class.
estimators_ : list of estimator objects of shape (n_resampled_classes - 1,)
Contains the K-nearest neighbor estimator used for each pair of classes.
.. versionadded:: 0.12
sample_indices_ : ndarray of shape (n_new_samples,)
Indices of the samples selected.
.. versionadded:: 0.4
n_features_in_ : int
Number of features in the input dataset.
.. versionadded:: 0.9
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during `fit`. Defined only when `X` has feature
names that are all strings.
.. versionadded:: 0.10
See Also
--------
EditedNearestNeighbours : Undersample by editing noisy samples.
Notes
-----
The method is based on [1]_.
Supports multi-class resampling. A one-vs.-one scheme is used when sampling
a class as proposed in [1]_. For each class to be sampled, all samples of
this class and the minority class are used during the sampling procedure.
References
----------
.. [1] M. Kubat, S. Matwin, "Addressing the curse of imbalanced training
sets: one-sided selection," In ICML, vol. 97, pp. 179-186, 1997.
Examples
--------
>>> from collections import Counter
>>> from sklearn.datasets import make_classification
>>> from imblearn.under_sampling import OneSidedSelection
>>> X, y = make_classification(n_classes=2, class_sep=2,
... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
>>> print('Original dataset shape %s' % Counter(y))
Original dataset shape Counter({{1: 900, 0: 100}})
>>> oss = OneSidedSelection(random_state=42)
>>> X_res, y_res = oss.fit_resample(X, y)
>>> print('Resampled dataset shape %s' % Counter(y_res))
Resampled dataset shape Counter({{1: 496, 0: 100}})
"""
_parameter_constraints: dict = {
**BaseCleaningSampler._parameter_constraints,
"n_neighbors": [
Interval(numbers.Integral, 1, None, closed="left"),
HasMethods(["kneighbors", "kneighbors_graph"]),
None,
],
"n_seeds_S": [Interval(numbers.Integral, 1, None, closed="left")],
"n_jobs": [numbers.Integral, None],
"random_state": ["random_state"],
}
def __init__(
self,
*,
sampling_strategy="auto",
random_state=None,
n_neighbors=None,
n_seeds_S=1,
n_jobs=None,
):
super().__init__(sampling_strategy=sampling_strategy)
self.random_state = random_state
self.n_neighbors = n_neighbors
self.n_seeds_S = n_seeds_S
self.n_jobs = n_jobs
def _validate_estimator(self):
"""Private function to create the NN estimator"""
if self.n_neighbors is None:
estimator = KNeighborsClassifier(n_neighbors=1, n_jobs=self.n_jobs)
elif isinstance(self.n_neighbors, int):
estimator = KNeighborsClassifier(
n_neighbors=self.n_neighbors, n_jobs=self.n_jobs
)
elif isinstance(self.n_neighbors, KNeighborsClassifier):
estimator = clone(self.n_neighbors)
return estimator
def _fit_resample(self, X, y):
estimator = self._validate_estimator()
random_state = check_random_state(self.random_state)
target_stats = Counter(y)
class_minority = min(target_stats, key=target_stats.get)
idx_under = np.empty((0,), dtype=int)
self.estimators_ = []
for target_class in np.unique(y):
if target_class in self.sampling_strategy_.keys():
# select a sample from the current class
idx_maj = np.flatnonzero(y == target_class)
sel_idx_maj = random_state.randint(
low=0, high=target_stats[target_class], size=self.n_seeds_S
)
idx_maj_sample = idx_maj[sel_idx_maj]
minority_class_indices = np.flatnonzero(y == class_minority)
C_indices = np.append(minority_class_indices, idx_maj_sample)
# create the set composed of all minority samples and one
# sample from the current class.
C_x = _safe_indexing(X, C_indices)
C_y = _safe_indexing(y, C_indices)
# create the set S by removing the seeds from it,
# since they will be added back anyway
idx_maj_extracted = np.delete(idx_maj, sel_idx_maj, axis=0)
S_x = _safe_indexing(X, idx_maj_extracted)
S_y = _safe_indexing(y, idx_maj_extracted)
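# A 1-NN (or the provided k-NN) trained on C classifies every sample of S:
# misclassified samples are kept because they carry information close to the
# decision boundary, while correctly classified majority samples are
# considered redundant and discarded.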
self.estimators_.append(clone(estimator).fit(C_x, C_y))
pred_S_y = self.estimators_[-1].predict(S_x)
S_misclassified_indices = np.flatnonzero(pred_S_y != S_y)
idx_tmp = idx_maj_extracted[S_misclassified_indices]
idx_under = np.concatenate((idx_under, idx_maj_sample, idx_tmp), axis=0)
else:
idx_under = np.concatenate(
(idx_under, np.flatnonzero(y == target_class)), axis=0
)
X_resampled = _safe_indexing(X, idx_under)
y_resampled = _safe_indexing(y, idx_under)
# apply Tomek cleaning
tl = TomekLinks(sampling_strategy=list(self.sampling_strategy_.keys()))
X_cleaned, y_cleaned = tl.fit_resample(X_resampled, y_resampled)
self.sample_indices_ = _safe_indexing(idx_under, tl.sample_indices_)
return X_cleaned, y_cleaned
@property
def estimator_(self):
"""Last fitted k-NN estimator."""
warnings.warn(
"`estimator_` attribute has been deprecated in 0.12 and will be "
"removed in 0.14. Use `estimators_` instead.",
FutureWarning,
)
return self.estimators_[-1]
def _more_tags(self):
return {"sample_indices": True}
| 8,259 | 35.22807 | 88 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/under_sampling/_prototype_selection/_tomek_links.py | """Class to perform under-sampling by removing Tomek's links."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Fernando Nogueira
# Christos Aridas
# License: MIT
import numbers
import numpy as np
from sklearn.neighbors import NearestNeighbors
from sklearn.utils import _safe_indexing
from ...utils import Substitution
from ...utils._docstring import _n_jobs_docstring
from ..base import BaseCleaningSampler
@Substitution(
sampling_strategy=BaseCleaningSampler._sampling_strategy_docstring,
n_jobs=_n_jobs_docstring,
)
class TomekLinks(BaseCleaningSampler):
"""Under-sampling by removing Tomek's links.
Read more in the :ref:`User Guide <tomek_links>`.
Parameters
----------
{sampling_strategy}
{n_jobs}
Attributes
----------
sampling_strategy_ : dict
Dictionary containing the information to sample the dataset. The keys
correspond to the class labels from which to sample and the values
are the number of samples to sample.
sample_indices_ : ndarray of shape (n_new_samples,)
Indices of the samples selected.
.. versionadded:: 0.4
n_features_in_ : int
Number of features in the input dataset.
.. versionadded:: 0.9
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during `fit`. Defined only when `X` has feature
names that are all strings.
.. versionadded:: 0.10
See Also
--------
EditedNearestNeighbours : Undersample by samples edition.
CondensedNearestNeighbour : Undersample by samples condensation.
RandomUnderSampler : Randomly under-sample the dataset.
Notes
-----
This method is based on [1]_.
Supports multi-class resampling. A one-vs.-rest scheme is used as
originally proposed in [1]_.
References
----------
.. [1] I. Tomek, "Two modifications of CNN," In Systems, Man, and
Cybernetics, IEEE Transactions on, vol. 6, pp 769-772, 1976.
Examples
--------
>>> from collections import Counter
>>> from sklearn.datasets import make_classification
>>> from imblearn.under_sampling import TomekLinks
>>> X, y = make_classification(n_classes=2, class_sep=2,
... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
>>> print('Original dataset shape %s' % Counter(y))
Original dataset shape Counter({{1: 900, 0: 100}})
>>> tl = TomekLinks()
>>> X_res, y_res = tl.fit_resample(X, y)
>>> print('Resampled dataset shape %s' % Counter(y_res))
Resampled dataset shape Counter({{1: 897, 0: 100}})
"""
_parameter_constraints: dict = {
**BaseCleaningSampler._parameter_constraints,
"n_jobs": [numbers.Integral, None],
}
def __init__(self, *, sampling_strategy="auto", n_jobs=None):
super().__init__(sampling_strategy=sampling_strategy)
self.n_jobs = n_jobs
@staticmethod
def is_tomek(y, nn_index, class_type):
"""Detect if samples are Tomek's link.
More precisely, it uses the target vector and the first neighbour of
every sample point to look for Tomek pairs. It returns a boolean
vector with True for the majority samples that are part of a Tomek link.
Parameters
----------
y : ndarray of shape (n_samples,)
Target vector of the data set, necessary to keep track of whether a
sample belongs to the minority class or not.
nn_index : ndarray of shape (len(y),)
The index of the nearest neighbour of each sample point.
class_type : int or str
The label of the minority class.
Returns
-------
is_tomek : ndarray of shape (len(y),)
Boolean vector of length ``len(y)``, with True for the majority samples
that are part of a Tomek link.
"""
links = np.zeros(len(y), dtype=bool)
# find which class to not consider
class_excluded = [c for c in np.unique(y) if c not in class_type]
# there is a Tomek link between two samples if they are both nearest
# neighbors of each others.
for index_sample, target_sample in enumerate(y):
if target_sample in class_excluded:
continue
if y[nn_index[index_sample]] != target_sample:
if nn_index[nn_index[index_sample]] == index_sample:
links[index_sample] = True
return links
def _fit_resample(self, X, y):
# Find the nearest neighbour of every point
nn = NearestNeighbors(n_neighbors=2, n_jobs=self.n_jobs)
nn.fit(X)
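# Column 0 returned by ``kneighbors`` is the sample itself, so column 1 holds
# the index of each sample's nearest other point.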
nns = nn.kneighbors(X, return_distance=False)[:, 1]
links = self.is_tomek(y, nns, self.sampling_strategy_)
self.sample_indices_ = np.flatnonzero(np.logical_not(links))
return (
_safe_indexing(X, self.sample_indices_),
_safe_indexing(y, self.sample_indices_),
)
def _more_tags(self):
return {"sample_indices": True}
| 5,115 | 30.776398 | 79 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/under_sampling/_prototype_selection/_instance_hardness_threshold.py | """Class to perform under-sampling based on the instance hardness
threshold."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Dayvid Oliveira
# Christos Aridas
# License: MIT
import numbers
from collections import Counter
import numpy as np
from sklearn.base import ClassifierMixin, clone
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble._base import _set_random_states
from sklearn.model_selection import StratifiedKFold, cross_val_predict
from sklearn.utils import _safe_indexing, check_random_state
from ...utils import Substitution
from ...utils._docstring import _n_jobs_docstring, _random_state_docstring
from ...utils._param_validation import HasMethods
from ..base import BaseUnderSampler
@Substitution(
sampling_strategy=BaseUnderSampler._sampling_strategy_docstring,
n_jobs=_n_jobs_docstring,
random_state=_random_state_docstring,
)
class InstanceHardnessThreshold(BaseUnderSampler):
"""Undersample based on the instance hardness threshold.
Read more in the :ref:`User Guide <instance_hardness_threshold>`.
Parameters
----------
estimator : estimator object, default=None
Classifier to be used to estimate instance hardness of the samples.
This classifier should implement `predict_proba`.
{sampling_strategy}
{random_state}
cv : int, default=5
Number of folds to be used when estimating samples' instance hardness.
{n_jobs}
Attributes
----------
sampling_strategy_ : dict
Dictionary containing the information to sample the dataset. The keys
correspond to the class labels from which to sample and the values
are the number of samples to sample.
estimator_ : estimator object
The validated classifier used to estimate the instance hardness of the samples.
sample_indices_ : ndarray of shape (n_new_samples,)
Indices of the samples selected.
.. versionadded:: 0.4
n_features_in_ : int
Number of features in the input dataset.
.. versionadded:: 0.9
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during `fit`. Defined only when `X` has feature
names that are all strings.
.. versionadded:: 0.10
See Also
--------
NearMiss : Undersample based on near-miss search.
RandomUnderSampler : Random under-sampling.
Notes
-----
The method is based on [1]_.
Supports multi-class resampling: from each class to be under-sampled, it
retains the observations with the highest probability of being correctly
classified.
References
----------
.. [1] D. Smith, Michael R., Tony Martinez, and Christophe Giraud-Carrier.
"An instance level analysis of data complexity." Machine learning
95.2 (2014): 225-256.
Examples
--------
>>> from collections import Counter
>>> from sklearn.datasets import make_classification
>>> from imblearn.under_sampling import InstanceHardnessThreshold
>>> X, y = make_classification(n_classes=2, class_sep=2,
... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
>>> print('Original dataset shape %s' % Counter(y))
Original dataset shape Counter({{1: 900, 0: 100}})
>>> iht = InstanceHardnessThreshold(random_state=42)
>>> X_res, y_res = iht.fit_resample(X, y)
>>> print('Resampled dataset shape %s' % Counter(y_res))
Resampled dataset shape Counter({{1: 5..., 0: 100}})
"""
_parameter_constraints: dict = {
**BaseUnderSampler._parameter_constraints,
"estimator": [
HasMethods(["fit", "predict_proba"]),
None,
],
"cv": ["cv_object"],
"n_jobs": [numbers.Integral, None],
"random_state": ["random_state"],
}
def __init__(
self,
*,
estimator=None,
sampling_strategy="auto",
random_state=None,
cv=5,
n_jobs=None,
):
super().__init__(sampling_strategy=sampling_strategy)
self.random_state = random_state
self.estimator = estimator
self.cv = cv
self.n_jobs = n_jobs
def _validate_estimator(self, random_state):
"""Private function to create the classifier"""
if (
self.estimator is not None
and isinstance(self.estimator, ClassifierMixin)
and hasattr(self.estimator, "predict_proba")
):
self.estimator_ = clone(self.estimator)
_set_random_states(self.estimator_, random_state)
elif self.estimator is None:
self.estimator_ = RandomForestClassifier(
n_estimators=100,
random_state=self.random_state,
n_jobs=self.n_jobs,
)
def _fit_resample(self, X, y):
random_state = check_random_state(self.random_state)
self._validate_estimator(random_state)
target_stats = Counter(y)
skf = StratifiedKFold(
n_splits=self.cv,
shuffle=True,
random_state=random_state,
)
probabilities = cross_val_predict(
self.estimator_,
X,
y,
cv=skf,
n_jobs=self.n_jobs,
method="predict_proba",
)
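# Keep, for each sample, the cross-validated probability assigned to its own
# class: low values indicate "hard" samples.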
probabilities = probabilities[range(len(y)), y]
idx_under = np.empty((0,), dtype=int)
for target_class in np.unique(y):
if target_class in self.sampling_strategy_.keys():
n_samples = self.sampling_strategy_[target_class]
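# The percentile is chosen so that roughly ``n_samples`` samples of the class
# lie at or above the threshold, i.e. only the easiest samples are retained.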
threshold = np.percentile(
probabilities[y == target_class],
(1.0 - (n_samples / target_stats[target_class])) * 100.0,
)
index_target_class = np.flatnonzero(
probabilities[y == target_class] >= threshold
)
else:
index_target_class = slice(None)
idx_under = np.concatenate(
(
idx_under,
np.flatnonzero(y == target_class)[index_target_class],
),
axis=0,
)
self.sample_indices_ = idx_under
return _safe_indexing(X, idx_under), _safe_indexing(y, idx_under)
def _more_tags(self):
return {"sample_indices": True}
| 6,500 | 30.712195 | 87 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/under_sampling/_prototype_selection/__init__.py | """
The :mod:`imblearn.under_sampling.prototype_selection` submodule contains
methods that select samples in order to balance the dataset.
"""
from ._condensed_nearest_neighbour import CondensedNearestNeighbour
from ._edited_nearest_neighbours import (
AllKNN,
EditedNearestNeighbours,
RepeatedEditedNearestNeighbours,
)
from ._instance_hardness_threshold import InstanceHardnessThreshold
from ._nearmiss import NearMiss
from ._neighbourhood_cleaning_rule import NeighbourhoodCleaningRule
from ._one_sided_selection import OneSidedSelection
from ._random_under_sampler import RandomUnderSampler
from ._tomek_links import TomekLinks
__all__ = [
"RandomUnderSampler",
"InstanceHardnessThreshold",
"NearMiss",
"TomekLinks",
"EditedNearestNeighbours",
"RepeatedEditedNearestNeighbours",
"AllKNN",
"OneSidedSelection",
"CondensedNearestNeighbour",
"NeighbourhoodCleaningRule",
]
| 928 | 28.967742 | 73 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/under_sampling/_prototype_selection/_random_under_sampler.py | """Class to perform random under-sampling."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
import numpy as np
from sklearn.utils import _safe_indexing, check_random_state
from ...utils import Substitution, check_target_type
from ...utils._docstring import _random_state_docstring
from ...utils._validation import _check_X
from ..base import BaseUnderSampler
@Substitution(
sampling_strategy=BaseUnderSampler._sampling_strategy_docstring,
random_state=_random_state_docstring,
)
class RandomUnderSampler(BaseUnderSampler):
"""Class to perform random under-sampling.
Under-sample the majority class(es) by randomly picking samples
with or without replacement.
Read more in the :ref:`User Guide <controlled_under_sampling>`.
Parameters
----------
{sampling_strategy}
{random_state}
replacement : bool, default=False
Whether the sample is with or without replacement.
Attributes
----------
sampling_strategy_ : dict
Dictionary containing the information to sample the dataset. The keys
correspond to the class labels from which to sample and the values
are the number of samples to sample.
sample_indices_ : ndarray of shape (n_new_samples,)
Indices of the samples selected.
.. versionadded:: 0.4
n_features_in_ : int
Number of features in the input dataset.
.. versionadded:: 0.9
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during `fit`. Defined only when `X` has feature
names that are all strings.
.. versionadded:: 0.10
See Also
--------
NearMiss : Undersample using near-miss samples.
Notes
-----
Supports multi-class resampling by sampling each class independently.
Supports heterogeneous data as object array containing string and numeric
data.
Examples
--------
>>> from collections import Counter
>>> from sklearn.datasets import make_classification
>>> from imblearn.under_sampling import RandomUnderSampler
>>> X, y = make_classification(n_classes=2, class_sep=2,
... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
>>> print('Original dataset shape %s' % Counter(y))
Original dataset shape Counter({{1: 900, 0: 100}})
>>> rus = RandomUnderSampler(random_state=42)
>>> X_res, y_res = rus.fit_resample(X, y)
>>> print('Resampled dataset shape %s' % Counter(y_res))
Resampled dataset shape Counter({{0: 100, 1: 100}})
"""
_parameter_constraints: dict = {
**BaseUnderSampler._parameter_constraints,
"replacement": ["boolean"],
"random_state": ["random_state"],
}
def __init__(
self, *, sampling_strategy="auto", random_state=None, replacement=False
):
super().__init__(sampling_strategy=sampling_strategy)
self.random_state = random_state
self.replacement = replacement
def _check_X_y(self, X, y):
y, binarize_y = check_target_type(y, indicate_one_vs_all=True)
X = _check_X(X)
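# ``_check_X`` performs only a light validation of X (string/object data and
# missing values are kept), which is what allows the heterogeneous inputs
# mentioned in the class docstring to pass through.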
self._check_n_features(X, reset=True)
self._check_feature_names(X, reset=True)
return X, y, binarize_y
def _fit_resample(self, X, y):
random_state = check_random_state(self.random_state)
idx_under = np.empty((0,), dtype=int)
for target_class in np.unique(y):
if target_class in self.sampling_strategy_.keys():
n_samples = self.sampling_strategy_[target_class]
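# Draw ``n_samples`` positions within the current class, with or without
# replacement depending on ``self.replacement``.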
index_target_class = random_state.choice(
range(np.count_nonzero(y == target_class)),
size=n_samples,
replace=self.replacement,
)
else:
index_target_class = slice(None)
idx_under = np.concatenate(
(
idx_under,
np.flatnonzero(y == target_class)[index_target_class],
),
axis=0,
)
self.sample_indices_ = idx_under
return _safe_indexing(X, idx_under), _safe_indexing(y, idx_under)
def _more_tags(self):
return {
"X_types": ["2darray", "string", "sparse", "dataframe"],
"sample_indices": True,
"allow_nan": True,
"_xfail_checks": {
"check_complex_data": "Robust to this type of data.",
},
}
| 4,601 | 31.181818 | 79 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/under_sampling/_prototype_selection/_condensed_nearest_neighbour.py | """Class to perform under-sampling based on the condensed nearest neighbour
method."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
import numbers
import warnings
from collections import Counter
import numpy as np
from scipy.sparse import issparse
from sklearn.base import clone
from sklearn.neighbors import KNeighborsClassifier
from sklearn.utils import _safe_indexing, check_random_state
from ...utils import Substitution
from ...utils._docstring import _n_jobs_docstring, _random_state_docstring
from ...utils._param_validation import HasMethods, Interval
from ..base import BaseCleaningSampler
@Substitution(
sampling_strategy=BaseCleaningSampler._sampling_strategy_docstring,
n_jobs=_n_jobs_docstring,
random_state=_random_state_docstring,
)
class CondensedNearestNeighbour(BaseCleaningSampler):
"""Undersample based on the condensed nearest neighbour method.
Read more in the :ref:`User Guide <condensed_nearest_neighbors>`.
Parameters
----------
{sampling_strategy}
{random_state}
n_neighbors : int or estimator object, default=None
If ``int``, size of the neighbourhood to consider to compute the
nearest neighbors. If object, an estimator that inherits from
:class:`~sklearn.neighbors.base.KNeighborsMixin` that will be used to
find the nearest-neighbors. If `None`, a
:class:`~sklearn.neighbors.KNeighborsClassifier` with a 1-NN rule will
be used.
n_seeds_S : int, default=1
Number of samples to extract in order to build the set S.
{n_jobs}
Attributes
----------
sampling_strategy_ : dict
Dictionary containing the information to sample the dataset. The keys
correspond to the class labels from which to sample and the values
are the number of samples to sample.
estimator_ : estimator object
The validated K-nearest neighbor estimator created from `n_neighbors` parameter.
.. deprecated:: 0.12
`estimator_` is deprecated in 0.12 and will be removed in 0.14. Use
`estimators_` instead that contains the list of all K-nearest
neighbors estimator used for each pair of class.
estimators_ : list of estimator objects of shape (n_resampled_classes - 1,)
Contains the K-nearest neighbor estimator used for each pair of classes.
.. versionadded:: 0.12
sample_indices_ : ndarray of shape (n_new_samples,)
Indices of the samples selected.
.. versionadded:: 0.4
n_features_in_ : int
Number of features in the input dataset.
.. versionadded:: 0.9
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during `fit`. Defined only when `X` has feature
names that are all strings.
.. versionadded:: 0.10
See Also
--------
EditedNearestNeighbours : Undersample by editing samples.
RepeatedEditedNearestNeighbours : Undersample by repeating ENN algorithm.
AllKNN : Undersample using ENN and various number of neighbours.
Notes
-----
The method is based on [1]_.
Supports multi-class resampling: a strategy one (minority) vs. each other
classes is applied.
References
----------
.. [1] P. Hart, "The condensed nearest neighbor rule,"
In Information Theory, IEEE Transactions on, vol. 14(3),
pp. 515-516, 1968.
Examples
--------
>>> from collections import Counter # doctest: +SKIP
>>> from sklearn.datasets import fetch_mldata # doctest: +SKIP
>>> from imblearn.under_sampling import \
CondensedNearestNeighbour # doctest: +SKIP
>>> pima = fetch_mldata('diabetes_scale') # doctest: +SKIP
>>> X, y = pima['data'], pima['target'] # doctest: +SKIP
>>> print('Original dataset shape %s' % Counter(y)) # doctest: +SKIP
Original dataset shape Counter({{1: 500, -1: 268}}) # doctest: +SKIP
>>> cnn = CondensedNearestNeighbour(random_state=42) # doctest: +SKIP
>>> X_res, y_res = cnn.fit_resample(X, y) #doctest: +SKIP
>>> print('Resampled dataset shape %s' % Counter(y_res)) # doctest: +SKIP
Resampled dataset shape Counter({{-1: 268, 1: 227}}) # doctest: +SKIP
"""
_parameter_constraints: dict = {
**BaseCleaningSampler._parameter_constraints,
"n_neighbors": [
Interval(numbers.Integral, 1, None, closed="left"),
HasMethods(["kneighbors", "kneighbors_graph"]),
None,
],
"n_seeds_S": [Interval(numbers.Integral, 1, None, closed="left")],
"n_jobs": [numbers.Integral, None],
"random_state": ["random_state"],
}
def __init__(
self,
*,
sampling_strategy="auto",
random_state=None,
n_neighbors=None,
n_seeds_S=1,
n_jobs=None,
):
super().__init__(sampling_strategy=sampling_strategy)
self.random_state = random_state
self.n_neighbors = n_neighbors
self.n_seeds_S = n_seeds_S
self.n_jobs = n_jobs
def _validate_estimator(self):
"""Private function to create the NN estimator"""
if self.n_neighbors is None:
estimator = KNeighborsClassifier(n_neighbors=1, n_jobs=self.n_jobs)
elif isinstance(self.n_neighbors, numbers.Integral):
estimator = KNeighborsClassifier(
n_neighbors=self.n_neighbors, n_jobs=self.n_jobs
)
elif isinstance(self.n_neighbors, KNeighborsClassifier):
estimator = clone(self.n_neighbors)
return estimator
def _fit_resample(self, X, y):
estimator = self._validate_estimator()
random_state = check_random_state(self.random_state)
target_stats = Counter(y)
class_minority = min(target_stats, key=target_stats.get)
idx_under = np.empty((0,), dtype=int)
self.estimators_ = []
for target_class in np.unique(y):
if target_class in self.sampling_strategy_.keys():
# Randomly get one sample from the majority class
# Generate the index to select
idx_maj = np.flatnonzero(y == target_class)
idx_maj_sample = idx_maj[
random_state.randint(
low=0,
high=target_stats[target_class],
size=self.n_seeds_S,
)
]
# Create the set C - the majority seed sample(s) and all minority samples
C_indices = np.append(
np.flatnonzero(y == class_minority), idx_maj_sample
)
C_x = _safe_indexing(X, C_indices)
C_y = _safe_indexing(y, C_indices)
# Create the set S - all majority samples
S_indices = np.flatnonzero(y == target_class)
S_x = _safe_indexing(X, S_indices)
S_y = _safe_indexing(y, S_indices)
# fit knn on C
self.estimators_.append(clone(estimator).fit(C_x, C_y))
good_classif_label = idx_maj_sample.copy()
# Check each sample in S if we keep it or drop it
for idx_sam, (x_sam, y_sam) in enumerate(zip(S_x, S_y)):
# Do not select samples which are already well classified
if idx_sam in good_classif_label:
continue
# Classify on S
if not issparse(x_sam):
x_sam = x_sam.reshape(1, -1)
pred_y = self.estimators_[-1].predict(x_sam)
# If the prediction does not agree with the true label
# append it to C_x
if y_sam != pred_y:
# Keep the index for later
idx_maj_sample = np.append(idx_maj_sample, idx_maj[idx_sam])
# Update C
C_indices = np.append(C_indices, idx_maj[idx_sam])
C_x = _safe_indexing(X, C_indices)
C_y = _safe_indexing(y, C_indices)
# fit a knn on C
self.estimators_[-1].fit(C_x, C_y)
# This is experimental to speed up the search
# Classify all the elements in S and avoid testing the
# well classified elements
pred_S_y = self.estimators_[-1].predict(S_x)
good_classif_label = np.unique(
np.append(idx_maj_sample, np.flatnonzero(pred_S_y == S_y))
)
idx_under = np.concatenate((idx_under, idx_maj_sample), axis=0)
else:
idx_under = np.concatenate(
(idx_under, np.flatnonzero(y == target_class)), axis=0
)
self.sample_indices_ = idx_under
return _safe_indexing(X, idx_under), _safe_indexing(y, idx_under)
@property
def estimator_(self):
"""Last fitted k-NN estimator."""
warnings.warn(
"`estimator_` attribute has been deprecated in 0.12 and will be "
"removed in 0.14. Use `estimators_` instead.",
FutureWarning,
)
return self.estimators_[-1]
def _more_tags(self):
return {"sample_indices": True}
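# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the estimator above).
# The docstring example relies on the long-removed ``fetch_mldata`` helper, so
# a synthetic imbalanced dataset is used here instead; the helper name and the
# dataset parameters below are assumptions made for this example.
def _condensed_nearest_neighbour_example():  # pragma: no cover
    from collections import Counter

    from sklearn.datasets import make_classification

    X, y = make_classification(
        n_samples=500, n_classes=2, weights=[0.9, 0.1], random_state=0
    )
    cnn = CondensedNearestNeighbour(random_state=42)
    X_res, y_res = cnn.fit_resample(X, y)
    # ``sample_indices_`` gives the rows of the original ``X`` that were kept.
    return Counter(y_res), cnn.sample_indices_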
| 9,474 | 35.583012 | 88 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/under_sampling/_prototype_selection/_neighbourhood_cleaning_rule.py | """Class performing under-sampling based on the neighbourhood cleaning rule."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
import numbers
import warnings
from collections import Counter
import numpy as np
from sklearn.base import clone
from sklearn.neighbors import KNeighborsClassifier, NearestNeighbors
from sklearn.utils import _safe_indexing
from ...utils import Substitution
from ...utils._docstring import _n_jobs_docstring
from ...utils._param_validation import HasMethods, Hidden, Interval, StrOptions
from ..base import BaseCleaningSampler
from ._edited_nearest_neighbours import EditedNearestNeighbours
SEL_KIND = ("all", "mode")
@Substitution(
sampling_strategy=BaseCleaningSampler._sampling_strategy_docstring,
n_jobs=_n_jobs_docstring,
)
class NeighbourhoodCleaningRule(BaseCleaningSampler):
"""Undersample based on the neighbourhood cleaning rule.
This class uses ENN and a k-NN to remove noisy samples from the datasets.
Read more in the :ref:`User Guide <condensed_nearest_neighbors>`.
Parameters
----------
{sampling_strategy}
edited_nearest_neighbours : estimator object, default=None
The :class:`~imblearn.under_sampling.EditedNearestNeighbours` (ENN)
object to clean the dataset. If `None`, a default ENN is created with
`kind_sel="mode"` and `n_neighbors=n_neighbors`.
n_neighbors : int or estimator object, default=3
If ``int``, size of the neighbourhood to consider to compute the
K-nearest neighbors. If object, an estimator that inherits from
:class:`~sklearn.neighbors.base.KNeighborsMixin` that will be used to
find the nearest-neighbors. By default, it will be a 3-NN.
kind_sel : {{"all", "mode"}}, default='all'
Strategy to use in order to exclude samples in the ENN sampling.
- If ``'all'``, all neighbours will have to agree with the samples of
interest to not be excluded.
- If ``'mode'``, the majority vote of the neighbours will be used in
order to exclude a sample.
The strategy `"all"` is less conservative than `'mode'`; thus, more
samples will generally be removed when `kind_sel="all"`.
.. deprecated:: 0.12
`kind_sel` is deprecated in 0.12 and will be removed in 0.14.
Currently the parameter has no effect and corresponds always to the
`"all"` strategy.
threshold_cleaning : float, default=0.5
Threshold used to decide whether to consider a class during the cleaning
phase after applying ENN. A class will be considered during cleaning when:
Ci > C x T,
where Ci and C are the number of samples in the class and in the whole
data set, respectively, and T is the threshold.
{n_jobs}
Attributes
----------
sampling_strategy_ : dict
Dictionary containing the information to sample the dataset. The keys
corresponds to the class labels from which to sample and the values
are the number of samples to sample.
edited_nearest_neighbours_ : estimator object
The edited nearest neighbour object used to make the first resampling.
nn_ : estimator object
Validated K-nearest Neighbours object created from `n_neighbors` parameter.
classes_to_clean_ : list
The classes considered for under-sampling by `nn_` in the second cleaning
phase.
sample_indices_ : ndarray of shape (n_new_samples,)
Indices of the samples selected.
.. versionadded:: 0.4
n_features_in_ : int
Number of features in the input dataset.
.. versionadded:: 0.9
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during `fit`. Defined only when `X` has feature
names that are all strings.
.. versionadded:: 0.10
See Also
--------
EditedNearestNeighbours : Undersample by editing noisy samples.
Notes
-----
See the original paper: [1]_.
Supports multi-class resampling. A one-vs.-rest scheme is used when
sampling a class as proposed in [1]_.
References
----------
.. [1] J. Laurikkala, "Improving identification of difficult small classes
by balancing class distribution," Springer Berlin Heidelberg, 2001.
Examples
--------
>>> from collections import Counter
>>> from sklearn.datasets import make_classification
>>> from imblearn.under_sampling import NeighbourhoodCleaningRule
>>> X, y = make_classification(n_classes=2, class_sep=2,
... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
>>> print('Original dataset shape %s' % Counter(y))
Original dataset shape Counter({{1: 900, 0: 100}})
>>> ncr = NeighbourhoodCleaningRule()
>>> X_res, y_res = ncr.fit_resample(X, y)
>>> print('Resampled dataset shape %s' % Counter(y_res))
Resampled dataset shape Counter({{1: 888, 0: 100}})
"""
_parameter_constraints: dict = {
**BaseCleaningSampler._parameter_constraints,
"edited_nearest_neighbours": [
HasMethods(["fit_resample"]),
None,
],
"n_neighbors": [
Interval(numbers.Integral, 1, None, closed="left"),
HasMethods(["kneighbors", "kneighbors_graph"]),
],
"kind_sel": [StrOptions({"all", "mode"}), Hidden(StrOptions({"deprecated"}))],
"threshold_cleaning": [Interval(numbers.Real, 0, None, closed="neither")],
"n_jobs": [numbers.Integral, None],
}
def __init__(
self,
*,
sampling_strategy="auto",
edited_nearest_neighbours=None,
n_neighbors=3,
kind_sel="deprecated",
threshold_cleaning=0.5,
n_jobs=None,
):
super().__init__(sampling_strategy=sampling_strategy)
self.edited_nearest_neighbours = edited_nearest_neighbours
self.n_neighbors = n_neighbors
self.kind_sel = kind_sel
self.threshold_cleaning = threshold_cleaning
self.n_jobs = n_jobs
def _validate_estimator(self):
"""Create the objects required by NCR."""
if isinstance(self.n_neighbors, numbers.Integral):
self.nn_ = KNeighborsClassifier(
n_neighbors=self.n_neighbors, n_jobs=self.n_jobs
)
elif isinstance(self.n_neighbors, NearestNeighbors):
# backward compatibility when passing a NearestNeighbors object
self.nn_ = KNeighborsClassifier(
n_neighbors=self.n_neighbors.n_neighbors - 1, n_jobs=self.n_jobs
)
else:
self.nn_ = clone(self.n_neighbors)
if self.edited_nearest_neighbours is None:
self.edited_nearest_neighbours_ = EditedNearestNeighbours(
sampling_strategy=self.sampling_strategy,
n_neighbors=self.n_neighbors,
kind_sel="mode",
n_jobs=self.n_jobs,
)
else:
self.edited_nearest_neighbours_ = clone(self.edited_nearest_neighbours)
def _fit_resample(self, X, y):
if self.kind_sel != "deprecated":
warnings.warn(
"`kind_sel` is deprecated in 0.12 and will be removed in 0.14. "
"It already has no effect and corresponds to the `'all'` option.",
FutureWarning,
)
self._validate_estimator()
self.edited_nearest_neighbours_.fit_resample(X, y)
index_not_a1 = self.edited_nearest_neighbours_.sample_indices_
index_a1 = np.ones(y.shape, dtype=bool)
index_a1[index_not_a1] = False
index_a1 = np.flatnonzero(index_a1)
# clean the neighborhood
target_stats = Counter(y)
class_minority = min(target_stats, key=target_stats.get)
# compute which classes to consider for cleaning for the A2 group
self.classes_to_clean_ = [
c
for c, n_samples in target_stats.items()
if (
c in self.sampling_strategy_.keys()
and (n_samples > target_stats[class_minority] * self.threshold_cleaning)
)
]
self.nn_.fit(X, y)
class_minority_indices = np.flatnonzero(y == class_minority)
X_minority = _safe_indexing(X, class_minority_indices)
y_minority = _safe_indexing(y, class_minority_indices)
y_pred_minority = self.nn_.predict(X_minority)
# request one additional neighbor since the query points are part of the fitted dataset
neighbors_to_minority_indices = self.nn_.kneighbors(
X_minority, n_neighbors=self.nn_.n_neighbors + 1, return_distance=False
)[:, 1:]
mask_misclassified_minority = y_pred_minority != y_minority
index_a2 = np.ravel(neighbors_to_minority_indices[mask_misclassified_minority])
index_a2 = np.array(
[
index
for index in np.unique(index_a2)
if y[index] in self.classes_to_clean_
]
)
union_a1_a2 = np.union1d(index_a1, index_a2).astype(int)
selected_samples = np.ones(y.shape, dtype=bool)
selected_samples[union_a1_a2] = False
self.sample_indices_ = np.flatnonzero(selected_samples)
return (
_safe_indexing(X, self.sample_indices_),
_safe_indexing(y, self.sample_indices_),
)
def _more_tags(self):
return {"sample_indices": True}
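# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the estimator above).
# With the default ``threshold_cleaning=0.5``, only targeted classes whose
# size exceeds half the minority-class size are kept in ``classes_to_clean_``
# (see ``_fit_resample`` above). The helper name and the synthetic dataset
# below are assumptions made for this example.
def _neighbourhood_cleaning_rule_example():  # pragma: no cover
    from collections import Counter

    from sklearn.datasets import make_classification

    X, y = make_classification(
        n_samples=1000,
        n_classes=3,
        n_informative=4,
        weights=[0.05, 0.3, 0.65],
        random_state=0,
    )
    ncr = NeighbourhoodCleaningRule()
    X_res, y_res = ncr.fit_resample(X, y)
    return Counter(y_res), ncr.classes_to_clean_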
| 9,614 | 36.123552 | 88 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/under_sampling/_prototype_selection/tests/test_edited_nearest_neighbours.py | """Test the module edited nearest neighbour."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
import numpy as np
from sklearn.datasets import make_classification
from sklearn.neighbors import NearestNeighbors
from sklearn.utils._testing import assert_array_equal
from imblearn.under_sampling import EditedNearestNeighbours
X = np.array(
[
[2.59928271, 0.93323465],
[0.25738379, 0.95564169],
[1.42772181, 0.526027],
[1.92365863, 0.82718767],
[-0.10903849, -0.12085181],
[-0.284881, -0.62730973],
[0.57062627, 1.19528323],
[0.03394306, 0.03986753],
[0.78318102, 2.59153329],
[0.35831463, 1.33483198],
[-0.14313184, -1.0412815],
[0.01936241, 0.17799828],
[-1.25020462, -0.40402054],
[-0.09816301, -0.74662486],
[-0.01252787, 0.34102657],
[0.52726792, -0.38735648],
[0.2821046, -0.07862747],
[0.05230552, 0.09043907],
[0.15198585, 0.12512646],
[0.70524765, 0.39816382],
]
)
Y = np.array([1, 2, 1, 1, 0, 2, 2, 2, 2, 2, 2, 0, 1, 2, 2, 2, 2, 1, 2, 1])
def test_enn_init():
enn = EditedNearestNeighbours()
assert enn.n_neighbors == 3
assert enn.kind_sel == "all"
assert enn.n_jobs is None
def test_enn_fit_resample():
enn = EditedNearestNeighbours()
X_resampled, y_resampled = enn.fit_resample(X, Y)
X_gt = np.array(
[
[-0.10903849, -0.12085181],
[0.01936241, 0.17799828],
[2.59928271, 0.93323465],
[1.92365863, 0.82718767],
[0.25738379, 0.95564169],
[0.78318102, 2.59153329],
[0.52726792, -0.38735648],
]
)
y_gt = np.array([0, 0, 1, 1, 2, 2, 2])
assert_array_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
def test_enn_fit_resample_mode():
enn = EditedNearestNeighbours(kind_sel="mode")
X_resampled, y_resampled = enn.fit_resample(X, Y)
X_gt = np.array(
[
[-0.10903849, -0.12085181],
[0.01936241, 0.17799828],
[2.59928271, 0.93323465],
[1.42772181, 0.526027],
[1.92365863, 0.82718767],
[0.25738379, 0.95564169],
[-0.284881, -0.62730973],
[0.57062627, 1.19528323],
[0.78318102, 2.59153329],
[0.35831463, 1.33483198],
[-0.14313184, -1.0412815],
[-0.09816301, -0.74662486],
[0.52726792, -0.38735648],
[0.2821046, -0.07862747],
]
)
y_gt = np.array([0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2])
assert_array_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
def test_enn_fit_resample_with_nn_object():
nn = NearestNeighbors(n_neighbors=4)
enn = EditedNearestNeighbours(n_neighbors=nn, kind_sel="mode")
X_resampled, y_resampled = enn.fit_resample(X, Y)
X_gt = np.array(
[
[-0.10903849, -0.12085181],
[0.01936241, 0.17799828],
[2.59928271, 0.93323465],
[1.42772181, 0.526027],
[1.92365863, 0.82718767],
[0.25738379, 0.95564169],
[-0.284881, -0.62730973],
[0.57062627, 1.19528323],
[0.78318102, 2.59153329],
[0.35831463, 1.33483198],
[-0.14313184, -1.0412815],
[-0.09816301, -0.74662486],
[0.52726792, -0.38735648],
[0.2821046, -0.07862747],
]
)
y_gt = np.array([0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2])
assert_array_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
def test_enn_check_kind_selection():
"""Check that `kind_sel="all"` is more conservative than
`kind_sel="mode"`."""
X, y = make_classification(
n_samples=1000,
n_classes=2,
weights=[0.3, 0.7],
random_state=0,
)
enn_all = EditedNearestNeighbours(kind_sel="all")
enn_mode = EditedNearestNeighbours(kind_sel="mode")
enn_all.fit_resample(X, y)
enn_mode.fit_resample(X, y)
assert enn_all.sample_indices_.size < enn_mode.sample_indices_.size
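# ---------------------------------------------------------------------------
# Illustrative sketch (not one of the original tests): ENN is typically chained
# with a downstream classifier through imblearn's ``Pipeline`` so that the
# cleaning is only applied when fitting. The helper name and the toy dataset
# below are assumptions made for this example.
def _example_enn_in_pipeline():  # pragma: no cover
    from sklearn.linear_model import LogisticRegression

    from imblearn.pipeline import make_pipeline

    X_toy, y_toy = make_classification(
        n_samples=200, weights=[0.2, 0.8], random_state=0
    )
    model = make_pipeline(
        EditedNearestNeighbours(), LogisticRegression(max_iter=1_000)
    )
    model.fit(X_toy, y_toy)
    return model.score(X_toy, y_toy)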
| 4,212 | 28.879433 | 74 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/under_sampling/_prototype_selection/tests/test_nearmiss.py | """Test the module nearmiss."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
import numpy as np
from sklearn.neighbors import NearestNeighbors
from sklearn.utils._testing import assert_array_equal
from imblearn.under_sampling import NearMiss
X = np.array(
[
[1.17737838, -0.2002118],
[0.4960075, 0.86130762],
[-0.05903827, 0.10947647],
[0.91464286, 1.61369212],
[-0.54619583, 1.73009918],
[-0.60413357, 0.24628718],
[0.45713638, 1.31069295],
[-0.04032409, 3.01186964],
[0.03142011, 0.12323596],
[0.50701028, -0.17636928],
[-0.80809175, -1.09917302],
[-0.20497017, -0.26630228],
[0.99272351, -0.11631728],
[-1.95581933, 0.69609604],
[1.15157493, -1.2981518],
]
)
Y = np.array([1, 2, 1, 0, 2, 1, 2, 2, 1, 2, 0, 0, 2, 1, 2])
VERSION_NEARMISS = (1, 2, 3)
def test_nm_fit_resample_auto():
sampling_strategy = "auto"
X_gt = [
np.array(
[
[0.91464286, 1.61369212],
[-0.80809175, -1.09917302],
[-0.20497017, -0.26630228],
[-0.05903827, 0.10947647],
[0.03142011, 0.12323596],
[-0.60413357, 0.24628718],
[0.50701028, -0.17636928],
[0.4960075, 0.86130762],
[0.45713638, 1.31069295],
]
),
np.array(
[
[0.91464286, 1.61369212],
[-0.80809175, -1.09917302],
[-0.20497017, -0.26630228],
[-0.05903827, 0.10947647],
[0.03142011, 0.12323596],
[-0.60413357, 0.24628718],
[0.50701028, -0.17636928],
[0.4960075, 0.86130762],
[0.45713638, 1.31069295],
]
),
np.array(
[
[0.91464286, 1.61369212],
[-0.80809175, -1.09917302],
[-0.20497017, -0.26630228],
[1.17737838, -0.2002118],
[-0.60413357, 0.24628718],
[0.03142011, 0.12323596],
[1.15157493, -1.2981518],
[-0.54619583, 1.73009918],
[0.99272351, -0.11631728],
]
),
]
y_gt = [
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
]
for version_idx, version in enumerate(VERSION_NEARMISS):
nm = NearMiss(sampling_strategy=sampling_strategy, version=version)
X_resampled, y_resampled = nm.fit_resample(X, Y)
assert_array_equal(X_resampled, X_gt[version_idx])
assert_array_equal(y_resampled, y_gt[version_idx])
def test_nm_fit_resample_float_sampling_strategy():
sampling_strategy = {0: 3, 1: 4, 2: 4}
X_gt = [
np.array(
[
[-0.20497017, -0.26630228],
[-0.80809175, -1.09917302],
[0.91464286, 1.61369212],
[-0.05903827, 0.10947647],
[0.03142011, 0.12323596],
[-0.60413357, 0.24628718],
[1.17737838, -0.2002118],
[0.50701028, -0.17636928],
[0.4960075, 0.86130762],
[0.45713638, 1.31069295],
[0.99272351, -0.11631728],
]
),
np.array(
[
[-0.20497017, -0.26630228],
[-0.80809175, -1.09917302],
[0.91464286, 1.61369212],
[-0.05903827, 0.10947647],
[0.03142011, 0.12323596],
[-0.60413357, 0.24628718],
[1.17737838, -0.2002118],
[0.50701028, -0.17636928],
[0.4960075, 0.86130762],
[0.45713638, 1.31069295],
[0.99272351, -0.11631728],
]
),
np.array(
[
[0.91464286, 1.61369212],
[-0.80809175, -1.09917302],
[-0.20497017, -0.26630228],
[1.17737838, -0.2002118],
[-0.60413357, 0.24628718],
[0.03142011, 0.12323596],
[-0.05903827, 0.10947647],
[1.15157493, -1.2981518],
[-0.54619583, 1.73009918],
[0.99272351, -0.11631728],
[0.45713638, 1.31069295],
]
),
]
y_gt = [
np.array([0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2]),
np.array([0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2]),
np.array([0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2]),
]
for version_idx, version in enumerate(VERSION_NEARMISS):
nm = NearMiss(sampling_strategy=sampling_strategy, version=version)
X_resampled, y_resampled = nm.fit_resample(X, Y)
assert_array_equal(X_resampled, X_gt[version_idx])
assert_array_equal(y_resampled, y_gt[version_idx])
def test_nm_fit_resample_nn_obj():
sampling_strategy = "auto"
nn = NearestNeighbors(n_neighbors=3)
X_gt = [
np.array(
[
[0.91464286, 1.61369212],
[-0.80809175, -1.09917302],
[-0.20497017, -0.26630228],
[-0.05903827, 0.10947647],
[0.03142011, 0.12323596],
[-0.60413357, 0.24628718],
[0.50701028, -0.17636928],
[0.4960075, 0.86130762],
[0.45713638, 1.31069295],
]
),
np.array(
[
[0.91464286, 1.61369212],
[-0.80809175, -1.09917302],
[-0.20497017, -0.26630228],
[-0.05903827, 0.10947647],
[0.03142011, 0.12323596],
[-0.60413357, 0.24628718],
[0.50701028, -0.17636928],
[0.4960075, 0.86130762],
[0.45713638, 1.31069295],
]
),
np.array(
[
[0.91464286, 1.61369212],
[-0.80809175, -1.09917302],
[-0.20497017, -0.26630228],
[1.17737838, -0.2002118],
[-0.60413357, 0.24628718],
[0.03142011, 0.12323596],
[1.15157493, -1.2981518],
[-0.54619583, 1.73009918],
[0.99272351, -0.11631728],
]
),
]
y_gt = [
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
]
for version_idx, version in enumerate(VERSION_NEARMISS):
nm = NearMiss(
sampling_strategy=sampling_strategy,
version=version,
n_neighbors=nn,
)
X_resampled, y_resampled = nm.fit_resample(X, Y)
assert_array_equal(X_resampled, X_gt[version_idx])
assert_array_equal(y_resampled, y_gt[version_idx])
| 6,989 | 32.127962 | 75 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/under_sampling/_prototype_selection/tests/test_repeated_edited_nearest_neighbours.py | """Test the module repeated edited nearest neighbour."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
import numpy as np
import pytest
from sklearn.neighbors import NearestNeighbors
from sklearn.utils._testing import assert_array_equal
from imblearn.under_sampling import RepeatedEditedNearestNeighbours
X = np.array(
[
[-0.12840393, 0.66446571],
[1.32319756, -0.13181616],
[0.04296502, -0.37981873],
[0.83631853, 0.18569783],
[1.02956816, 0.36061601],
[1.12202806, 0.33811558],
[-0.53171468, -0.53735182],
[1.3381556, 0.35956356],
[-0.35946678, 0.72510189],
[1.32326943, 0.28393874],
[2.94290565, -0.13986434],
[0.28294738, -1.00125525],
[0.34218094, -0.58781961],
[-0.88864036, -0.33782387],
[-1.10146139, 0.91782682],
[-0.7969716, -0.50493969],
[0.73489726, 0.43915195],
[0.2096964, -0.61814058],
[-0.28479268, 0.70459548],
[1.84864913, 0.14729596],
[1.59068979, -0.96622933],
[0.73418199, -0.02222847],
[0.50307437, 0.498805],
[0.84929742, 0.41042894],
[0.62649535, 0.46600596],
[0.79270821, -0.41386668],
[1.16606871, -0.25641059],
[1.57356906, 0.30390519],
[1.0304995, -0.16955962],
[1.67314371, 0.19231498],
[0.98382284, 0.37184502],
[0.48921682, -1.38504507],
[-0.46226554, -0.50481004],
[-0.03918551, -0.68540745],
[0.24991051, -1.00864997],
[0.80541964, -0.34465185],
[0.1732627, -1.61323172],
[0.69804044, 0.44810796],
[-0.5506368, -0.42072426],
[-0.34474418, 0.21969797],
]
)
Y = np.array(
[
1,
2,
2,
2,
1,
1,
0,
2,
1,
1,
1,
2,
2,
0,
1,
2,
1,
2,
1,
1,
2,
2,
1,
1,
1,
2,
2,
2,
2,
1,
1,
2,
0,
2,
2,
2,
2,
1,
2,
0,
]
)
def test_renn_init():
renn = RepeatedEditedNearestNeighbours()
assert renn.n_neighbors == 3
assert renn.kind_sel == "all"
assert renn.n_jobs is None
def test_renn_iter_wrong():
max_iter = -1
renn = RepeatedEditedNearestNeighbours(max_iter=max_iter)
with pytest.raises(ValueError):
renn.fit_resample(X, Y)
def test_renn_fit_resample():
renn = RepeatedEditedNearestNeighbours()
X_resampled, y_resampled = renn.fit_resample(X, Y)
X_gt = np.array(
[
[-0.53171468, -0.53735182],
[-0.88864036, -0.33782387],
[-0.46226554, -0.50481004],
[-0.34474418, 0.21969797],
[1.02956816, 0.36061601],
[1.12202806, 0.33811558],
[0.73489726, 0.43915195],
[0.50307437, 0.498805],
[0.84929742, 0.41042894],
[0.62649535, 0.46600596],
[0.98382284, 0.37184502],
[0.69804044, 0.44810796],
[0.04296502, -0.37981873],
[0.28294738, -1.00125525],
[0.34218094, -0.58781961],
[0.2096964, -0.61814058],
[1.59068979, -0.96622933],
[0.73418199, -0.02222847],
[0.79270821, -0.41386668],
[1.16606871, -0.25641059],
[1.0304995, -0.16955962],
[0.48921682, -1.38504507],
[-0.03918551, -0.68540745],
[0.24991051, -1.00864997],
[0.80541964, -0.34465185],
[0.1732627, -1.61323172],
]
)
y_gt = np.array(
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
)
assert_array_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
assert 0 < renn.n_iter_ <= renn.max_iter
def test_renn_fit_resample_mode_object():
renn = RepeatedEditedNearestNeighbours(kind_sel="mode")
X_resampled, y_resampled = renn.fit_resample(X, Y)
X_gt = np.array(
[
[-0.53171468, -0.53735182],
[-0.88864036, -0.33782387],
[-0.46226554, -0.50481004],
[-0.34474418, 0.21969797],
[-0.12840393, 0.66446571],
[1.02956816, 0.36061601],
[1.12202806, 0.33811558],
[-0.35946678, 0.72510189],
[2.94290565, -0.13986434],
[-1.10146139, 0.91782682],
[0.73489726, 0.43915195],
[-0.28479268, 0.70459548],
[1.84864913, 0.14729596],
[0.50307437, 0.498805],
[0.84929742, 0.41042894],
[0.62649535, 0.46600596],
[1.67314371, 0.19231498],
[0.98382284, 0.37184502],
[0.69804044, 0.44810796],
[1.32319756, -0.13181616],
[0.04296502, -0.37981873],
[0.28294738, -1.00125525],
[0.34218094, -0.58781961],
[0.2096964, -0.61814058],
[1.59068979, -0.96622933],
[0.73418199, -0.02222847],
[0.79270821, -0.41386668],
[1.16606871, -0.25641059],
[1.0304995, -0.16955962],
[0.48921682, -1.38504507],
[-0.03918551, -0.68540745],
[0.24991051, -1.00864997],
[0.80541964, -0.34465185],
[0.1732627, -1.61323172],
]
)
y_gt = np.array(
[
0,
0,
0,
0,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
]
)
assert_array_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
assert 0 < renn.n_iter_ <= renn.max_iter
def test_renn_fit_resample_mode():
nn = NearestNeighbors(n_neighbors=4)
renn = RepeatedEditedNearestNeighbours(n_neighbors=nn, kind_sel="mode")
X_resampled, y_resampled = renn.fit_resample(X, Y)
X_gt = np.array(
[
[-0.53171468, -0.53735182],
[-0.88864036, -0.33782387],
[-0.46226554, -0.50481004],
[-0.34474418, 0.21969797],
[-0.12840393, 0.66446571],
[1.02956816, 0.36061601],
[1.12202806, 0.33811558],
[-0.35946678, 0.72510189],
[2.94290565, -0.13986434],
[-1.10146139, 0.91782682],
[0.73489726, 0.43915195],
[-0.28479268, 0.70459548],
[1.84864913, 0.14729596],
[0.50307437, 0.498805],
[0.84929742, 0.41042894],
[0.62649535, 0.46600596],
[1.67314371, 0.19231498],
[0.98382284, 0.37184502],
[0.69804044, 0.44810796],
[1.32319756, -0.13181616],
[0.04296502, -0.37981873],
[0.28294738, -1.00125525],
[0.34218094, -0.58781961],
[0.2096964, -0.61814058],
[1.59068979, -0.96622933],
[0.73418199, -0.02222847],
[0.79270821, -0.41386668],
[1.16606871, -0.25641059],
[1.0304995, -0.16955962],
[0.48921682, -1.38504507],
[-0.03918551, -0.68540745],
[0.24991051, -1.00864997],
[0.80541964, -0.34465185],
[0.1732627, -1.61323172],
]
)
y_gt = np.array(
[
0,
0,
0,
0,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
]
)
assert_array_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
assert 0 < renn.n_iter_ <= renn.max_iter
@pytest.mark.parametrize(
"max_iter, n_iter",
[(2, 2), (5, 3)],
)
def test_renn_iter_attribute(max_iter, n_iter):
renn = RepeatedEditedNearestNeighbours(max_iter=max_iter)
renn.fit_resample(X, Y)
assert renn.n_iter_ == n_iter
| 8,727 | 24.746313 | 86 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/under_sampling/_prototype_selection/tests/test_instance_hardness_threshold.py | """Test the module instance hardness threshold."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
import numpy as np
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn.naive_bayes import GaussianNB as NB
from sklearn.utils._testing import assert_array_equal
from imblearn.under_sampling import InstanceHardnessThreshold
RND_SEED = 0
X = np.array(
[
[-0.3879569, 0.6894251],
[-0.09322739, 1.28177189],
[-0.77740357, 0.74097941],
[0.91542919, -0.65453327],
[-0.03852113, 0.40910479],
[-0.43877303, 1.07366684],
[-0.85795321, 0.82980738],
[-0.18430329, 0.52328473],
[-0.30126957, -0.66268378],
[-0.65571327, 0.42412021],
[-0.28305528, 0.30284991],
[0.20246714, -0.34727125],
[1.06446472, -1.09279772],
[0.30543283, -0.02589502],
[-0.00717161, 0.00318087],
]
)
Y = np.array([0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0])
ESTIMATOR = GradientBoostingClassifier(random_state=RND_SEED)
def test_iht_init():
sampling_strategy = "auto"
iht = InstanceHardnessThreshold(
estimator=ESTIMATOR,
sampling_strategy=sampling_strategy,
random_state=RND_SEED,
)
assert iht.sampling_strategy == sampling_strategy
assert iht.random_state == RND_SEED
def test_iht_fit_resample():
iht = InstanceHardnessThreshold(estimator=ESTIMATOR, random_state=RND_SEED)
X_resampled, y_resampled = iht.fit_resample(X, Y)
assert X_resampled.shape == (12, 2)
assert y_resampled.shape == (12,)
def test_iht_fit_resample_half():
sampling_strategy = {0: 3, 1: 3}
iht = InstanceHardnessThreshold(
estimator=NB(),
sampling_strategy=sampling_strategy,
random_state=RND_SEED,
)
X_resampled, y_resampled = iht.fit_resample(X, Y)
assert X_resampled.shape == (6, 2)
assert y_resampled.shape == (6,)
def test_iht_fit_resample_class_obj():
est = GradientBoostingClassifier(random_state=RND_SEED)
iht = InstanceHardnessThreshold(estimator=est, random_state=RND_SEED)
X_resampled, y_resampled = iht.fit_resample(X, Y)
assert X_resampled.shape == (12, 2)
assert y_resampled.shape == (12,)
def test_iht_reproducibility():
from sklearn.datasets import load_digits
X_digits, y_digits = load_digits(return_X_y=True)
idx_sampled = []
for seed in range(5):
est = RandomForestClassifier(n_estimators=10, random_state=seed)
iht = InstanceHardnessThreshold(estimator=est, random_state=RND_SEED)
iht.fit_resample(X_digits, y_digits)
idx_sampled.append(iht.sample_indices_.copy())
for idx_1, idx_2 in zip(idx_sampled, idx_sampled[1:]):
assert_array_equal(idx_1, idx_2)
def test_iht_fit_resample_default_estimator():
iht = InstanceHardnessThreshold(estimator=None, random_state=RND_SEED)
X_resampled, y_resampled = iht.fit_resample(X, Y)
assert isinstance(iht.estimator_, RandomForestClassifier)
assert X_resampled.shape == (12, 2)
assert y_resampled.shape == (12,)
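# ---------------------------------------------------------------------------
# Illustrative sketch (not one of the original tests): ``sample_indices_`` maps
# the kept rows back to the original arrays, so the removed "hard" samples can
# be inspected. The helper name is an assumption made for this example; it
# reuses the toy ``X``/``Y`` defined at the top of this module.
def _example_iht_sample_indices():  # pragma: no cover
    iht = InstanceHardnessThreshold(estimator=NB(), random_state=RND_SEED)
    X_res, y_res = iht.fit_resample(X, Y)
    removed = sorted(set(range(len(Y))) - set(iht.sample_indices_))
    assert len(removed) == len(Y) - len(y_res)
    return X_res.shape, removed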
| 3,118 | 31.489583 | 79 | py |